test_utility.py
import threading
import pytest
from base.client_base import TestcaseBase
from base.utility_wrapper import ApiUtilityWrapper
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.milvus_sys import MilvusSys
prefix = "utility"
default_schema = cf.gen_default_collection_schema()
default_int64_field_name = ct.default_int64_field_name
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_dim = ct.default_dim
default_nb = ct.default_nb
num_loaded_entities = "num_loaded_entities"
num_total_entities = "num_total_entities"
class TestUtilityParams(TestcaseBase):
""" Test case of index interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
if request.param == [] or request.param == "":
pytest.skip("metric empty is valid for distance calculation")
if isinstance(request.param, str):
pytest.skip("string is valid type for metric")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_value(self, request):
if request.param == [] or request.param == "":
pytest.skip("metric empty is valid for distance calculation")
if not isinstance(request.param, str):
pytest.skip("Skip invalid type for metric")
yield request.param
@pytest.fixture(scope="function", params=["JACCARD", "Superstructure", "Substructure"])
def get_not_support_metric(self, request):
yield request.param
@pytest.fixture(scope="function", params=["metric_type", "metric"])
def get_support_metric_field(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition_names(self, request):
if isinstance(request.param, list):
if len(request.param) == 0:
pytest.skip("empty is valid for partition")
if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
"""
******************************************************************
# The following are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_name_invalid(self, get_invalid_collection_name):
"""
target: test has_collection with error collection name
method: input invalid name
expected: raise exception
"""
self._connect()
c_name = get_invalid_collection_name
if isinstance(c_name, str) and c_name:
self.utility_wrap.has_collection(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"})
# elif not isinstance(c_name, str): self.utility_wrap.has_collection(c_name, check_task=CheckTasks.err_res,
# check_items={ct.err_code: 1, ct.err_msg: "illegal"})
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_collection_name_invalid(self, get_invalid_collection_name):
"""
target: test has_partition with error collection name
method: input invalid name
expected: raise exception
"""
self._connect()
c_name = get_invalid_collection_name
p_name = cf.gen_unique_str(prefix)
if isinstance(c_name, str) and c_name:
self.utility_wrap.has_partition(
c_name, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid"})
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_name_invalid(self, get_invalid_partition_name):
"""
target: test has_partition with error partition name
method: input invalid name
expected: raise exception
"""
self._connect()
ut = ApiUtilityWrapper()
c_name = cf.gen_unique_str(prefix)
p_name = get_invalid_partition_name
if isinstance(p_name, str) and p_name:
ex, _ = ut.has_partition(
c_name, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid"})
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_name_invalid(self, get_invalid_collection_name):
self._connect()
error = f'`collection_name` value {get_invalid_collection_name} is illegal'
self.utility_wrap.drop_collection(get_invalid_collection_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: error})
# TODO: enable
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_using_invalid(self):
"""
target: test list_collections with invalid using
method: input invalid name
expected: raise exception
"""
self._connect()
using = "empty"
ut = ApiUtilityWrapper()
ex, _ = ut.list_collections(using=using, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: "should create connect"})
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_invalid_name(self, get_invalid_collection_name):
"""
target: test building_process
method: input invalid name
expected: raise exception
"""
pass
# self._connect() c_name = get_invalid_collection_name ut = ApiUtilityWrapper() if isinstance(c_name,
# str) and c_name: ex, _ = ut.index_building_progress(c_name, check_items={ct.err_code: 1, ct.err_msg:
# "Invalid collection name"})
# TODO: not support index name
@pytest.mark.tags(CaseLabel.L1)
def _test_index_process_invalid_index_name(self, get_invalid_index_name):
"""
target: test building_process
method: input invalid index name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
ut = ApiUtilityWrapper()
ex, _ = ut.index_building_progress(c_name, index_name)
log.error(str(ex))
assert "invalid" or "illegal" in str(ex)
@pytest.mark.tags(CaseLabel.L2)
def test_wait_index_invalid_name(self, get_invalid_collection_name):
"""
target: test wait_index
method: input invalid name
expected: raise exception
"""
pass
# self._connect()
# c_name = get_invalid_collection_name
# ut = ApiUtilityWrapper()
# if isinstance(c_name, str) and c_name:
# ex, _ = ut.wait_for_index_building_complete(c_name,
# check_items={ct.err_code: 1,
# ct.err_msg: "Invalid collection name"})
@pytest.mark.tags(CaseLabel.L1)
def _test_wait_index_invalid_index_name(self, get_invalid_index_name):
"""
target: test wait_index
method: input invalid index name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
ut = ApiUtilityWrapper()
ex, _ = ut.wait_for_index_building_complete(c_name, index_name)
log.error(str(ex))
assert "invalid" or "illegal" in str(ex)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("invalid_c_name", ["12-s", "12 s", "(mn)", "中文", "%$#"])
def test_loading_progress_invalid_collection_name(self, invalid_c_name):
"""
target: test loading progress with invalid collection name
method: input invalid collection name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data()
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name)
self.collection_wrap.load()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(invalid_c_name)}
self.utility_wrap.loading_progress(invalid_c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_not_existed_collection_name(self):
"""
target: test loading progress with not existed collection name
method: input not existed collection name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data()
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name)
self.collection_wrap.load()
error = {ct.err_code: 1, ct.err_msg: "describe collection failed: can't find collection"}
self.utility_wrap.loading_progress("not_existed_name", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="pymilvus issue #677")
def test_loading_progress_invalid_partition_names(self, get_invalid_partition_names):
"""
target: test loading progress with invalid partition names
method: input invalid partition names
expected: raise an exception
"""
collection_w = self.init_collection_general(prefix)[0]
partition_names = get_invalid_partition_names
err_msg = {ct.err_code: 0, ct.err_msg: "`partition_name_array` value {} is illegal".format(partition_names)}
collection_w.load()
self.utility_wrap.loading_progress(collection_w.name, partition_names,
check_task=CheckTasks.err_res, check_items=err_msg)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_names", [[ct.default_tag], [ct.default_partition_name, ct.default_tag]])
def test_loading_progress_not_existed_partitions(self, partition_names):
"""
target: test loading progress with not existed partitions
method: input all or part not existed partition names
expected: raise exception
"""
collection_w = self.init_collection_general(prefix)[0]
log.debug(collection_w.num_entities)
collection_w.load()
err_msg = {ct.err_code: 1, ct.err_msg: f"partitionID of partitionName:{ct.default_tag} can not be found"}
self.utility_wrap.loading_progress(collection_w.name, partition_names,
check_task=CheckTasks.err_res, check_items=err_msg)
@pytest.mark.tags(CaseLabel.L2)
def test_wait_for_loading_collection_not_existed(self):
"""
target: test wait for loading
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.wait_for_loading_complete(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L2)
def test_wait_for_loading_partition_not_existed(self):
"""
target: test wait for loading
method: input partition not created before
expected: raise exception
"""
self._connect()
collection_w = self.init_collection_wrap()
self.utility_wrap.wait_for_loading_complete(
collection_w.name, partition_names=[ct.default_tag],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: f'partitionID of partitionName:{ct.default_tag} can not be find'})
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_not_existed(self):
"""
target: test drop a not existed collection
method: drop a not created collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 1, ct.err_msg: f"DescribeCollection failed: can't find collection: {c_name}"}
self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_left_vector_invalid_type(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
if not isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(invalid_vector, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_left value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_left_vector_invalid_value(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors value
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
if isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(invalid_vector, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_left value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_right_vector_invalid_type(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
vector = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vector}
if not isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(op_l, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_right value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_right_vector_invalid_value(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors value
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
vector = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vector}
if isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(op_l, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_right value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_metric_type(self, get_support_metric_field, get_invalid_metric_type):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_invalid_metric_type
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "params value {{'metric': {}}} "
"is illegal".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_metric_value(self, get_support_metric_field, get_invalid_metric_value):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_invalid_metric_value
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "{} metric type is invalid for "
"float vector".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_not_support_metric(self, get_support_metric_field, get_not_support_metric):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_not_support_metric
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "{} metric type is invalid for "
"float vector".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_using(self, get_support_metric_field):
"""
target: test calculated distance with invalid using
method: input invalid using
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
params = {metric_field: "L2", "sqrt": True}
using = "empty"
self.utility_wrap.calc_distance(op_l, op_r, params, using=using,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "should create connect"})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_not_match_dim(self):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type and value
expected: raise exception
"""
self._connect()
dim = 129
vector_l = cf.gen_vectors(default_nb, default_dim)
vector_r = cf.gen_vectors(default_nb, dim)
op_l = {"float_vectors": vector_l}
op_r = {"float_vectors": vector_r}
self.utility_wrap.calc_distance(op_l, op_r,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Cannot calculate distance between "
"vectors with different dimension"})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_collection_before_load(self, get_support_metric_field):
"""
target: test calculated distance when entities are not ready
method: calculate distance before load
expected: raise exception
"""
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb,
is_index=True)
middle = len(insert_ids) // 2
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
metric_field = get_support_metric_field
params = {metric_field: "L2", "sqrt": True}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection {} was not "
"loaded into memory)".format(collection_w.name)})
class TestUtilityBase(TestcaseBase):
""" Test case of index interface """
@pytest.fixture(scope="function", params=["metric_type", "metric"])
def metric_field(self, request):
yield request.param
@pytest.fixture(scope="function", params=[True, False])
def sqrt(self, request):
yield request.param
@pytest.fixture(scope="function", params=["L2", "IP"])
def metric(self, request):
yield request.param
@pytest.fixture(scope="function", params=["HAMMING", "TANIMOTO"])
def metric_binary(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L1)
def test_has_collection(self):
"""
target: test has_collection with collection name
method: input collection name created before
expected: True
"""
cw = self.init_collection_wrap()
res, _ = self.utility_wrap.has_collection(cw.name)
assert res is True
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_not_created(self):
"""
target: test has_collection with collection name which is not created
method: input random collection name
expected: False
"""
c_name = cf.gen_unique_str(prefix)
_ = self.init_collection_wrap()
res, _ = self.utility_wrap.has_collection(c_name)
assert res is False
@pytest.mark.tags(CaseLabel.L1)
def test_has_collection_after_drop(self):
"""
target: test has_collection with collection name dropped before
method: input random collection name
expected: False
"""
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_collection(c_name)
assert res is True
cw.drop()
res, _ = self.utility_wrap.has_collection(c_name)
assert res is False
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition(self):
"""
target: test has_partition with partition name
method: input collection name and partition name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
self.init_partition_wrap(cw, p_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is True
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_not_created(self):
"""
target: test has_partition with partition name
method: input collection name, and partition name not created before
expected: False
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str()
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is False
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_after_drop(self):
"""
target: test has_partition with partition name
method: input collection name, and partition name dropped
expected: False after drop
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str()
cw = self.init_collection_wrap(name=c_name)
pw = self.init_partition_wrap(cw, p_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is True
pw.drop()
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is False
@pytest.mark.tags(CaseLabel.L2)
def test_has_default_partition(self):
"""
target: test has_partition with '_default' partition
method: input collection name and partition name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_partition(c_name, ct.default_partition_name)
assert res is True
@pytest.mark.tags(CaseLabel.L1)
def test_list_collections(self):
"""
target: test list_collections
method: create collection, list_collections
expected: in the result
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.list_collections()
assert c_name in res
# TODO: make sure all collections deleted
@pytest.mark.tags(CaseLabel.L1)
def _test_list_collections_no_collection(self):
"""
target: test list_collections
method: no collection created, list_collections
expected: length of the result equals to 0
"""
self._connect()
res, _ = self.utility_wrap.list_collections()
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_index_process_collection_not_existed(self):
"""
target: test building_process
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.index_building_progress(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_empty(self):
"""
target: test building_process
method: input empty collection
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(cw.collection, default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
exp_res = {'total_rows': 0, 'indexed_rows': 0}
assert res == exp_res
@pytest.mark.tags(CaseLabel.L2)
def test_index_process_collection_insert_no_index(self):
"""
target: test building_process
method: insert 1 entity, no index created
expected: no exception raised
"""
nb = 1
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
error = {ct.err_code: 1, ct.err_msg: "no index is created"}
self.utility_wrap.index_building_progress(c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_index(self):
"""
target: test building_process
method: 1.insert 1024 (because minSegmentSizeToEnableIndex=1024)
2.build(server does create index) and call building_process
expected: indexed_rows=0
"""
nb = 1024
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
assert res['indexed_rows'] == 0
assert res['total_rows'] == nb
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_indexing(self):
"""
target: test building_process
method: 1.insert 2048 entities to ensure that server will build
2.call building_process during building
expected: 2048 or fewer entities indexed
"""
nb = 2048
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
assert (0 < res['indexed_rows'] <= nb)
assert res['total_rows'] == nb
@pytest.mark.tags(CaseLabel.L2)
def test_wait_index_collection_not_existed(self):
"""
target: test wait_index
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.wait_for_index_building_complete(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_collection_empty(self):
"""
target: test wait_index
method: input empty collection
expected: no exception raised
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
cw.create_index(default_field_name, default_index_params)
assert self.utility_wrap.wait_for_index_building_complete(c_name)[0]
res, _ = self.utility_wrap.index_building_progress(c_name)
exp_res = {'total_rows': 0, 'indexed_rows': 0}
assert res == exp_res
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_collection_index(self):
"""
target: test wait_index
method: insert 5000 entities, build and call wait_index
expected: 5000 entities indexed
"""
nb = 5000
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.wait_for_index_building_complete(c_name)
assert res is True
res, _ = self.utility_wrap.index_building_progress(c_name)
assert res["indexed_rows"] == nb
@pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_without_loading(self):
"""
target: test loading progress without loading
method: insert and flush data, call loading_progress without loading
expected: loaded entities is 0
"""
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data()
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
exp_res = {num_loaded_entities: 0, num_total_entities: ct.default_nb}
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res == exp_res
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("nb", [ct.default_nb, 5000])
def test_loading_progress_collection(self, nb):
"""
target: test loading progress
method: 1.insert flush and load 2.call loading_progress
expected: all entities are loaded, because load is synchronous
"""
# create, insert default_nb, flush and load
collection_w = self.init_collection_general(prefix, insert_data=True, nb=nb)[0]
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res[num_total_entities] == nb
assert res[num_loaded_entities] == nb
@pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_with_async_load(self):
"""
target: test loading progress with async collection load
method: 1.load collection with async=True 2.loading_progress
expected: loading part entities
"""
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data()
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
collection_w.load(_async=True)
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert (0 <= res[num_loaded_entities] <= ct.default_nb)
@pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_empty_collection(self):
"""
target: test loading_progress on an empty collection
method: 1.create collection and no insert 2.loading_progress
expected: 0 entities are loaded
"""
collection_w = self.init_collection_wrap()
collection_w.load()
res, _ = self.utility_wrap.loading_progress(collection_w.name)
exp_res = {num_loaded_entities: 0, num_total_entities: 0}
assert exp_res == res
@pytest.mark.tags(CaseLabel.L1)
def test_loading_progress_after_release(self):
"""
target: test loading progress after releasing the collection
method: load the collection, release it, then call loading_progress
expected: loaded entities is 0
"""
collection_w = self.init_collection_general(prefix, insert_data=True)[0]
collection_w.release()
exp_res = {num_loaded_entities: 0, num_total_entities: ct.default_nb}
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res == exp_res
@pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_with_release_partition(self):
"""
target: test loading progress after release part partitions
method: 1.insert data into two partitions and flush
2.load one partition and release one partition
expected: loaded one partition entities
"""
half = ct.default_nb
# insert entities into two partitions, collection flush and load
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
partition_w.release()
res = self.utility_wrap.loading_progress(collection_w.name)[0]
assert res[num_total_entities] == half * 2
assert res[num_loaded_entities] == half
@pytest.mark.tags(CaseLabel.L2)
def test_loading_progress_with_load_partition(self):
"""
target: test loading progress after load partition
method: 1.insert data into two partitions and flush
2.load one partition and loading progress
expected: loaded one partition entities
"""
half = ct.default_nb
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
collection_w.release()
partition_w.load()
res = self.utility_wrap.loading_progress(collection_w.name)[0]
assert res[num_total_entities] == half * 2
assert res[num_loaded_entities] == half
@pytest.mark.tags(CaseLabel.L1)
def test_loading_progress_with_partition(self):
"""
target: test loading progress with partition
method: 1.insert data into two partitions and flush, and load
2.loading progress with one partition
expected: loaded one partition entities
"""
half = ct.default_nb
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
res = self.utility_wrap.loading_progress(collection_w.name, partition_names=[partition_w.name])[0]
assert res[num_total_entities] == half
assert res[num_loaded_entities] == half
@pytest.mark.tags(CaseLabel.L2)
def test_wait_loading_collection_empty(self):
"""
target: test wait_for_loading
method: input empty collection
expected: no exception raised
"""
self._connect()
cw = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
cw.load()
self.utility_wrap.wait_for_loading_complete(cw.name)
res, _ = self.utility_wrap.loading_progress(cw.name)
exp_res = {num_total_entities: 0, num_loaded_entities: 0}
assert res == exp_res
@pytest.mark.tags(CaseLabel.L1)
def test_wait_for_loading_complete(self):
"""
target: test wait for loading collection
method: insert 6000 entities and wait for loading complete
expected: after loading complete, loaded entities is 6000
"""
nb = 6000
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df, timeout=60)
assert collection_w.num_entities == nb
collection_w.load(_async=True)
self.utility_wrap.wait_for_loading_complete(collection_w.name)
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res[num_loaded_entities] == nb
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection(self):
"""
target: test utility drop collection by name
method: input collection name and drop collection
expected: collection is dropped
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
self.utility_wrap.drop_collection(c_name)
assert not self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_repeatedly(self):
"""
target: test drop collection repeatedly
method: 1.collection.drop 2.utility.drop_collection
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
collection_w.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
error = {ct.err_code: 1, ct.err_msg: {"describe collection failed: can't find collection:"}}
self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_create_repeatedly(self):
"""
target: test repeatedly create and drop same name collection
method: repeatedly create and drop collection
expected: no exception
"""
from time import sleep
loops = 3
c_name = cf.gen_unique_str(prefix)
for _ in range(loops):
self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
self.utility_wrap.drop_collection(c_name)
assert not self.utility_wrap.has_collection(c_name)[0]
sleep(1)
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_default(self):
"""
target: test calculated distance with default params
method: calculated distance between two random vectors
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors")
self.utility_wrap.calc_distance(op_l, op_r,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_default_sqrt(self, metric_field, metric):
"""
target: test calculated distance with default param
method: calculated distance with default sqrt
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors within default sqrt")
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_default_metric(self, sqrt):
"""
target: test calculated distance with default param
method: calculated distance with default metric
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors within default metric")
params = {"sqrt": sqrt}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_binary_metric(self, metric_field, metric_binary):
"""
target: test calculate distance with binary vectors
method: calculate distance between binary vectors
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
nb = 10
raw_vectors_l, vectors_l = cf.gen_binary_vectors(nb, default_dim)
raw_vectors_r, vectors_r = cf.gen_binary_vectors(nb, default_dim)
op_l = {"bin_vectors": vectors_l}
op_r = {"bin_vectors": vectors_r}
log.info("Calculating distance for binary vectors")
params = {metric_field: metric_binary}
vectors_l = raw_vectors_l
vectors_r = raw_vectors_r
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric_binary})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_from_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: both left and right vectors are from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = vectors[:middle]
vectors_r = []
for i in range(middle):
vectors_r.append(vectors[middle + i])
log.info("Creating vectors from collections for distance calculation")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
log.info("Creating vectors for entities")
params = {metric_field: metric, "sqrt": sqrt}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_collections(self, metric_field, metric, sqrt):
"""
target: test calculated distance between entities from collections
method: calculated distance between entities from two collections
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
prefix_1 = "utility_distance"
log.info("Creating two collections")
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
collection_w_1, vectors_1, _, insert_ids_1, _ = self.init_collection_general(prefix_1, True, nb)
vectors_l = vectors[0].loc[:, default_field_name]
vectors_r = vectors_1[0].loc[:, default_field_name]
log.info("Extracting entities from collections for distance calculating")
op_l = {"ids": insert_ids, "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids_1, "collection": collection_w_1.name,
"field": default_field_name}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance for entities from two collections")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_left_vector_and_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: set left vectors as random vectors, right vectors from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = cf.gen_vectors(nb, default_dim)
vectors_r = []
for i in range(middle):
vectors_r.append(vectors[middle + i])
op_l = {"float_vectors": vectors_l}
log.info("Extracting entities from collections for distance calculating")
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance between vectors and entities")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_right_vector_and_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: set right vectors as random vectors, left vectors from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = vectors[:middle]
vectors_r = cf.gen_vectors(nb, default_dim)
log.info("Extracting entities from collections for distance calculating")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"float_vectors": vectors_r}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance between right vector and entities")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from one partition entities
method: both left and right vectors are from partition
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
partitions = collection_w.partitions
middle = len(insert_ids) // 2
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
for i in range(len(partitions)):
log.info("Extracting entities from partitions for distance calculating")
vectors_l = vectors[i].loc[:, default_field_name]
vectors_r = vectors[i].loc[:, default_field_name]
op_l = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
op_r = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
log.info("Calculating distance between entities from one partition")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_partitions(self, metric_field, metric, sqrt):
"""
target: test calculated distance between entities from partitions
method: calculate distance between entities from two partitions
expected: distance calculated successfully
"""
log.info("Create connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
partitions = collection_w.partitions
middle = len(insert_ids) // 2
params = {metric_field: metric, "sqrt": sqrt}
vectors_l = vectors[0].loc[:, default_field_name]
vectors_r = vectors[1].loc[:, default_field_name]
log.info("Extract entities from two partitions for distance calculating")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"partition": partitions[0].name, "field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"partition": partitions[1].name, "field": default_field_name}
log.info("Calculate distance between entities from two partitions")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_left_vectors_and_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance between vectors and partition entities
method: set left vectors as random vectors, right vectors are entities
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
middle = len(insert_ids) // 2
partitions = collection_w.partitions
vectors_l = cf.gen_vectors(nb // 2, default_dim)
log.info("Extract entities from collection as right vectors")
op_l = {"float_vectors": vectors_l}
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
log.info("Calculate distance between vector and entities")
for i in range(len(partitions)):
vectors_r = vectors[i].loc[:, default_field_name]
op_r = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_right_vectors_and_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance between vectors and partition entities
method: set right vectors as random vectors, left vectors are entities
expected: distance calculated successfully
"""
log.info("Create connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
middle = len(insert_ids) // 2
partitions = collection_w.partitions
vectors_r = cf.gen_vectors(nb // 2, default_dim)
op_r = {"float_vectors": vectors_r}
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
for i in range(len(partitions)):
vectors_l = vectors[i].loc[:, default_field_name]
log.info("Extract entities from partition %d as left vector" % i)
op_l = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
log.info("Calculate distance between vector and entities from partition %d" % i)
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
class TestUtilityAdvanced(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multi_collections(self):
"""
target: test has_collection with collection name
method: input collection name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
self.init_collection_wrap(name=c_name_2)
for name in [c_name, c_name_2]:
res, _ = self.utility_wrap.has_collection(name)
assert res is True
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_multi_collection(self):
"""
target: test list_collections
method: create collection, list_collections
expected: in the result
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
self.init_collection_wrap(name=c_name_2)
res, _ = self.utility_wrap.list_collections()
for name in [c_name, c_name_2]:
assert name in res
@pytest.mark.tags(CaseLabel.L2)
def test_drop_multi_collection_concurrent(self):
"""
target: test concurrent drop collection
method: multi thread drop one collection
expected: drop successfully
"""
thread_num = 3
threads = []
c_names = []
num = 5
for i in range(thread_num * num):
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(c_name)
c_names.append(c_name)
def create_and_drop_collection(names):
for name in names:
assert self.utility_wrap.has_collection(name)[0]
self.utility_wrap.drop_collection(name)
assert not self.utility_wrap.has_collection(name)[0]
for i in range(thread_num):
x = threading.Thread(target=create_and_drop_collection, args=(c_names[i * num:(i + 1) * num],))
threads.append(x)
x.start()
for t in threads:
t.join()
log.debug(self.utility_wrap.list_collections()[0])
@pytest.mark.tags(CaseLabel.L2)
def test_get_query_segment_info_empty_collection(self):
"""
target: test getting query segment info of empty collection
method: init a collection and get query segment info
expected: length of segment is 0
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_w.load()
res, _ = self.utility_wrap.get_query_segment_info(c_name)
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_get_growing_query_segment_info(self):
"""
target: test getting growing query segment info of collection with data
method: init a collection, insert data, load, search, and get query segment info
expected:
1. length of segment is greater than 0
2. the sum num_rows of each segment is equal to num of entities
"""
import random
dim = 128
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 3000
nq = 2
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
collection_w.load()
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors, default_field_name, ct.default_search_params, ct.default_limit)
res, _ = self.utility_wrap.get_query_segment_info(c_name)
assert len(res) > 0
segment_ids = []
cnt = 0
for r in res:
log.info(f"segmentID {r.segmentID}: state: {r.state}; num_rows: {r.num_rows} ")
if r.segmentID not in segment_ids:
segment_ids.append(r.segmentID)
cnt += r.num_rows
assert cnt == nb
@pytest.mark.tags(CaseLabel.L1)
def test_get_sealed_query_segment_info(self):
"""
target: test getting sealed query segment info of collection with data
method: init a collection, insert data, flush, load, and get query segment info
expected:
1. length of segment is greater than 0
2. the sum num_rows of each segment is equal to num of entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 3000
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
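# accessing num_entities triggers a flush, which seals the segments that were just inserted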
collection_w.num_entities
collection_w.load()
res, _ = self.utility_wrap.get_query_segment_info(c_name)
assert len(res) > 0
segment_ids = []
cnt = 0
for r in res:
log.info(f"segmentID {r.segmentID}: state: {r.state}; num_rows: {r.num_rows} ")
if r.segmentID not in segment_ids:
segment_ids.append(r.segmentID)
cnt += r.num_rows
assert cnt == nb
@pytest.mark.tags(CaseLabel.L1)
def test_get_sealed_query_segment_info_after_create_index(self):
"""
target: test getting sealed query segment info of collection with data
method: init a collection, insert data, flush, create index, load, and get query segment info
expected:
1. length of segment is greater than 0
2. the sum num_rows of each segment is equal to num of entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 3000
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
collection_w.num_entities
collection_w.create_index(default_field_name, default_index_params)
collection_w.load()
res, _ = self.utility_wrap.get_query_segment_info(c_name)
assert len(res) > 0
segment_ids = []
cnt = 0
for r in res:
log.info(f"segmentID {r.segmentID}: state: {r.state}; num_rows: {r.num_rows} ")
if r.segmentID not in segment_ids:
segment_ids.append(r.segmentID)
cnt += r.num_rows
assert cnt == nb
@pytest.mark.tags(CaseLabel.Loadbalance)
def test_load_balance_normal(self):
"""
target: test load balance of collection
method: init a collection and load balance
expected: sealed_segment_ids is subset of des_sealed_segment_ids
"""
# init a collection
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
ms = MilvusSys()
nb = 3000
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
# get sealed segments
collection_w.num_entities
# get growing segments
collection_w.insert(df)
collection_w.load()
# prepare load balance params
res, _ = self.utility_wrap.get_query_segment_info(c_name)
segment_distribution = cf.get_segment_distribution(res)
all_querynodes = [node["identifier"] for node in ms.query_nodes]
assert len(all_querynodes) > 1
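# rank the query nodes by how many sealed segments each currently holds (descending);
# the busiest node becomes the balance source and the remaining nodes are the destinations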
all_querynodes = sorted(all_querynodes,
key=lambda x: len(segment_distribution[x]["sealed"])
if x in segment_distribution else 0, reverse=True)
src_node_id = all_querynodes[0]
des_node_ids = all_querynodes[1:]
sealed_segment_ids = segment_distribution[src_node_id]["sealed"]
# load balance
self.utility_wrap.load_balance(src_node_id, des_node_ids, sealed_segment_ids)
# get segments distribution after load balance
res, _ = self.utility_wrap.get_query_segment_info(c_name)
segment_distribution = cf.get_segment_distribution(res)
des_sealed_segment_ids = []
for des_node_id in des_node_ids:
des_sealed_segment_ids += segment_distribution[des_node_id]["sealed"]
# assert sealed_segment_ids is subset of des_sealed_segment_ids
assert set(sealed_segment_ids).issubset(des_sealed_segment_ids)
send.py
#!/usr/bin/env python3.6
import pika
import sys,datetime,time
import multiprocessing,logging
logger = logging.getLogger("MQ_SENDER")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("/var/log/mq_sender.log")
fh.setLevel(logging.DEBUG)
#ch = logging.StreamHandler()
#ch.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] [%(process)d] %(message)s")
#ch.setFormatter(formatter)
fh.setFormatter(formatter)
#logger.addHandler(ch)
logger.addHandler(fh)
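# sender(): publishes the integers 1..100000 as persistent messages to the durable 'deploy'
# queue on the 'test' vhost, one message per second, logging each send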
def sender():
credentials = pika.PlainCredentials('admin','app')
connection = pika.BlockingConnection(pika.ConnectionParameters('192.168.200.201',5672,'test',credentials))
channel = connection.channel()
channel.queue_declare(queue='deploy', durable=True)
try:
for i in range(1,100001):
#message = datetime.datetime.now().strftime('%Y_%m_%d_%H:%M:%S.%f')
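# delivery_mode=2 marks each message as persistent, pairing with the durable queue declared above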
channel.basic_publish(exchange='',routing_key='deploy',body=str(i),properties=pika.BasicProperties(delivery_mode=2))
logger.info(f'[sender]{i}')
time.sleep(1)
connection.close()
except KeyboardInterrupt:
connection.close()
print('EXIT!')
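# spawn 600 sender processes so that messages are published concurrently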
for i in range(600):
p = multiprocessing.Process(target=sender)
p.start()
grid_world.py
# -*- coding: utf-8 -*-
'''
to run this, sudo apt-get install python3-pyqt5
'''
import sys
from PyQt5.QtCore import QPoint, QRect, QSize, Qt
from PyQt5.QtGui import (QBrush, QPainter, QColor, QPen )
from PyQt5.QtWidgets import (QApplication, QPushButton, QCheckBox, QGridLayout,QLabel, QWidget, QInputDialog, QTextEdit ,QDialog )
import numpy as np
from random import randint
import time
import threading
###############################################
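# InputDialog: pops up the current map_mask in an editable QTextEdit (used by the 'redefine map' button)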
class InputDialog(QDialog):
def __init__(self, parent, str ):
super(InputDialog,self).__init__(parent)
self.initUi(str)
def initUi(self, str ):
self.setWindowTitle("重新定义地图")
self.setGeometry(400,400,300,260)
self.edit = QTextEdit(str,self)
self.edit.move(10,10)
self.edit.resize( 280 ,240 )
class grid_world(QWidget):
'''
A square-grid world for experimenting with algorithms and visualizing them on a block map.
Key members:
config_gui : customizes the GUI elements, fonts and button contents; override to extend
self.block_size = 40 pixel size of each grid cell
config_map : defines self.map_mask, i.e. customizes the whole map
map_width : int , number of grid cells horizontally
map_height: int , number of grid cells vertically
map : ndarray, map[x][y] -> int map value
'''
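# Minimal usage sketch (illustrative only; "my_world" is a hypothetical subclass, not part of this file):
#
# class my_world(grid_world):
#     def config_map(self):
#         self.map_mask = '''
#         111
#         121
#         111
#         '''
#     def run_proc(self):
#         self.running = True
#         while self.running:
#             self.set_log('stepping ...')
#             time.sleep(0.5)
#         self.running = False
#
# app = QApplication(sys.argv)
# w = my_world()
# sys.exit(app.exec_())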
def __init__(self):
super(grid_world, self).__init__()
self.initUI()
# ----------------------------- methods a subclass may override, usually still calling the base class implementation ------------------------------------
# ----- super( cls_name , self).config_gui() ...
def add_ctrl( self, cls , str , step = 25 ):
ctrl = cls(str,self)
self.ctrls_bottom_height += step
ctrl.move(10, self.ctrls_bottom_height )
ctrl.resize(85,18)
self.ctrls.append(ctrl)
return ctrl
def config_gui(self):
# GUI components: init & run
font = self.font()
font.setPointSize(8)
self.setFont(font)
self.pen = QPen()
self.brush = QBrush()
self.ctrls_bottom_height = 10
self.ctrls = []
self.btnreset = self.add_ctrl( QPushButton , 'reset' ,0)
self.btnreset.clicked.connect(self.reset)
self.btnrun = self.add_ctrl( QPushButton , 'run' )
self.btnrun.clicked.connect(self.run)
self.btnstop = self.add_ctrl( QPushButton , 'stop' )
self.btnstop.clicked.connect(self.stop)
self.btnstop = self.add_ctrl( QPushButton , 'redefine map' )
self.btnstop.clicked.connect(self.redefine )
self.bkcolor = QColor(22,111,22)
self.log_text = 'Welcome to the grid world'
# block size and basic drawing layout configuration
self.show_direction = True
self.block_size = 40
self.map_topleft = (110,10)
def redefine(self):
myshow=InputDialog(self,self.map_mask)
myshow.show()
def calc_map(self):
''' compute the overall size from the settings above and initialize the map '''
mask = self.map_mask.strip()
mask = mask.replace('\t','')
mask = mask.replace(' ','')
mask = mask.split('\n')
self.map_height = len(mask)
self.map_width = max([ len(x) for x in mask ])
self.map = np.ndarray( ( self.map_width , self.map_height ) , dtype=np.int8 )
for y in range(self.map_height):
if len(mask[y]) == self.map_width:
for x in range(self.map_width):
self.map[x][y] = int( mask[y][x] )
else:
for x in range(len(mask[y])):
self.map[x][y] = int( mask[y][x] )
x = len(mask[y])
while x < self.map_width:
self.map[x][y] = 0 # pad short rows with 0 as the default value
x += 1
# -------------------------- methods a subclass usually overrides completely, without calling the base class --------------------
def config_map(self):
# map size and initial map, meant to be redefined by subclasses
self.map_mask = '''
1111 1111 1111 1111
1122 1212 1331 2212
1122 1212 1331 2212
1122 1212 1331 2212
1122 1212 1331 2212
1122 1212 1331 2212
2111 1111 1222 2112
''' # map mask digits must be non-negative int8 values (0 ~ 127)
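# digit meaning follows draw_block's colour map: 0 dark (padding), 1 light, 2 grey, 3 green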
def reset(self):
'''
reset the map and the computation state; may be run repeatedly
'''
# do nothing in base class
self.update()
def run_proc(self):
''' sample code; subclasses should follow the same structure '''
# setup flag
self.running = True
while self.running:
# do your job
pass
self.running = False
def draw_block(self, painter,x,y , block_map_value = None ):
''' draws a single cell; this method governs essentially all of the drawing logic '''
painter.save()
if block_map_value is None:
block_map_value = self.map[x][y]
bkcolor = {0:(55,55,55) , 1:(222,255,222) , 2 :(111,111,111) , 3:(111,255,111) }.get( block_map_value , (0,0,0) )
self.draw_background( painter, x,y , bkcolor )
self.draw_text( painter , x,y , str(block_map_value) , (0,0,0) , 'd' )
self.draw_arrow(painter,x,y, 'urdl'[ block_map_value ] , (255,0,0) , 'u' )
painter.restore()
# --------------------------------- parts that usually need no changes -----------------------------
def initUI(self):
# GUI components: init & run
self.config_gui()
# map size and initial map, meant to be redefined by subclasses
self.config_map()
self.calc_map() # parse map_mask
self.window_width = self.map_topleft[0] + self.block_size * self.map_width + 10
self.window_height = self.map_topleft[1] + self.block_size * self.map_height + 10 + 25
if self.window_width < 300:
self.window_width = 300
if self.window_height < 200:
self.window_height = 200
if self.window_height < self.ctrls_bottom_height + 50:
self.window_height = self.ctrls_bottom_height + 50
self.setGeometry(300, 300, self.window_width , self.window_height )
self.setWindowTitle('grid_world')
self.show()
self.running_thread = None
self.running = False
self.reset()
def run(self):
        ''' Run the algorithm in a background thread '''
self.running_thread = threading.Thread( target=self.run_proc )
self.running_thread.start()
def stop(self):
if self.running_thread is not None:
self.running = False
self.running_thread.join()
self.running_thread = None
def set_log(self,text):
self.log_text = text
self.update()
    def get_grid_range(self, x, y):
        '''
        Input: logical grid coordinates x, y.
        Returns the cell's pixel region as (top_left_x, top_left_y, width, height).
        '''
        return self.map_topleft[0] + self.block_size * x, self.map_topleft[1] + self.block_size * y, self.block_size, self.block_size
    def draw_background(self, painter, x, y, color):
        '''
        Fill the background of one cell with the given color.
        Input:
            x, y  : logical grid coordinates
            color : QColor or an (r, g, b) tuple
        '''
painter.setPen(self.bkcolor)
if isinstance( color , QColor ):
painter.setBrush( color )
else:
painter.setBrush(QColor( color[0] , color[1] , color[2] ))
x,y,cx,cy = self.get_grid_range( x,y )
painter.drawRect(x, y, cx-1, cy-1)
    def draw_text(self, painter, x, y, text, color=None, pos_flag='c'):
        '''
        pos_flag : 'c' : centered in the cell
                   'l' : left half of the cell
                   'r', 'u', 'd' : right, upper and lower halves
        x, y  : logical grid coordinates
        color : None keeps the current pen color; a QColor or (r, g, b) tuple draws with that color
        '''
x,y,cx,cy = self.get_grid_range( x,y )
        # integer division: QRect expects int arguments
        rg = QRect(x, y, cx - 1, cy - 1)
        if pos_flag == 'c':
            pass
        elif pos_flag == 'l':
            rg = QRect(x, y, cx // 2, cy - 1)
        elif pos_flag == 'r':
            rg = QRect(x + cx // 2, y, cx // 2, cy - 1)
        elif pos_flag == 'u':
            rg = QRect(x, y, cx - 1, cy // 2)
        elif pos_flag == 'd':
            rg = QRect(x, y + cy // 2, cx - 1, cy // 2)
if color is not None:
if isinstance( color , QColor ):
painter.setPen( color )
else:
painter.setPen(QColor( color[0] , color[1] , color[2] ))
painter.drawText( rg , Qt.AlignCenter, text )
    def draw_arrow(self, painter, x, y, direction, color=None, pos_flag='c'):
        '''
        direction : one of 'u', 'r', 'd', 'l'; any other value draws a small square
        pos_flag  : 'c' : centered in the cell
                    'l' : left half of the cell
                    'r', 'u', 'd' : right, upper and lower halves
        x, y  : logical grid coordinates
        color : None keeps the current pen color; a QColor or (r, g, b) tuple draws with that color
        '''
x,y,cx,cy = self.get_grid_range( x,y )
centerx = x + cx//2
centery = y + cy//2
maxsz = min((cx,cy))
if pos_flag == 'c':
pass
elif pos_flag == 'l':
centerx = x + cx//4
maxsz = min((cx//2,cy))
elif pos_flag == 'r':
centerx = x + cx//2 + cx//4
maxsz = min((cx//2,cy))
elif pos_flag == 'u':
centery = y + cy//4
maxsz = min((cx,cy//2))
elif pos_flag == 'd':
centery = y + cy//2 + cy//4
maxsz = min(cx,cy//2)
        maxsz = int(maxsz * 0.7)  # keep integer: QPainter/QRect expect int coordinates
        arsz = maxsz // 4
        if arsz < 2:
            arsz = 2
if color is not None:
if isinstance( color , QColor ):
painter.setPen( color )
else:
painter.setPen(QColor( color[0] , color[1] , color[2] ))
if direction == 'l':
pt1 = ( centerx + maxsz//2 , centery )
pt2 = ( centerx - maxsz//2 , centery )
painter.drawLine( pt1[0] , pt1[1] , pt2[0] , pt2[1] )
painter.drawLine( pt2[0] , pt2[1] , pt2[0] + arsz , pt2[1] - arsz)
painter.drawLine( pt2[0] , pt2[1] , pt2[0] + arsz , pt2[1] + arsz)
elif direction == 'r':
pt2 = ( centerx + maxsz//2 , centery )
pt1 = ( centerx - maxsz//2 , centery )
painter.drawLine( pt1[0] , pt1[1] , pt2[0] , pt2[1] )
painter.drawLine( pt2[0] , pt2[1] , pt2[0] - arsz , pt2[1] - arsz)
painter.drawLine( pt2[0] , pt2[1] , pt2[0] - arsz , pt2[1] + arsz)
elif direction == 'u':
pt2 = ( centerx , centery- maxsz//2 )
pt1 = ( centerx , centery+ maxsz//2 )
painter.drawLine( pt1[0] , pt1[1] , pt2[0] , pt2[1] )
painter.drawLine( pt2[0] , pt2[1] , pt2[0] + arsz , pt2[1] + arsz)
painter.drawLine( pt2[0] , pt2[1] , pt2[0] - arsz , pt2[1] + arsz)
elif direction == 'd':
pt1 = ( centerx , centery- maxsz//2 )
pt2 = ( centerx , centery+ maxsz//2 )
painter.drawLine( pt1[0] , pt1[1] , pt2[0] , pt2[1] )
painter.drawLine( pt2[0] , pt2[1] , pt2[0] + arsz , pt2[1] - arsz)
painter.drawLine( pt2[0] , pt2[1] , pt2[0] - arsz , pt2[1] - arsz)
else:
            painter.drawRect(QRect(centerx - maxsz // 3, centery - maxsz // 3, int(maxsz * 0.66), int(maxsz * 0.66)))
def drawMap(self, painter):
''' draw the map based on self.map '''
painter.save()
        # draw the background frame behind the map
painter.setBrush(self.bkcolor)
painter.drawRect( self.map_topleft[0] - 2 , self.map_topleft[1] - 2,
self.block_size * self.map_width + 4 ,
self.block_size * self.map_height + 4 )
for x in range(self.map_width):
for y in range(self.map_height):
self.draw_block(painter,x,y, self.map[x][y] )
painter.restore()
def drawLog(self, painter):
painter.save()
painter.setBrush(QColor( 255,255,255 ))
painter.drawRect( 10 , self.window_height - 22 , self.window_width - 20 , 15 )
logrg2 = QRect( 12 , self.window_height - 22 + 3 , self.window_width - 20 -4 , 12 )
painter.drawText( logrg2, Qt.AlignLeft , self.log_text )
painter.restore()
def paintEvent(self, event):
        '''Paint the whole widget: the map and the log line.'''
self.cx = self.width()
self.cy = self.height()
painter = QPainter(self)
painter.save()
        try:
            self.drawMap(painter)
            self.drawLog(painter)
except Exception as e:
painter.setPen(QColor( 255,0,0 ))
painter.drawText( QRect( self.map_topleft[0] , self.map_topleft[1] , 400,300 ) , Qt.AlignCenter,
"paintEvent met exception:\n" + repr(e) + str(e) )
painter.restore()
def closeEvent(self, event):
self.stop()
event.accept()
def run_gui(gui_cls):
    app = QApplication(sys.argv)
    ex = gui_cls()  # keep a reference so the window is not garbage collected
    sys.exit(app.exec_())
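# Illustrative sketch with hypothetical names: a minimal subclass showing the intended
# extension points -- config_map() redefines the map mask and run_proc() hosts the
# algorithm loop (started in a background thread by run()).
class demo_world(grid_world):
    def config_map(self):
        # a smaller map than the default one; whitespace is stripped by calc_map()
        self.map_mask = '''
        111
        121
        111
        '''
    def run_proc(self):
        self.running = True
        steps = 0
        while self.running and steps < 100:
            steps += 1  # placeholder for one step of a real algorithm
        self.set_log('finished after %d steps' % steps)
        self.running = False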
if __name__ == '__main__':
run_gui( grid_world )
|
s2e_remote_memory.py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from builtins import dict
from builtins import int
from future import standard_library
standard_library.install_aliases()
import threading
import logging
import sys
import json
import socket
import time
from select import select
log = logging.getLogger("avatar.remote_memory_interface")
class RemoteMemoryInterface(object):
def __init__(self):
self._read_handler = None
self._write_handler = None
self._set_cpu_state_handler = None
self._get_cpu_state_handler = None
self._continue_handler = None
self._get_checksum_handler = None
def set_read_handler(self, listener):
self._read_handler = listener
def set_write_handler(self, listener):
self._write_handler = listener
def set_set_cpu_state_handler(self, listener):
self._set_cpu_state_handler= listener
def set_get_cpu_state_handler(self, listener):
self._get_cpu_state_handler= listener
def set_continue_handler(self, listener):
self._continue_handler= listener
def set_get_checksum_handler(self, listener):
self._get_checksum_handler= listener
def _handle_read(self, params):
assert(self._read_handler) #Read handler must be installed when this is called
params["value"] = self._read_handler(params)
return params["value"]
def _handle_write(self, params):
assert(self._write_handler) #Write handler must be installed when this is called
self._write_handler(params)
def _handle_set_cpu_state(self, params):
assert(self._set_cpu_state_handler)
self._set_cpu_state_handler(params)
def _handle_get_cpu_state(self, params):
assert(self._get_cpu_state_handler)
return self._get_cpu_state_handler(params)
def _handle_continue(self, params):
assert(self._continue_handler)
self._continue_handler(params)
def _handle_get_checksum(self, params):
assert(self._get_checksum_handler)
return self._get_checksum_handler(params['address'], params['size'])
class S2ERemoteMemoryInterface(RemoteMemoryInterface):
def __init__(self, sock_address):
super(S2ERemoteMemoryInterface, self).__init__()
self._thread = threading.Thread(target = self._run)
self._sock_address = sock_address
self._stop = threading.Event()
def start(self):
self._thread.start()
    def _run(self):
        sock = None
        retries = 1
        while retries < 10:
            try:
                log.debug("\r\nConnecting to S2E RemoteMemory plugin at %s:%d\r\n", self._sock_address[0], self._sock_address[1])
                sock = socket.create_connection(self._sock_address)
                log.info("\r\nConnection to RemoteMemory plugin established\r\n")
                break
            except Exception:
                log.exception("\r\nConnection to S2E RemoteMemory plugin failed (%d tries)\r\n", retries)
                time.sleep(3)
                retries = retries + 1
                sock = None
#TODO: Do proper error signalling
if not sock:
sys.exit(1)
while not self._stop.is_set():
buffer = ""
while True:
if self._stop.is_set():
return
(rd, _, _) = select([sock], [], [], 1)
if rd:
buffer += sock.recv(1).decode(encoding = 'ascii')
                try:
                    # the plugin sends one JSON object per request; keep accumulating
                    # bytes until the buffer parses as complete JSON
                    request = json.loads(buffer)
                    log.debug('buf: %s', repr(buffer))
                    buffer = ""  # reset the buffer once it parsed successfully
                    request["cmd"]  # raises KeyError if the request has no "cmd" field
                    break
                except (ValueError, KeyError):
                    # incomplete request, wait for more data
                    pass
try:
if request["cmd"] == "read":
params = {"address" : int(request["params"]["address"], 16),
"size": int(request["params"]["size"], 16),
"cpu_state": request["cpu_state"]}
value = self._handle_read(params)
json_string = json.dumps({"reply": "read", "value": "0x%x" % value}) + "\n"
sock.send(json_string.encode(encoding = 'ascii'))
elif request["cmd"] == "write":
params = {"address" : int(request["params"]["address"], 16),
"size": int(request["params"]["size"], 16),
"value": int(request["params"]["value"], 16),
"cpu_state": request["cpu_state"]}
self._handle_write(params)
elif request["cmd"] == "set_cpu_state":
params = {"cpu_state": request["cpu_state"]}
self._handle_set_cpu_state(params)
json_string = json.dumps({"reply":"done"}) + \
"\n"
sock.sendall(json_string.encode(encoding =
'ascii'))
elif request["cmd"] == "get_cpu_state":
params = None
ret = self._handle_get_cpu_state(params)
ret = dict(list(ret.items()) + list({"reply":"get_cpu_state"}.items()))
                    json_string = json.dumps(ret) + "\n"
                    sock.sendall(json_string.encode(encoding='ascii'))
elif request["cmd"] == "continue":
params = None
self._handle_continue(params)
                    # here we should wait for the breakpoint to be hit
                    json_string = json.dumps({"reply": "done"}) + "\n"
                    sock.sendall(json_string.encode(encoding='ascii'))
elif request["cmd"] == "write_buffer":
params = {"address": int(request["address"], 16),
"file": request["file"]}
self._handle_write_buffer(params)
elif request["cmd"] == "get_checksum":
params = {"address": int(request["params"]["address"], 16),
"size": int(request["params"]["size"], 16)}
ret = self._handle_get_checksum(params)
json_string = json.dumps({"reply":"done", \
"value": "0x%08x" % ret}) + \
"\n"
sock.sendall(json_string.encode(encoding =
'ascii'))
else:
log.error("Unknown cmd %s" % (request['cmd']))
except Exception:
log.exception("Error in remote memory interface")
def stop(self):
self._stop.set()
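# Illustrative sketch with hypothetical values: how the interface is wired up and the
# JSON wire format _run() expects. The read handler must return the value that was read.
#
#     iface = S2ERemoteMemoryInterface(("127.0.0.1", 9999))
#     iface.set_read_handler(lambda params: 0xdeadbeef)
#     iface.set_write_handler(lambda params: None)
#     iface.start()
#
# A "read" request on the socket looks like
#     {"cmd": "read", "params": {"address": "0x1000", "size": "0x4"}, "cpu_state": {}}
# and is answered with
#     {"reply": "read", "value": "0xdeadbeef"}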
|
middleware.py
|
"""
Here are all the sockets.
There shall not be any sockets outside of this file.
There shall be one socket in one seperate thread (multithreading)
for every tcp connections
also one socket in a thread for the udp socket for broadcast
"""
import threading
import socket
import ipaddress
from time import sleep
usleep = lambda x: sleep(x / 1_000_000.0)  # sleep for x microseconds
######################################### PARAMETER Constants
BROADCAST_PORT = 61425
BUFFER_SIZE = 1024
BROADCAST_LISTENER_SLEEP = 10 # microseconds
IP_ADRESS_OF_THIS_PC = socket.gethostbyname(socket.gethostname())
SUBNET_MASK = '255.255.255.0'  # assumption: a /24 network; replace with the real subnet mask
net = ipaddress.IPv4Network(IP_ADRESS_OF_THIS_PC + '/' + SUBNET_MASK, False)
BROADCAST_IP = net.broadcast_address.exploded
class BroadcastHandler():
def __init__(self):
self.listenerList = []
self.broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Create a UDP socket for Listening
self.listen_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set the socket to broadcast and enable reusing addresses
self.listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind socket to address and port
self.listen_socket.bind((IP_ADRESS_OF_THIS_PC, BROADCAST_PORT))
# Create Thread to listen to UDP Broadcast
listen_UDP = threading.Thread(target=self.listenUdp)
listen_UDP.start()
print("listenUDP Thread has started and not blocked Progress (by running in the background")
def broadcast(self, broadcast_message):
# Send message on broadcast address
self.broadcast_socket.sendto(str.encode(broadcast_message), (BROADCAST_IP, BROADCAST_PORT))
#self.broadcast_socket.close()
def subscribeBroadcastListener(self, observer):
        self.listenerList.append(observer)
def listenUdp(self):
while True:
data, addr = self.listen_socket.recvfrom(BUFFER_SIZE)
usleep(BROADCAST_LISTENER_SLEEP)# sleep microseconds
if data:
print("Received broadcast message form",addr, ": ", data.decode())
for observer in self.listenerList:
observer.broadcastMessageReceived()
data = None
class Middleware():
holdBackQueue = []
deliveryQueue = []
    def __init__(self):
        self.broadcastHandler = BroadcastHandler()
def broadcastToAll(self, message):
pass
def sendMessageTo(self, uuid, message):
pass
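# Illustrative sketch with hypothetical names: an observer only needs a
# broadcastMessageReceived() method to be notified of incoming broadcasts.
#
#     class PrintingObserver:
#         def broadcastMessageReceived(self):
#             print("a broadcast arrived")
#
#     handler = BroadcastHandler()
#     handler.subscribeBroadcastListener(PrintingObserver())
#     handler.broadcast("hello peers")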
|
procutil.py
|
# procutil.py - utility for managing processes and executable environment
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import contextlib
import errno
import io
import os
import signal
import subprocess
import sys
import threading
import time
from ..i18n import _
from ..pycompat import (
getattr,
open,
)
from .. import (
encoding,
error,
policy,
pycompat,
)
# Import like this to keep import-checker happy
from ..utils import resourceutil
osutil = policy.importmod('osutil')
if pycompat.iswindows:
from .. import windows as platform
else:
from .. import posix as platform
def isatty(fp):
try:
return fp.isatty()
except AttributeError:
return False
class BadFile(io.RawIOBase):
"""Dummy file object to simulate closed stdio behavior"""
def readinto(self, b):
raise IOError(errno.EBADF, 'Bad file descriptor')
def write(self, b):
raise IOError(errno.EBADF, 'Bad file descriptor')
class LineBufferedWrapper(object):
def __init__(self, orig):
self.orig = orig
def __getattr__(self, attr):
return getattr(self.orig, attr)
def write(self, s):
orig = self.orig
res = orig.write(s)
if s.endswith(b'\n'):
orig.flush()
return res
io.BufferedIOBase.register(LineBufferedWrapper)
def make_line_buffered(stream):
if pycompat.ispy3 and not isinstance(stream, io.BufferedIOBase):
# On Python 3, buffered streams can be expected to subclass
# BufferedIOBase. This is definitively the case for the streams
# initialized by the interpreter. For unbuffered streams, we don't need
# to emulate line buffering.
return stream
if isinstance(stream, LineBufferedWrapper):
return stream
return LineBufferedWrapper(stream)
def unwrap_line_buffered(stream):
if isinstance(stream, LineBufferedWrapper):
assert not isinstance(stream.orig, LineBufferedWrapper)
return stream.orig
return stream
class WriteAllWrapper(object):
def __init__(self, orig):
self.orig = orig
def __getattr__(self, attr):
return getattr(self.orig, attr)
def write(self, s):
write1 = self.orig.write
m = memoryview(s)
total_to_write = len(s)
total_written = 0
while total_written < total_to_write:
total_written += write1(m[total_written:])
return total_written
io.IOBase.register(WriteAllWrapper)
def _make_write_all(stream):
assert pycompat.ispy3
if isinstance(stream, WriteAllWrapper):
return stream
if isinstance(stream, io.BufferedIOBase):
# The io.BufferedIOBase.write() contract guarantees that all data is
# written.
return stream
# In general, the write() method of streams is free to write only part of
# the data.
return WriteAllWrapper(stream)
if pycompat.ispy3:
# Python 3 implements its own I/O streams. Unlike stdio of C library,
# sys.stdin/stdout/stderr may be None if underlying fd is closed.
# TODO: .buffer might not exist if std streams were replaced; we'll need
# a silly wrapper to make a bytes stream backed by a unicode one.
if sys.stdin is None:
stdin = BadFile()
else:
stdin = sys.stdin.buffer
if sys.stdout is None:
stdout = BadFile()
else:
stdout = _make_write_all(sys.stdout.buffer)
if sys.stderr is None:
stderr = BadFile()
else:
stderr = _make_write_all(sys.stderr.buffer)
if pycompat.iswindows:
# Work around Windows bugs.
stdout = platform.winstdout(stdout)
stderr = platform.winstdout(stderr)
if isatty(stdout):
# The standard library doesn't offer line-buffered binary streams.
stdout = make_line_buffered(stdout)
else:
# Python 2 uses the I/O streams provided by the C library.
stdin = sys.stdin
stdout = sys.stdout
stderr = sys.stderr
if pycompat.iswindows:
# Work around Windows bugs.
stdout = platform.winstdout(stdout)
stderr = platform.winstdout(stderr)
if isatty(stdout):
if pycompat.iswindows:
# The Windows C runtime library doesn't support line buffering.
stdout = make_line_buffered(stdout)
else:
# glibc determines buffering on first write to stdout - if we
# replace a TTY destined stdout with a pipe destined stdout (e.g.
# pager), we want line buffering.
stdout = os.fdopen(stdout.fileno(), 'wb', 1)
findexe = platform.findexe
_gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
hidewindow = platform.hidewindow
readpipe = platform.readpipe
setbinary = platform.setbinary
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
shellsplit = platform.shellsplit
spawndetached = platform.spawndetached
sshargs = platform.sshargs
testpid = platform.testpid
try:
setprocname = osutil.setprocname
except AttributeError:
pass
try:
unblocksignal = osutil.unblocksignal
except AttributeError:
pass
closefds = pycompat.isposix
def explainexit(code):
"""return a message describing a subprocess status
(codes from kill are negative - not os.system/wait encoding)"""
if code >= 0:
return _(b"exited with status %d") % code
return _(b"killed by signal %d") % -code
class _pfile(object):
"""File-like wrapper for a stream opened by subprocess.Popen()"""
def __init__(self, proc, fp):
self._proc = proc
self._fp = fp
def close(self):
# unlike os.popen(), this returns an integer in subprocess coding
self._fp.close()
return self._proc.wait()
def __iter__(self):
return iter(self._fp)
def __getattr__(self, attr):
return getattr(self._fp, attr)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def popen(cmd, mode=b'rb', bufsize=-1):
if mode == b'rb':
return _popenreader(cmd, bufsize)
elif mode == b'wb':
return _popenwriter(cmd, bufsize)
raise error.ProgrammingError(b'unsupported mode: %r' % mode)
def _popenreader(cmd, bufsize):
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
stdout=subprocess.PIPE,
)
return _pfile(p, p.stdout)
def _popenwriter(cmd, bufsize):
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
stdin=subprocess.PIPE,
)
return _pfile(p, p.stdin)
def popen2(cmd, env=None):
# Setting bufsize to -1 lets the system decide the buffer size.
# The default for bufsize is 0, meaning unbuffered. This leads to
# poor performance on Mac OS X: http://bugs.python.org/issue4194
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
bufsize=-1,
close_fds=closefds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=tonativeenv(env),
)
return p.stdin, p.stdout
def popen3(cmd, env=None):
stdin, stdout, stderr, p = popen4(cmd, env)
return stdin, stdout, stderr
def popen4(cmd, env=None, bufsize=-1):
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=tonativeenv(env),
)
return p.stdin, p.stdout, p.stderr, p
def pipefilter(s, cmd):
'''filter string S through command CMD, returning its output'''
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
close_fds=closefds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
pout, perr = p.communicate(s)
return pout
def tempfilter(s, cmd):
"""filter string S through a pair of temporary files with CMD.
CMD is used as a template to create the real command to be run,
with the strings INFILE and OUTFILE replaced by the real names of
the temporary files generated."""
inname, outname = None, None
try:
infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-')
fp = os.fdopen(infd, 'wb')
fp.write(s)
fp.close()
outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-')
os.close(outfd)
cmd = cmd.replace(b'INFILE', inname)
cmd = cmd.replace(b'OUTFILE', outname)
code = system(cmd)
if pycompat.sysplatform == b'OpenVMS' and code & 1:
code = 0
if code:
raise error.Abort(
_(b"command '%s' failed: %s") % (cmd, explainexit(code))
)
with open(outname, b'rb') as fp:
return fp.read()
finally:
try:
if inname:
os.unlink(inname)
except OSError:
pass
try:
if outname:
os.unlink(outname)
except OSError:
pass
_filtertable = {
b'tempfile:': tempfilter,
b'pipe:': pipefilter,
}
def filter(s, cmd):
"""filter a string through a command that transforms its input to its
output"""
for name, fn in pycompat.iteritems(_filtertable):
if cmd.startswith(name):
return fn(s, cmd[len(name) :].lstrip())
return pipefilter(s, cmd)
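# Illustrative examples (assumed command strings): filter() dispatches on the command
# prefix, so
#     filter(b'some text', b'pipe: tr a-z A-Z')
# pipes the bytes straight through the shell command, while
#     filter(b'some text', b"tempfile: sed 's/a/b/' INFILE > OUTFILE")
# writes the input to INFILE, runs the command and reads the result back from OUTFILE.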
_hgexecutable = None
def hgexecutable():
"""return location of the 'hg' executable.
Defaults to $HG or 'hg' in the search path.
"""
if _hgexecutable is None:
hg = encoding.environ.get(b'HG')
mainmod = sys.modules['__main__']
if hg:
_sethgexecutable(hg)
elif resourceutil.mainfrozen():
if getattr(sys, 'frozen', None) == 'macosx_app':
# Env variable set by py2app
_sethgexecutable(encoding.environ[b'EXECUTABLEPATH'])
else:
_sethgexecutable(pycompat.sysexecutable)
elif (
not pycompat.iswindows
and os.path.basename(getattr(mainmod, '__file__', '')) == 'hg'
):
_sethgexecutable(pycompat.fsencode(mainmod.__file__))
else:
_sethgexecutable(
findexe(b'hg') or os.path.basename(pycompat.sysargv[0])
)
return _hgexecutable
def _sethgexecutable(path):
"""set location of the 'hg' executable"""
global _hgexecutable
_hgexecutable = path
def _testfileno(f, stdf):
fileno = getattr(f, 'fileno', None)
try:
return fileno and fileno() == stdf.fileno()
except io.UnsupportedOperation:
return False # fileno() raised UnsupportedOperation
def isstdin(f):
return _testfileno(f, sys.__stdin__)
def isstdout(f):
return _testfileno(f, sys.__stdout__)
def protectstdio(uin, uout):
"""Duplicate streams and redirect original if (uin, uout) are stdio
If uin is stdin, it's redirected to /dev/null. If uout is stdout, it's
redirected to stderr so the output is still readable.
Returns (fin, fout) which point to the original (uin, uout) fds, but
may be copy of (uin, uout). The returned streams can be considered
"owned" in that print(), exec(), etc. never reach to them.
"""
uout.flush()
fin, fout = uin, uout
if _testfileno(uin, stdin):
newfd = os.dup(uin.fileno())
nullfd = os.open(os.devnull, os.O_RDONLY)
os.dup2(nullfd, uin.fileno())
os.close(nullfd)
fin = os.fdopen(newfd, 'rb')
if _testfileno(uout, stdout):
newfd = os.dup(uout.fileno())
os.dup2(stderr.fileno(), uout.fileno())
fout = os.fdopen(newfd, 'wb')
return fin, fout
def restorestdio(uin, uout, fin, fout):
"""Restore (uin, uout) streams from possibly duplicated (fin, fout)"""
uout.flush()
for f, uif in [(fin, uin), (fout, uout)]:
if f is not uif:
os.dup2(f.fileno(), uif.fileno())
f.close()
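# Illustrative usage sketch: protectstdio()/restorestdio() are used as a pair around
# code that must not touch the real stdio, with uin/uout standing for the caller's
# stream objects (hypothetical here).
#     fin, fout = protectstdio(uin, uout)
#     try:
#         pass  # run code that may print() or exec()
#     finally:
#         restorestdio(uin, uout, fin, fout)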
def shellenviron(environ=None):
"""return environ with optional override, useful for shelling out"""
def py2shell(val):
"""convert python object into string that is useful to shell"""
if val is None or val is False:
return b'0'
if val is True:
return b'1'
return pycompat.bytestr(val)
env = dict(encoding.environ)
if environ:
env.update((k, py2shell(v)) for k, v in pycompat.iteritems(environ))
env[b'HG'] = hgexecutable()
return env
if pycompat.iswindows:
def shelltonative(cmd, env):
return platform.shelltocmdexe( # pytype: disable=module-attr
cmd, shellenviron(env)
)
tonativestr = encoding.strfromlocal
else:
def shelltonative(cmd, env):
return cmd
tonativestr = pycompat.identity
def tonativeenv(env):
"""convert the environment from bytes to strings suitable for Popen(), etc."""
return pycompat.rapply(tonativestr, env)
def system(cmd, environ=None, cwd=None, out=None):
"""enhanced shell command execution.
run with environment maybe modified, maybe in different dir.
if out is specified, it is assumed to be a file-like object that has a
write() method. stdout and stderr will be redirected to out."""
try:
stdout.flush()
except Exception:
pass
env = shellenviron(environ)
if out is None or isstdout(out):
rc = subprocess.call(
tonativestr(cmd),
shell=True,
close_fds=closefds,
env=tonativeenv(env),
cwd=pycompat.rapply(tonativestr, cwd),
)
else:
proc = subprocess.Popen(
tonativestr(cmd),
shell=True,
close_fds=closefds,
env=tonativeenv(env),
cwd=pycompat.rapply(tonativestr, cwd),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
for line in iter(proc.stdout.readline, b''):
out.write(line)
proc.wait()
rc = proc.returncode
if pycompat.sysplatform == b'OpenVMS' and rc & 1:
rc = 0
return rc
_is_gui = None
def _gui():
'''Are we running in a GUI?'''
if pycompat.isdarwin:
if b'SSH_CONNECTION' in encoding.environ:
# handle SSH access to a box where the user is logged in
return False
elif getattr(osutil, 'isgui', None):
# check if a CoreGraphics session is available
return osutil.isgui()
else:
# pure build; use a safe default
return True
else:
return (
pycompat.iswindows
or encoding.environ.get(b"DISPLAY")
or encoding.environ.get(b"WAYLAND_DISPLAY")
)
def gui():
global _is_gui
if _is_gui is None:
_is_gui = _gui()
return _is_gui
def hgcmd():
"""Return the command used to execute current hg
This is different from hgexecutable() because on Windows we want
to avoid things opening new shell windows like batch files, so we
get either the python call or current executable.
"""
if resourceutil.mainfrozen():
if getattr(sys, 'frozen', None) == 'macosx_app':
# Env variable set by py2app
return [encoding.environ[b'EXECUTABLEPATH']]
else:
return [pycompat.sysexecutable]
return _gethgcmd()
def rundetached(args, condfn):
"""Execute the argument list in a detached process.
condfn is a callable which is called repeatedly and should return
True once the child process is known to have started successfully.
At this point, the child process PID is returned. If the child
process fails to start or finishes before condfn() evaluates to
True, return -1.
"""
# Windows case is easier because the child process is either
# successfully starting and validating the condition or exiting
# on failure. We just poll on its PID. On Unix, if the child
# process fails to start, it will be left in a zombie state until
# the parent wait on it, which we cannot do since we expect a long
# running process on success. Instead we listen for SIGCHLD telling
# us our child process terminated.
terminated = set()
def handler(signum, frame):
terminated.add(os.wait())
prevhandler = None
SIGCHLD = getattr(signal, 'SIGCHLD', None)
if SIGCHLD is not None:
prevhandler = signal.signal(SIGCHLD, handler)
try:
pid = spawndetached(args)
while not condfn():
if (pid in terminated or not testpid(pid)) and not condfn():
return -1
time.sleep(0.1)
return pid
finally:
if prevhandler is not None:
signal.signal(signal.SIGCHLD, prevhandler)
@contextlib.contextmanager
def uninterruptible(warn):
"""Inhibit SIGINT handling on a region of code.
Note that if this is called in a non-main thread, it turns into a no-op.
Args:
warn: A callable which takes no arguments, and returns True if the
previous signal handling should be restored.
"""
oldsiginthandler = [signal.getsignal(signal.SIGINT)]
shouldbail = []
def disabledsiginthandler(*args):
if warn():
signal.signal(signal.SIGINT, oldsiginthandler[0])
del oldsiginthandler[0]
shouldbail.append(True)
try:
try:
signal.signal(signal.SIGINT, disabledsiginthandler)
except ValueError:
# wrong thread, oh well, we tried
del oldsiginthandler[0]
yield
finally:
if oldsiginthandler:
signal.signal(signal.SIGINT, oldsiginthandler[0])
if shouldbail:
raise KeyboardInterrupt
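# Illustrative usage sketch (the callback and the guarded call are hypothetical):
#     def _warn():
#         print('interrupt received, finishing the critical section first')
#         return True  # restore the previous SIGINT handler
#     with uninterruptible(_warn):
#         do_critical_work()
# If SIGINT arrives inside the block and _warn() returns True, a KeyboardInterrupt
# is raised once the block finishes.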
if pycompat.iswindows:
# no fork on Windows, but we can create a detached process
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
# No stdlib constant exists for this value
DETACHED_PROCESS = 0x00000008
# Following creation flags might create a console GUI window.
    # Using subprocess.CREATE_NEW_CONSOLE might help.
# See https://phab.mercurial-scm.org/D1701 for discussion
_creationflags = (
DETACHED_PROCESS
| subprocess.CREATE_NEW_PROCESS_GROUP # pytype: disable=module-attr
)
def runbgcommand(
script,
env,
shell=False,
stdout=None,
stderr=None,
ensurestart=True,
record_wait=None,
stdin_bytes=None,
):
'''Spawn a command without waiting for it to finish.'''
# we can't use close_fds *and* redirect stdin. I'm not sure that we
# need to because the detached process has no console connection.
try:
stdin = None
if stdin_bytes is not None:
stdin = pycompat.unnamedtempfile()
stdin.write(stdin_bytes)
stdin.flush()
stdin.seek(0)
p = subprocess.Popen(
pycompat.rapply(tonativestr, script),
shell=shell,
env=tonativeenv(env),
close_fds=True,
creationflags=_creationflags,
stdin=stdin,
stdout=stdout,
stderr=stderr,
)
if record_wait is not None:
record_wait(p.wait)
finally:
if stdin is not None:
stdin.close()
else:
def runbgcommand(
cmd,
env,
shell=False,
stdout=None,
stderr=None,
ensurestart=True,
record_wait=None,
stdin_bytes=None,
):
"""Spawn a command without waiting for it to finish.
        When `record_wait` is not None, the spawned process will not be fully
        detached and the `record_wait` argument will be called with the
        `Subprocess.wait` function for the spawned process. This is mostly
        useful for developers that need to make sure the spawned process
        finished before a certain point (e.g. when writing tests)."""
if pycompat.isdarwin:
# avoid crash in CoreFoundation in case another thread
# calls gui() while we're calling fork().
gui()
# double-fork to completely detach from the parent process
# based on http://code.activestate.com/recipes/278731
if record_wait is None:
pid = os.fork()
if pid:
if not ensurestart:
# Even though we're not waiting on the child process,
# we still must call waitpid() on it at some point so
# it's not a zombie/defunct. This is especially relevant for
# chg since the parent process won't die anytime soon.
# We use a thread to make the overhead tiny.
def _do_wait():
os.waitpid(pid, 0)
t = threading.Thread(target=_do_wait)
t.daemon = True
t.start()
return
# Parent process
(_pid, status) = os.waitpid(pid, 0)
if os.WIFEXITED(status):
returncode = os.WEXITSTATUS(status)
else:
returncode = -(os.WTERMSIG(status))
if returncode != 0:
# The child process's return code is 0 on success, an errno
# value on failure, or 255 if we don't have a valid errno
# value.
#
# (It would be slightly nicer to return the full exception info
# over a pipe as the subprocess module does. For now it
# doesn't seem worth adding that complexity here, though.)
if returncode == 255:
returncode = errno.EINVAL
raise OSError(
returncode,
b'error running %r: %s'
% (cmd, os.strerror(returncode)),
)
return
returncode = 255
try:
if record_wait is None:
# Start a new session
os.setsid()
# connect stdin to devnull to make sure the subprocess can't
# muck up that stream for mercurial.
if stdin_bytes is None:
stdin = open(os.devnull, b'r')
else:
stdin = pycompat.unnamedtempfile()
stdin.write(stdin_bytes)
stdin.flush()
stdin.seek(0)
if stdout is None:
stdout = open(os.devnull, b'w')
if stderr is None:
stderr = open(os.devnull, b'w')
p = subprocess.Popen(
cmd,
shell=shell,
env=env,
close_fds=True,
stdin=stdin,
stdout=stdout,
stderr=stderr,
)
if record_wait is not None:
record_wait(p.wait)
returncode = 0
except EnvironmentError as ex:
returncode = ex.errno & 0xFF
if returncode == 0:
# This shouldn't happen, but just in case make sure the
# return code is never 0 here.
returncode = 255
except Exception:
returncode = 255
finally:
# mission accomplished, this child needs to exit and not
# continue the hg process here.
stdin.close()
if record_wait is None:
os._exit(returncode)
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Hiphopcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test hiphopcoind shutdown."""
from test_framework.test_framework import HiphopcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(HiphopcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
test.py
|
#!/usr/bin/env python
import rospy
import time
from std_msgs.msg import Int32
from geometry_msgs.msg import Twist
from std_msgs.msg import Bool
import numpy as np
import threading
import os
################################################################################ DEBUG
debug_disable_hilens = os.getenv('DEBUG_NOHILENS') == '1'
debug_disable_lanecam = os.getenv('DEBUG_NOLANECAM') == '1'
debug_fakerun = os.getenv('DEBUG_FAKERUN') == '1'
debug_enable_pause = True
debug_default_speed = 35
print('################################################################################')
print('HiLens : ' + str(not debug_disable_hilens))
print('LaneCam : ' + str(not debug_disable_lanecam))
print('FakeRun : ' + str(debug_fakerun))
print('Pause : ' + str(debug_enable_pause))
print('DefaultSpeed : ' + str(debug_default_speed))
print('################################################################################')
################################################################################ STATE
state_manul = 0 # 0 - Automatic
state_speed = debug_default_speed if debug_disable_hilens else 0 # SPEED
state_direction = 49 # 0-LEFT-50-RIGHT-100
state_gear = 1 # 1 - Drive, 2 - Stop
state_onpesd = False
state_paused = False
################################################################################ Publishers
pub_m = rospy.Publisher('/bluetooth/received/manual', Int32, queue_size=10)
pub_d = rospy.Publisher('/auto_driver/send/direction', Int32, queue_size=10)
pub_s = rospy.Publisher('/auto_driver/send/speed', Int32, queue_size=10)
pub_g = rospy.Publisher('/auto_driver/send/gear', Int32, queue_size=10)
def applyState():
pub_m.publish(state_manul)
pub_d.publish(state_direction)
pub_s.publish(state_speed)
pub_g.publish(state_gear)
################################################################################ Main
def realmain():
global state_manul
global state_direction
global state_speed
global state_gear
rospy.init_node('manager', anonymous=True)
threading.Thread(target=lambda: rospy.spin()).start()
rate = rospy.Rate(20)
state_speed = 35
count = 0
while not rospy.is_shutdown():
count += 1
if count == 50:
state_speed -= 10
applyState()
rate.sleep()
if __name__ == '__main__':
realmain()
|
__init__.py
|
import os
import re
import sys
import inspect
import warnings
import functools
import threading
from timeit import default_timer
from flask import request, make_response, current_app
from flask import Flask, Response
from flask.views import MethodViewType
from werkzeug.serving import is_running_from_reloader
from prometheus_client import Counter, Histogram, Gauge, Summary
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
if sys.version_info[0:2] >= (3, 4):
    # Python v3.4+ has a built-in __wrapped__ attribute
wraps = functools.wraps
else:
# in previous Python version we have to set the missing attribute
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
try:
# try to convert http.HTTPStatus to int status codes
from http import HTTPStatus
def _to_status_code(response_status):
if isinstance(response_status, HTTPStatus):
return response_status.value
else:
return response_status
except ImportError:
# otherwise simply use the status as is
def _to_status_code(response_status):
return response_status
NO_PREFIX = '#no_prefix'
"""
Constant indicating that default metrics should not have any prefix applied.
It purposely uses invalid characters defined for metrics names as specified in Prometheus
documentation (see: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels)
"""
class PrometheusMetrics(object):
"""
Prometheus metrics export configuration for Flask.
The default metrics include a Histogram for HTTP request latencies
and number of HTTP requests plus a Counter for the total number
of HTTP requests.
Sample usage:
app = Flask(__name__)
metrics = PrometheusMetrics(app)
# static information as metric
metrics.info('app_info', 'Application info', version='1.0.3')
@app.route('/')
def main():
pass # requests tracked by default
@app.route('/skip')
@metrics.do_not_track()
def skip():
pass # default metrics are not collected
@app.route('/<item_type>')
@metrics.do_not_track()
@metrics.counter('invocation_by_type', 'Number of invocations by type',
labels={'item_type': lambda: request.view_args['type']})
def by_type(item_type):
pass # only the counter is collected, not the default metrics
@app.route('/long-running')
@metrics.gauge('in_progress', 'Long running requests in progress')
def long_running():
pass
@app.route('/status/<int:status>')
@metrics.do_not_track()
@metrics.summary('requests_by_status', 'Request latencies by status',
labels={'status': lambda r: r.status_code})
@metrics.histogram('requests_by_status_and_path', 'Request latencies by status and path',
labels={'status': lambda r: r.status_code, 'path': lambda: request.path})
def echo_status(status):
return 'Status: %s' % status, status
Label values can be defined as callables:
- With a single argument that will be the Flask Response object
- Without an argument, possibly to use with the Flask `request` object
"""
def __init__(self, app, path='/metrics',
export_defaults=True, defaults_prefix='flask',
group_by='path', buckets=None,
default_latency_as_histogram=True,
default_labels=None, response_converter=None,
excluded_paths=None, registry=None, **kwargs):
"""
Create a new Prometheus metrics export configuration.
:param app: the Flask application
:param path: the metrics path (defaults to `/metrics`)
:param export_defaults: expose all HTTP request latencies
and number of HTTP requests
:param defaults_prefix: string to prefix the default exported
metrics name with (when either `export_defaults=True` or
`export_defaults(..)` is called) or in case you don't want
any prefix then use `NO_PREFIX` constant
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `url_rule`, etc.
(defaults to `path`)
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param default_latency_as_histogram: export request latencies
as a Histogram (defaults), otherwise use a Summary
:param default_labels: default labels to attach to each of the
metrics exposed by this `PrometheusMetrics` instance
        :param response_converter: a function that converts the captured
            response object to a Flask-friendly representation
:param excluded_paths: regular expression(s) as a string or
a list of strings for paths to exclude from tracking
:param registry: the Prometheus Registry to use
"""
self.app = app
self.path = path
self._export_defaults = export_defaults
self._defaults_prefix = defaults_prefix or 'flask'
self._default_labels = default_labels or {}
self._default_latency_as_histogram = default_latency_as_histogram
self._response_converter = response_converter or make_response
self.buckets = buckets
self.version = __version__
if registry:
self.registry = registry
else:
# load the default registry from the underlying
# Prometheus library here for easier unit testing
# see https://github.com/rycus86/prometheus_flask_exporter/pull/20
from prometheus_client import REGISTRY as DEFAULT_REGISTRY
self.registry = DEFAULT_REGISTRY
if kwargs.get('static_labels'):
warnings.warn(
'The `static_labels` argument of `PrometheusMetrics` is '
'deprecated since 0.15.0, please use the '
'new `default_labels` argument.', DeprecationWarning
)
for key, value in kwargs.get('static_labels', dict()).items():
if key not in self._default_labels:
self._default_labels[key] = value
if kwargs.get('group_by_endpoint') is True:
warnings.warn(
'The `group_by_endpoint` argument of `PrometheusMetrics` is '
'deprecated since 0.4.0, please use the '
'new `group_by` argument.', DeprecationWarning
)
self.group_by = 'endpoint'
elif group_by:
self.group_by = group_by
else:
self.group_by = 'path'
if excluded_paths:
if PrometheusMetrics._is_string(excluded_paths):
excluded_paths = [excluded_paths]
self.excluded_paths = [
re.compile(p) for p in excluded_paths
]
else:
self.excluded_paths = None
if app is not None:
self.init_app(app)
@classmethod
def for_app_factory(cls, **kwargs):
"""
A convenience method to create a new instance that is
suitable for Flask "app factory" configurations. Please
see: http://flask.pocoo.org/docs/1.0/patterns/appfactories/
Note, that you will need to call `init_app(...)` later
with the Flask application as its parameter.
This method takes the same keyword arguments as the
default constructor.
"""
return cls(app=None, **kwargs)
def init_app(self, app):
"""
This callback can be used to initialize an application for the
use with this prometheus reporter setup.
This is usually used with a Flask "app factory" configuration. Please
see: http://flask.pocoo.org/docs/1.0/patterns/appfactories/
Note, that you need to use `PrometheusMetrics.for_app_factory()`
for this mode, otherwise it is called automatically.
:param app: the Flask application
"""
if self.path:
self.register_endpoint(self.path, app)
if self._export_defaults:
self.export_defaults(
buckets=self.buckets, group_by=self.group_by,
latency_as_histogram=self._default_latency_as_histogram,
prefix=self._defaults_prefix, app=app
)
def register_endpoint(self, path, app=None):
"""
Register the metrics endpoint on the Flask application.
:param path: the path of the endpoint
:param app: the Flask application to register the endpoint on
(by default it is the application registered with this class)
"""
if is_running_from_reloader() and not os.environ.get('DEBUG_METRICS'):
return
if app is None:
app = self.app or current_app
@app.route(path)
@self.do_not_track()
def prometheus_metrics():
# import these here so they don't clash with our own multiprocess module
from prometheus_client import multiprocess, CollectorRegistry
if 'prometheus_multiproc_dir' in os.environ:
registry = CollectorRegistry()
else:
registry = self.registry
if 'name[]' in request.args:
registry = registry.restricted_registry(request.args.getlist('name[]'))
if 'prometheus_multiproc_dir' in os.environ:
multiprocess.MultiProcessCollector(registry)
headers = {'Content-Type': CONTENT_TYPE_LATEST}
return generate_latest(registry), 200, headers
def start_http_server(self, port, host='0.0.0.0', endpoint='/metrics'):
"""
Start an HTTP server for exposing the metrics.
This will be an individual Flask application,
not the one registered with this class.
:param port: the HTTP port to expose the metrics endpoint on
:param host: the HTTP host to listen on (default: `0.0.0.0`)
:param endpoint: the URL path to expose the endpoint on
(default: `/metrics`)
"""
if is_running_from_reloader():
return
app = Flask('prometheus-flask-exporter-%d' % port)
self.register_endpoint(endpoint, app)
def run_app():
app.run(host=host, port=port)
thread = threading.Thread(target=run_app)
        thread.daemon = True
thread.start()
def export_defaults(self, buckets=None, group_by='path',
latency_as_histogram=True,
prefix='flask', app=None, **kwargs):
"""
Export the default metrics:
- HTTP request latencies
- HTTP request exceptions
- Number of HTTP requests
:param buckets: the time buckets for request latencies
(will use the default when `None`)
:param group_by: group default HTTP metrics by
this request property, like `path`, `endpoint`, `rule`, etc.
(defaults to `path`)
:param latency_as_histogram: export request latencies
as a Histogram, otherwise use a Summary instead
(defaults to `True` to export as a Histogram)
:param prefix: prefix to start the default metrics names with
or `NO_PREFIX` (to skip prefix)
:param app: the Flask application
"""
if app is None:
app = self.app or current_app
if not prefix:
prefix = self._defaults_prefix or 'flask'
if kwargs.get('group_by_endpoint') is True:
warnings.warn(
'The `group_by_endpoint` argument of '
'`PrometheusMetrics.export_defaults` is deprecated since 0.4.0, '
'please use the new `group_by` argument.', DeprecationWarning
)
duration_group = 'endpoint'
elif group_by:
duration_group = group_by
else:
duration_group = 'path'
if callable(duration_group):
duration_group_name = duration_group.__name__
else:
duration_group_name = duration_group
if prefix == NO_PREFIX:
prefix = ""
else:
prefix = prefix + "_"
try:
self.info(
'%sexporter_info' % prefix,
'Information about the Prometheus Flask exporter',
version=self.version
)
except ValueError:
return # looks like we have already exported the default metrics
labels = self._get_combined_labels(None)
if latency_as_histogram:
# use the default buckets from prometheus_client if not given here
buckets_as_kwargs = {}
if buckets is not None:
buckets_as_kwargs['buckets'] = buckets
request_duration_metric = Histogram(
'%shttp_request_duration_seconds' % prefix,
'Flask HTTP request duration in seconds',
('method', duration_group_name, 'status') + labels.keys(),
registry=self.registry,
**buckets_as_kwargs
)
else:
# export as Summary instead
request_duration_metric = Summary(
'%shttp_request_duration_seconds' % prefix,
'Flask HTTP request duration in seconds',
('method', duration_group_name, 'status') + labels.keys(),
registry=self.registry
)
counter_labels = ('method', 'status') + labels.keys()
request_total_metric = Counter(
'%shttp_request_total' % prefix,
'Total number of HTTP requests',
counter_labels,
registry=self.registry
)
request_exceptions_metric = Counter(
'%shttp_request_exceptions_total' % prefix,
'Total number of HTTP requests which resulted in an exception',
counter_labels,
registry=self.registry
)
def before_request():
request.prom_start_time = default_timer()
def after_request(response):
if hasattr(request, 'prom_do_not_track') or hasattr(request, 'prom_exclude_all'):
return response
if self.excluded_paths:
if any(pattern.match(request.path) for pattern in self.excluded_paths):
return response
if hasattr(request, 'prom_start_time'):
total_time = max(default_timer() - request.prom_start_time, 0)
if callable(duration_group):
group = duration_group(request)
else:
group = getattr(request, duration_group)
request_duration_labels = {
'method': request.method,
'status': _to_status_code(response.status_code),
duration_group_name: group
}
request_duration_labels.update(labels.values_for(response))
request_duration_metric.labels(**request_duration_labels).observe(total_time)
request_total_metric.labels(
method=request.method, status=_to_status_code(response.status_code),
**labels.values_for(response)
).inc()
return response
def teardown_request(exception=None):
if not exception or hasattr(request, 'prom_do_not_track') or hasattr(request, 'prom_exclude_all'):
return
if self.excluded_paths:
if any(pattern.match(request.path) for pattern in self.excluded_paths):
return
response = make_response('Exception: %s' % exception, 500)
if callable(duration_group):
group = duration_group(request)
else:
group = getattr(request, duration_group)
request_exceptions_metric.labels(
method=request.method, status=500,
**labels.values_for(response)
).inc()
if hasattr(request, 'prom_start_time'):
total_time = max(default_timer() - request.prom_start_time, 0)
request_duration_labels = {
'method': request.method,
'status': 500,
duration_group_name: group
}
request_duration_labels.update(labels.values_for(response))
request_duration_metric.labels(**request_duration_labels).observe(total_time)
request_total_metric.labels(
method=request.method, status=500,
**labels.values_for(response)
).inc()
return
app.before_request(before_request)
app.after_request(after_request)
app.teardown_request(teardown_request)
def register_default(self, *metric_wrappers, **kwargs):
"""
Registers metric wrappers to track all endpoints,
similar to `export_defaults` but with user defined metrics.
Call this function after all routes have been set up.
Use the metric wrappers as arguments:
- metrics.counter(..)
- metrics.gauge(..)
- metrics.summary(..)
- metrics.histogram(..)
:param metric_wrappers: one or more metric wrappers to register
for all available endpoints
:param app: the Flask application to register the default metric for
(by default it is the application registered with this class)
"""
app = kwargs.get('app')
if app is None:
app = self.app or current_app
for endpoint, view_func in app.view_functions.items():
for wrapper in metric_wrappers:
view_func = wrapper(view_func)
app.view_functions[endpoint] = view_func
def histogram(self, name, description, labels=None, **kwargs):
"""
Use a Histogram to track the execution time and invocation count
of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Histogram
"""
return self._track(
Histogram,
lambda metric, time: metric.observe(time),
kwargs, name, description, labels,
registry=self.registry
)
def summary(self, name, description, labels=None, **kwargs):
"""
Use a Summary to track the execution time and invocation count
of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Summary
"""
return self._track(
Summary,
lambda metric, time: metric.observe(time),
kwargs, name, description, labels,
registry=self.registry
)
def gauge(self, name, description, labels=None, **kwargs):
"""
Use a Gauge to track the number of invocations in progress
for the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Gauge
"""
return self._track(
Gauge,
lambda metric, time: metric.dec(),
kwargs, name, description, labels,
registry=self.registry,
before=lambda metric: metric.inc(),
revert_when_not_tracked=lambda metric: metric.dec()
)
def counter(self, name, description, labels=None, **kwargs):
"""
Use a Counter to track the total number of invocations of the method.
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param kwargs: additional keyword arguments for creating the Counter
"""
return self._track(
Counter,
lambda metric, time: metric.inc(),
kwargs, name, description, labels,
registry=self.registry
)
def _track(self, metric_type, metric_call, metric_kwargs, name, description, labels,
registry, before=None, revert_when_not_tracked=None):
"""
Internal method decorator logic.
:param metric_type: the type of the metric from the `prometheus_client` library
:param metric_call: the invocation to execute as a callable with `(metric, time)`
:param metric_kwargs: additional keyword arguments for creating the metric
:param name: the name of the metric
:param description: the description of the metric
:param labels: a dictionary of `{labelname: callable_or_value}` for labels
:param registry: the Prometheus Registry to use
:param before: an optional callable to invoke before executing the
request handler method accepting the single `metric` argument
:param revert_when_not_tracked: an optional callable to invoke when
a non-tracked endpoint is being handled to undo any actions already
done on it, accepts a single `metric` argument
"""
if labels is not None and not isinstance(labels, dict):
raise TypeError('labels needs to be a dictionary of {labelname: callable}')
labels = self._get_combined_labels(labels)
parent_metric = metric_type(
name, description, labelnames=labels.keys(), registry=registry,
**metric_kwargs
)
def get_metric(response):
if labels.has_keys():
return parent_metric.labels(**labels.values_for(response))
else:
return parent_metric
def decorator(f):
@wraps(f)
def func(*args, **kwargs):
if before:
metric = get_metric(None)
before(metric)
else:
metric = None
exception = None
start_time = default_timer()
try:
try:
# execute the handler function
response = f(*args, **kwargs)
except Exception as ex:
# let Flask decide to wrap or reraise the Exception
response = current_app.handle_user_exception(ex)
except Exception as ex:
# if it was re-raised, treat it as an InternalServerError
exception = ex
response = make_response('Exception: %s' % ex, 500)
if hasattr(request, 'prom_exclude_all'):
if metric and revert_when_not_tracked:
# special handling for Gauge metrics
revert_when_not_tracked(metric)
return response
total_time = max(default_timer() - start_time, 0)
if not metric:
if not isinstance(response, Response) and request.endpoint:
view_func = current_app.view_functions[request.endpoint]
# There may be decorators 'above' us,
# but before the function is registered with Flask
while view_func and view_func != f:
try:
view_func = view_func.__wrapped__
except AttributeError:
break
if view_func == f:
# we are in a request handler method
response = self._response_converter(response)
elif hasattr(view_func, 'view_class') and isinstance(view_func.view_class, MethodViewType):
# we are in a method view (for Flask-RESTful for example)
response = self._response_converter(response)
metric = get_metric(response)
metric_call(metric, time=total_time)
if exception:
try:
# re-raise for the Flask error handler
raise exception
except Exception as ex:
return current_app.handle_user_exception(ex)
else:
return response
return func
return decorator
def _get_combined_labels(self, labels):
"""
Combines the given labels with static and default labels
and wraps them into an object that can efficiently return
the keys and values of these combined labels.
"""
labels = labels.copy() if labels else dict()
if self._default_labels:
labels.update(self._default_labels.copy())
def argspec(func):
if hasattr(inspect, 'getfullargspec'):
return inspect.getfullargspec(func)
else:
return inspect.getargspec(func)
def label_value(f):
if not callable(f):
return lambda x: f
if argspec(f).args:
return lambda x: f(x)
else:
return lambda x: f()
class CombinedLabels(object):
def __init__(self, _labels):
self.labels = _labels.items()
def keys(self):
return tuple(map(lambda k: k[0], self.labels))
def has_keys(self):
return len(self.labels) > 0
def values_for(self, response):
label_generator = tuple(
(key, label_value(call))
for key, call in self.labels
) if labels else tuple()
return {key: value(response) for key, value in label_generator}
return CombinedLabels(labels)
@staticmethod
def do_not_track():
"""
Decorator to skip the default metrics collection for the method.
*Note*: explicit metrics decorators will still collect the data
"""
def decorator(f):
@wraps(f)
def func(*args, **kwargs):
request.prom_do_not_track = True
return f(*args, **kwargs)
return func
return decorator
@staticmethod
def exclude_all_metrics():
"""
Decorator to skip all metrics collection for the method.
"""
def decorator(f):
@wraps(f)
def func(*args, **kwargs):
request.prom_exclude_all = True
return f(*args, **kwargs)
return func
return decorator
def info(self, name, description, labelnames=None, labelvalues=None, **labels):
"""
Report any information as a Prometheus metric.
This will create a `Gauge` with the initial value of 1.
The easiest way to use it is:
metrics = PrometheusMetrics(app)
metrics.info(
'app_info', 'Application info',
version='1.0', major=1, minor=0
)
If the order of the labels matters:
metrics = PrometheusMetrics(app)
metrics.info(
'app_info', 'Application info',
('version', 'major', 'minor'),
('1.0', 1, 0)
)
:param name: the name of the metric
:param description: the description of the metric
:param labelnames: the names of the labels
:param labelvalues: the values of the labels
:param labels: the names and values of the labels
:return: the newly created `Gauge` metric
"""
if labels and labelnames:
raise ValueError(
'Cannot have labels defined as `dict` '
'and collections of names and values'
)
if labelnames is None and labels:
labelnames = labels.keys()
elif labelnames and labelvalues:
for idx, label_name in enumerate(labelnames):
labels[label_name] = labelvalues[idx]
gauge = Gauge(
name, description, labelnames or tuple(),
registry=self.registry
)
if labels:
gauge = gauge.labels(**labels)
gauge.set(1)
return gauge
@staticmethod
def _is_string(value):
try:
return isinstance(value, basestring) # python2
except NameError:
return isinstance(value, str) # python3
class ConnexionPrometheusMetrics(PrometheusMetrics):
"""
Specific extension for Connexion (https://connexion.readthedocs.io/)
that makes sure responses are converted to Flask responses.
"""
def __init__(self, app, default_mimetype='application/json', **kwargs):
flask_app = app.app if app else None
if 'response_converter' not in kwargs:
kwargs['response_converter'] = self._create_response_converter(default_mimetype)
super().__init__(flask_app, **kwargs)
@staticmethod
def content_type(content_type):
"""
Force the content type of the response,
which would be otherwise overwritten by the metrics conversion
to application/json.
:param content_type: the value to send in the
Content-Type response header
"""
def decorator(f):
@wraps(f)
def func(*args, **kwargs):
request.prom_connexion_content_type = content_type
return f(*args, **kwargs)
return func
return decorator
@staticmethod
def _create_response_converter(default_mimetype):
from connexion.apis.flask_api import FlaskApi
def _make_response(response):
mimetype = default_mimetype
if hasattr(request, 'prom_connexion_content_type'):
mimetype = request.prom_connexion_content_type
return FlaskApi.get_response(response, mimetype=mimetype)
return _make_response
class RESTfulPrometheusMetrics(PrometheusMetrics):
"""
Specific extension for Flask-RESTful (https://flask-restful.readthedocs.io/)
that makes sure API responses are converted to Flask responses.
"""
def __init__(self, app, api, **kwargs):
"""
Initializes a new PrometheusMetrics instance that is appropriate
for a Flask-RESTful application.
:param app: the Flask application
:param api: the Flask-RESTful API instance
"""
if api and 'response_converter' not in kwargs:
kwargs['response_converter'] = self._create_response_converter(api)
super().__init__(app, **kwargs)
@classmethod
def for_app_factory(cls, api=None, **kwargs):
return cls(app=None, api=api, **kwargs)
def init_app(self, app, api=None):
if api:
self._response_converter = self._create_response_converter(api)
return super().init_app(app)
@staticmethod
def _create_response_converter(api):
def _make_response(response):
if response is None:
response = (None, 200)
return api.make_response(*response)
return _make_response
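# A minimal wiring sketch for the Flask-RESTful variant above (the app and api
# objects are illustrative, not defined in this module):
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   metrics = RESTfulPrometheusMetrics(app, api)
#
# or, with the application factory pattern:
#
#   metrics = RESTfulPrometheusMetrics.for_app_factory()
#   ...
#   metrics.init_app(app, api)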
__version__ = '0.16.0'
|
concurrency.py
|
from invoke.vendor.six.moves.queue import Queue
from invoke.util import ExceptionWrapper, ExceptionHandlingThread as EHThread
# TODO: rename
class ExceptionHandlingThread_:
class via_target:
def setup(self):
def worker(q):
q.put(7)
self.worker = worker
def base_case(self):
queue = Queue()
t = EHThread(target=self.worker, args=[queue])
t.start()
t.join()
assert queue.get(block=False) == 7
assert queue.empty()
def catches_exceptions(self):
# Induce exception by submitting a bad queue obj
t = EHThread(target=self.worker, args=[None])
t.start()
t.join()
wrapper = t.exception()
assert isinstance(wrapper, ExceptionWrapper)
assert wrapper.kwargs == {"args": [None], "target": self.worker}
assert wrapper.type == AttributeError
assert isinstance(wrapper.value, AttributeError)
def exhibits_is_dead_flag(self):
t = EHThread(target=self.worker, args=[None])
t.start()
t.join()
assert t.is_dead
t = EHThread(target=self.worker, args=[Queue()])
t.start()
t.join()
assert not t.is_dead
class via_subclassing:
def setup(self):
class MyThread(EHThread):
def __init__(self, *args, **kwargs):
self.queue = kwargs.pop("queue")
super(MyThread, self).__init__(*args, **kwargs)
def _run(self):
self.queue.put(7)
self.klass = MyThread
def base_case(self):
queue = Queue()
t = self.klass(queue=queue)
t.start()
t.join()
assert queue.get(block=False) == 7
assert queue.empty()
def catches_exceptions(self):
# Induce exception by submitting a bad queue obj
t = self.klass(queue=None)
t.start()
t.join()
wrapper = t.exception()
assert isinstance(wrapper, ExceptionWrapper)
assert wrapper.kwargs == {}
assert wrapper.type == AttributeError
assert isinstance(wrapper.value, AttributeError)
def exhibits_is_dead_flag(self):
t = self.klass(queue=None)
t.start()
t.join()
assert t.is_dead
t = self.klass(queue=Queue())
t.start()
t.join()
assert not t.is_dead
|
Misc.py
|
## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import sys
import string
import thread
import threading
import time
import re
import cPickle
import array
import shutil
from struct import pack
from UserDict import IterableUserDict
from UserList import UserList
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from DataType import *
from BuildToolError import *
from CommonDataClass.DataClass import *
from Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## Dictionary used to store file time stamp for quick re-access
gFileTimeStampCache = {} # {file path : file time stamp}
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
@param mapfilepath Map file absolute path
@param efifilepath: EFI binary file full path
@param varnames iterable container whose elements are variable names to be searched
@return List whose elements are tuples of variable name and raw offset
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
if firstline.startswith("# Path:"):
return _parseForXcode(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for index, line in enumerate(lines):
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m != None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for index, line in enumerate(lines):
line = line.strip()
# state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
elif status ==2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 3:
m = re.match('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$', line)
if m != None:
sections.append(m.groups(0))
for varname in varnames:
Str = ''
m = re.match("^.data.(%s)" % varname, line)
if m != None:
m = re.match(".data.(%s)$" % varname, line)
if m != None:
Str = lines[index + 1]
else:
Str = line[len(".data.%s" % varname):]
if Str:
m = re.match('^([\da-fA-Fx]+) +([\da-fA-Fx]+)', Str.strip())
if m != None:
varoffset.append((varname, int(m.groups(0)[0], 16) , int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs == None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
secRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if re.match("^Start[' ']+Length[' ']+Name[' ']+Class", line):
status = 1
continue
if re.match("^Address[' ']+Publics by Value[' ']+Rva\+Base", line):
status = 2
continue
if re.match("^entry point at", line):
status = 3
continue
if status == 1 and len(line) != 0:
m = secRe.match(line)
assert m != None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m != None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
m2 = re.match('^[_]*(%s)' % varname, sym_name)
if m2 != None:
# found a binary PCD entry in map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs == None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
#
# If -N is specified on command line, cache is disabled
# The directory has to be created
#
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
if not os.path.exists(DbDir):
os.makedirs(DbDir)
#
# A temporary INF is copied to database path which must have write permission
# The temporary will be removed at the end of build
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
# Modify the full path to temporary path, keep other unchanged
#
# To build same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1: Src = f1.read()
with open(TempFullPath, 'rb') as f2: Dst = f2.read()
if Src == Dst:
return RtPath
GlobalData.gTempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporary created INFs whose paths were saved in gTempInfs
#
def ClearDuplicatedInf():
for File in GlobalData.gTempInfs:
if os.path.exists(File):
os.remove(File)
## callback routine for processing variable option
#
# This function can be used to process a variable number of option values. The
# typical usage is to specify an architecture list on the command line.
# (e.g. <tool> -a IA32 X64 IPF)
#
# @param Option Standard callback function parameter
# @param OptionString Standard callback function parameter
# @param Value Standard callback function parameter
# @param Parser Standard callback function parameter
#
# @retval
#
def ProcessVariableArgument(Option, OptionString, Value, Parser):
assert Value is None
Value = []
RawArgs = Parser.rargs
while RawArgs:
Arg = RawArgs[0]
if (Arg[:2] == "--" and len(Arg) > 2) or \
(Arg[:1] == "-" and len(Arg) > 1 and Arg[1] != "-"):
break
Value.append(Arg)
del RawArgs[0]
setattr(Parser.values, Option.dest, Value)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
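# Example (a sketch of the conversion above, using an arbitrary GUID):
#   GuidStringToGuidStructureString('12345678-1234-5678-9abc-def012345678')
#   -> '{0x12345678, 0x1234, 0x5678, {0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78}}'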
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
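# Example (the inverse conversion, using the same arbitrary GUID as above):
#   GuidStructureStringToGuidString('{0x12345678, 0x1234, 0x5678, {0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78}}')
#   -> '12345678-1234-5678-9abc-def012345678'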
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory == None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory == None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Check if given file is changed or not
#
# This method is used to check if a file is changed or not between two build
# actions. It makes use of a cache to store file timestamps.
#
# @param File The path of file
#
# @retval True If the given file is changed, doesn't exist, or can't be
# found in timestamp cache
# @retval False If the given file is not changed
#
def IsChanged(File):
if not os.path.exists(File):
return True
FileState = os.stat(File)
TimeStamp = FileState[-2]
if File in gFileTimeStampCache and TimeStamp == gFileTimeStampCache[File]:
FileChanged = False
else:
FileChanged = True
gFileTimeStampCache[File] = TimeStamp
return FileChanged
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
if not IsBinaryFile:
Content = Content.replace("\n", os.linesep)
if os.path.exists(File):
try:
if Content == open(File, "rb").read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
try:
if GlobalData.gIsWindows:
try:
from PyUtility import SaveFileToDisk
if not SaveFileToDisk(File, Content):
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData=File)
except:
Fd = open(File, "wb")
Fd.write(Content)
Fd.close()
else:
Fd = open(File, "wb")
Fd.write(Content)
Fd.close()
except IOError, X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
return True
## Make a Python object persistent on file system
#
# @param Data The object to be stored in file
# @param File The path of file to store the object
#
def DataDump(Data, File):
Fd = None
try:
Fd = open(File, 'wb')
cPickle.dump(Data, Fd, cPickle.HIGHEST_PROTOCOL)
except:
EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
finally:
if Fd != None:
Fd.close()
## Restore a Python object from a file
#
# @param File The path of file stored the object
#
# @retval object A python object
# @retval None If failure in file operation
#
def DataRestore(File):
Data = None
Fd = None
try:
Fd = open(File, 'rb')
Data = cPickle.load(Fd)
except Exception, e:
EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
Data = None
finally:
if Fd != None:
Fd.close()
return Data
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
## Get all files of a directory
#
# @param Root: Root dir
# @param SkipList : The files that need to be skipped
#
# @retval A list of all files
#
def GetFiles(Root, SkipList=None, FullPath=True):
OriPath = Root
FileList = []
for Root, Dirs, Files in os.walk(Root):
if SkipList:
for Item in SkipList:
if Item in Dirs:
Dirs.remove(Item)
for File in Files:
File = os.path.normpath(os.path.join(Root, File))
if not FullPath:
File = File[len(OriPath) + 1:]
FileList.append(File)
return FileList
## Check if given file exists or not
#
# @param File File name or path to be checked
# @param Ext The expected file extension, if any
#
# @retval True if file exists
# @retval False if file doesn't exist
#
def ValidFile(File, Ext=None):
if Ext != None:
Dummy, FileExt = os.path.splitext(File)
if FileExt.lower() != Ext.lower():
return False
if not os.path.exists(File):
return False
return True
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
## Check if given file exists or not
#
#
def ValidFile2(AllFiles, File, Ext=None, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
NewFile = File
if Ext != None:
Dummy, FileExt = os.path.splitext(File)
if FileExt.lower() != Ext.lower():
return False, File
# Replace the Edk macros
if OverrideDir != '' and OverrideDir != None:
if OverrideDir.find('$(EFI_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
if OverrideDir.find('$(EDK_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
# Replace the default dir to current dir
if Dir == '.':
Dir = os.getcwd()
Dir = Dir[len(Workspace) + 1:]
# First check if File has Edk definition itself
if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
NewFile = File.replace('$(EFI_SOURCE)', EfiSource)
NewFile = NewFile.replace('$(EDK_SOURCE)', EdkSource)
NewFile = AllFiles[os.path.normpath(NewFile)]
if NewFile != None:
return True, NewFile
# Second check the path with override value
if OverrideDir != '' and OverrideDir != None:
NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile != None:
return True, NewFile
# Last check the path with normal definitions
File = os.path.join(Dir, File)
NewFile = AllFiles[os.path.normpath(File)]
if NewFile != None:
return True, NewFile
return False, File
## Check if given file exists or not
#
#
def ValidFile3(AllFiles, File, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
# Replace the Edk macros
if OverrideDir != '' and OverrideDir != None:
if OverrideDir.find('$(EFI_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
if OverrideDir.find('$(EDK_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
# Replace the default dir to current dir
# Dir is current module dir related to workspace
if Dir == '.':
Dir = os.getcwd()
Dir = Dir[len(Workspace) + 1:]
NewFile = File
RelaPath = AllFiles[os.path.normpath(Dir)]
NewRelaPath = RelaPath
while(True):
# First check if File has Edk definition itself
if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
File = File.replace('$(EFI_SOURCE)', EfiSource)
File = File.replace('$(EDK_SOURCE)', EdkSource)
NewFile = AllFiles[os.path.normpath(File)]
if NewFile != None:
NewRelaPath = os.path.dirname(NewFile)
File = os.path.basename(NewFile)
#NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
break
# Second check the path with override value
if OverrideDir != '' and OverrideDir != None:
NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile != None:
#NewRelaPath = os.path.dirname(NewFile)
NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
break
# Last check the path with normal definitions
NewFile = AllFiles[os.path.normpath(os.path.join(Dir, File))]
if NewFile != None:
break
# No file found
break
return NewRelaPath, RelaPath, File
def GetRelPath(Path1, Path2):
FileName = os.path.basename(Path2)
L1 = os.path.normpath(Path1).split(os.path.normpath('/'))
L2 = os.path.normpath(Path2).split(os.path.normpath('/'))
for Index in range(0, len(L1)):
if L1[Index] != L2[Index]:
FileName = '../' * (len(L1) - Index)
for Index2 in range(Index, len(L2)):
FileName = os.path.join(FileName, L2[Index2])
break
return os.path.normpath(FileName)
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
for P in PackageList:
GuidKeys = P.Guids.keys()
if Inffile and P._PrivateGuids:
if not Inffile.startswith(P.MetaFile.Dir):
GuidKeys = (dict.fromkeys(x for x in P.Guids if x not in P._PrivateGuids)).keys()
if CName in GuidKeys:
return P.Guids[CName]
return None
## Get Protocol value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def ProtocolValue(CName, PackageList, Inffile = None):
for P in PackageList:
ProtocolKeys = P.Protocols.keys()
if Inffile and P._PrivateProtocols:
if not Inffile.startswith(P.MetaFile.Dir):
ProtocolKeys = (dict.fromkeys(x for x in P.Protocols if x not in P._PrivateProtocols)).keys()
if CName in ProtocolKeys:
return P.Protocols[CName]
return None
## Get PPI value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def PpiValue(CName, PackageList, Inffile = None):
for P in PackageList:
PpiKeys = P.Ppis.keys()
if Inffile and P._PrivatePpis:
if not Inffile.startswith(P.MetaFile.Dir):
PpiKeys = (dict.fromkeys(x for x in P.Ppis if x not in P._PrivatePpis)).keys()
if CName in PpiKeys:
return P.Ppis[CName]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder, Start, End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = ''
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return self.String
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with dictionary of placeholders and append it to previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String += "".join([S.Instantiate(Dictionary) for S in SectionList])
else:
self.String += AppendString
## Replace the string template with dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join([S.Instantiate(Dictionary) for S in self._TemplateSectionList])
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
# for avoiding deadloop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
# @param OpenMessage The string printed before progress characters
# @param CloseMessage The string printed after progress characters
# @param ProgressChar The character used to indicate the progress
# @param Interval The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag == None:
Progressor._StopFlag = threading.Event()
## Start to print progress characters
#
# @param OpenMessage The string printed before progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage != None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread == None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
Progressor._ProgressThread.setDaemon(False)
Progressor._ProgressThread.start()
## Stop printing progress characters
#
# @param CloseMessage The string printed after progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage != None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
while not Progressor._StopFlag.isSet():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag != None:
Progressor._StopFlag.set()
if Progressor._ProgressThread != None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict whose keys and values can be
# accessed in the order they were added into the dict. It guarantees the order
# by making use of an internal list to keep a copy of the keys.
#
class sdict(IterableUserDict):
## Constructor
def __init__(self):
IterableUserDict.__init__(self)
self._key_list = []
## [] operator
def __setitem__(self, key, value):
if key not in self._key_list:
self._key_list.append(key)
IterableUserDict.__setitem__(self, key, value)
## del operator
def __delitem__(self, key):
self._key_list.remove(key)
IterableUserDict.__delitem__(self, key)
## used in "for k in dict" loop to ensure the correct order
def __iter__(self):
return self.iterkeys()
## len() support
def __len__(self):
return len(self._key_list)
## "in" test support
def __contains__(self, key):
return key in self._key_list
## indexof support
def index(self, key):
return self._key_list.index(key)
## insert support
def insert(self, key, newkey, newvalue, order):
index = self._key_list.index(key)
if order == 'BEFORE':
self._key_list.insert(index, newkey)
IterableUserDict.__setitem__(self, newkey, newvalue)
elif order == 'AFTER':
self._key_list.insert(index + 1, newkey)
IterableUserDict.__setitem__(self, newkey, newvalue)
## append support
def append(self, sdict):
for key in sdict:
if key not in self._key_list:
self._key_list.append(key)
IterableUserDict.__setitem__(self, key, sdict[key])
def has_key(self, key):
return key in self._key_list
## Empty the dict
def clear(self):
self._key_list = []
IterableUserDict.clear(self)
## Return a copy of keys
def keys(self):
keys = []
for key in self._key_list:
keys.append(key)
return keys
## Return a copy of values
def values(self):
values = []
for key in self._key_list:
values.append(self[key])
return values
## Return a copy of (key, value) list
def items(self):
items = []
for key in self._key_list:
items.append((key, self[key]))
return items
## Iteration support
def iteritems(self):
return iter(self.items())
## Keys iteration support
def iterkeys(self):
return iter(self.keys())
## Values iteration support
def itervalues(self):
return iter(self.values())
## Return value related to a key, and remove the (key, value) from the dict
def pop(self, key, *dv):
value = None
if key in self._key_list:
value = self[key]
self.__delitem__(key)
elif len(dv) != 0 :
value = dv[0]
return value
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem(self):
key = self._key_list[-1]
value = self[key]
self.__delitem__(key)
return key, value
def update(self, dict=None, **kwargs):
if dict != None:
for k, v in dict.items():
self[k] = v
if len(kwargs):
for k, v in kwargs.items():
self[k] = v
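# A short illustration of the ordering guarantee (keys are arbitrary):
#
#   d = sdict()
#   d['Zebra'] = 1
#   d['Apple'] = 2
#   # d.keys() -> ['Zebra', 'Apple']   (insertion order, not sorted order)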
## Dictionary with restricted keys
#
class rdict(dict):
## Constructor
def __init__(self, KeyList):
for Key in KeyList:
dict.__setitem__(self, Key, "")
## []= operator
def __setitem__(self, key, value):
if key not in self:
EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
ExtraData=", ".join(dict.keys(self)))
dict.__setitem__(self, key, value)
## =[] operator
def __getitem__(self, key):
if key not in self:
return ""
return dict.__getitem__(self, key)
## del operator
def __delitem__(self, key):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")
## Empty the dict
def clear(self):
for Key in self:
self.__setitem__(Key, "")
## Return value related to a key, and remove the (key, value) from the dict
def pop(self, key, *dv):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem(self):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
## Dictionary using prioritized list as key
#
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey == None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value == None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value != None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value == None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value == None:
for Key in self.data:
Value = self.data[Key]
if Value != None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
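# A short illustration of the wildcard fallback (keys are arbitrary):
#
#   d = tdict(True, 1)      # single-value mode, one key level
#   d['IA32'] = 1
#   d['COMMON'] = 2
#   # d['IA32'] -> 1         exact key wins
#   # d['X64']  -> 2         no exact key, falls back to the 'COMMON' wildcard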
## Boolean chain list
#
class Blist(UserList):
def __init__(self, initlist=None):
UserList.__init__(self, initlist)
def __setitem__(self, i, item):
if item not in [True, False]:
if item == 0:
item = False
else:
item = True
self.data[i] = item
def _GetResult(self):
Value = True
for item in self.data:
Value &= item
return Value
Result = property(_GetResult)
def ParseConsoleLog(Filename):
Opr = open(os.path.normpath(Filename), 'r')
Opw = open(os.path.normpath(Filename + '.New'), 'w+')
for Line in Opr.readlines():
if Line.find('.efi') > -1:
Line = Line[Line.rfind(' ') : Line.rfind('.efi')].strip()
Opw.write('%s\n' % Line)
Opr.close()
Opw.close()
def AnalyzePcdExpression(Setting):
Setting = Setting.strip()
# There might be escaped quote in a string: \", \\\"
Data = Setting.replace('\\\\', '//').replace('\\\"', '\\\'')
# There might be '|' in string and in ( ... | ... ), replace it with '-'
NewStr = ''
InStr = False
Pair = 0
for ch in Data:
if ch == '"':
InStr = not InStr
elif ch == '(' and not InStr:
Pair += 1
elif ch == ')' and not InStr:
Pair -= 1
if (Pair > 0 or InStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
return FieldList
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match the functions (AnalyzePcdData, AnalyzeHiiPcdData, AnalyzeVpdPcdData) used for retrieving PCD values from the database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|MaxSize]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# The PCD value needs to be located in such a string, and the PCD value might be an expression
# containing the "|" operator, which may also appear inside string values.
#
# @param Setting: String containing the information described above with "TokenSpace.PcdCName|" stripped
# @param PcdType: PCD type: feature, fixed, dynamic default, VPD, HII
# @param DataType: The datum type of the PCD: VOID*, UINT8/UINT16/UINT32/UINT64, BOOLEAN
# @retval:
# ValueList: A list containing the fields described above
# IsValid: True if the setting conforms to the formats above, otherwise False
# Index: The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
FieldList = AnalyzePcdExpression(Setting)
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_FEATURE_FLAG):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1:
Type = FieldList[1]
# Fix the PCD type when no DataType input
if Type == 'VOID*':
DataType = 'VOID*'
else:
Size = FieldList[1]
if len(FieldList) > 2:
Size = FieldList[2]
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 1)
return [Value, '', Size], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = Type = ''
if len(FieldList) > 1:
Type = FieldList[1]
else:
Type = DataType
if len(FieldList) > 2:
Size = FieldList[2]
else:
if Type == 'VOID*':
if Value.startswith("L"):
Size = str((len(Value)- 3 + 1) * 2)
elif Value.startswith("{"):
Size = str(len(Value.split(",")))
else:
Size = str(len(Value) -2 + 1 )
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 1)
return [Value, Type, Size], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == 'VOID*':
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 2)
return [VpdOffset, Size, Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
HiiString = FieldList[0]
Guid = Offset = Value = Attribute = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
if len(FieldList) > 4:
Attribute = FieldList[4]
IsValid = (3 <= len(FieldList) <= 5)
return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
return [], False, 0
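# Example sketches for the formats above (values are illustrative):
#
#   AnalyzeDscPcd('TRUE', MODEL_PCD_FEATURE_FLAG)
#   -> (['TRUE', '', ''], True, 0)
#
#   AnalyzeDscPcd('L"Token"|VOID*|10', MODEL_PCD_FIXED_AT_BUILD)
#   -> (['L"Token"', '', '10'], True, 0)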
## AnalyzePcdData
#
# Analyze the pcd Value, Datum type and TokenNumber.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting: A string containing value/datum type/token number information;
#
# @retval ValueList: A list containing value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
## AnalyzeHiiPcdData
#
# Analyze the pcd Value, variable name, variable Guid and variable offset.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting: A string containing VariableName, VariableGuid, VariableOffset, DefaultValue information;
#
# @retval ValueList: A list containing VariableName, VariableGuid, VariableOffset, DefaultValue.
#
def AnalyzeHiiPcdData(Setting):
ValueList = ['', '', '', '']
TokenList = GetSplitValueList(Setting)
ValueList[0:len(TokenList)] = TokenList
return ValueList
## AnalyzeVpdPcdData
#
# Analyze the vpd pcd VpdOffset, MaxDatumSize and InitialValue.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting: A string containing VpdOffset/MaxDatumSize/InitialValue information;
#
# @retval ValueList: A list containing VpdOffset, MaxDatumSize and InitialValue.
#
def AnalyzeVpdPcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'\s*L?\".*\|.*\"\s*$')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[2] = PtrValue[0]
return ValueList
## Check the format of a PCD value against its datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
if Type == "VOID*":
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
or (Value.startswith('{') and Value.endswith('}'))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", or \"...\" for string, or L\"...\" for unicode string" % (Value, Type)
elif ValueRe.match(Value):
# Check the chars in UnicodeString or CString is printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = list(Printset)
PrintList.sort()
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
try:
Value = long(Value, 0)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return False, "Invalid type [%s]; must be one of VOID*, BOOLEAN, UINT8, UINT16, UINT32, UINT64." % (Type)
return True, ""
## Split command line option string to list
#
# subprocess.Popen needs the args to be a sequence; otherwise there are problems
# launching the command on non-Windows platforms
#
def SplitOption(OptionString):
OptionList = []
LastChar = " "
OptionStart = 0
QuotationMark = ""
for Index in range(0, len(OptionString)):
CurrentChar = OptionString[Index]
if CurrentChar in ['"', "'"]:
if QuotationMark == CurrentChar:
QuotationMark = ""
elif QuotationMark == "":
QuotationMark = CurrentChar
continue
elif QuotationMark:
continue
if CurrentChar in ["/", "-"] and LastChar in [" ", "\t", "\r", "\n"]:
if Index > OptionStart:
OptionList.append(OptionString[OptionStart:Index - 1])
OptionStart = Index
LastChar = CurrentChar
OptionList.append(OptionString[OptionStart:])
return OptionList
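# Example (a sketch with an arbitrary option string):
#
#   SplitOption('/nologo /E /TC')  -> ['/nologo', '/E', '/TC']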
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
for Index in xrange(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
#
# Convert string to C format array
#
def ConvertStringToByteArray(Value):
Value = Value.strip()
if not Value:
return None
if Value[0] == '{':
if not Value.endswith('}'):
return None
Value = Value.replace(' ', '').replace('{', '').replace('}', '')
ValFields = Value.split(',')
try:
for Index in range(len(ValFields)):
ValFields[Index] = str(int(ValFields[Index], 0))
except ValueError:
return None
Value = '{' + ','.join(ValFields) + '}'
return Value
Unicode = False
if Value.startswith('L"'):
if not Value.endswith('"'):
return None
Value = Value[1:]
Unicode = True
elif not Value.startswith('"') or not Value.endswith('"'):
return None
Value = eval(Value) # translate escape character
NewValue = '{'
for Index in range(0,len(Value)):
if Unicode:
NewValue = NewValue + str(ord(Value[Index]) % 0x10000) + ','
else:
NewValue = NewValue + str(ord(Value[Index]) % 0x100) + ','
Value = NewValue + '0}'
return Value
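# Example sketches (inputs are illustrative):
#
#   ConvertStringToByteArray('"AB"')        -> '{65,66,0}'
#   ConvertStringToByteArray('{0x01, 2}')   -> '{1,2}'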
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Root = mws.getWs(self.Root, self.File)
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
self._Key = None
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
if type(Other) == type(self):
return self.Path == Other.Path
else:
return self.Path == str(Other)
## Override __cmp__ function
#
# Customize the comparison operation of two PathClass objects
#
# @retval 0 The two PathClass are the same
# @retval -1 The first PathClass is less than the second PathClass
# @retval 1 The first PathClass is bigger than the second PathClass
def __cmp__(self, Other):
if type(Other) == type(self):
OtherKey = Other.Path
else:
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
def _GetFileKey(self):
if self._Key == None:
self._Key = self.Path.upper() # + self.ToolChainFamily + self.TagName + self.ToolCode + self.Target
return self._Key
def _GetTimeStamp(self):
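        # os.stat() index 8 is ST_MTIME, the file's last modification time.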
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
if len (mws.getPkgPath()) == 0:
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
else:
return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
Key = property(_GetFileKey)
TimeStamp = property(_GetTimeStamp)
## Parse PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
# @param File FilePath of PeImage
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
self.ErrorInfo = self.FileName + ' can not be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
if ByteArray.tostring() != 'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
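        # PE32/PE32+ optional header field offsets used below:
        # AddressOfEntryPoint (0x10), SectionAlignment (0x20), SizeOfImage (0x38).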
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
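        # Interpret the byte list as a little-endian unsigned integer.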
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
class SkuClass():
DEFAULT = 0
SINGLE = 1
    MULTIPLE = 2
def __init__(self,SkuIdentifier='', SkuIds={}):
self.AvailableSkuIds = sdict()
self.SkuIdSet = []
self.SkuIdNumberSet = []
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
self.SkuIdNumberSet = ['0U']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = SkuIds.keys()
self.SkuIdNumberSet = [num.strip() + 'U' for num in SkuIds.values()]
else:
r = SkuIdentifier.split('|')
            self.SkuIdSet = [Sku.strip() for Sku in r]
k = None
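            # 'k' is pre-initialized so the error message below can always reference it;
            # in Python 2 the comprehension's loop variable leaks, so on failure 'k' holds the offending SKU id.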
try:
self.SkuIdNumberSet = [SkuIds[k].strip() + 'U' for k in self.SkuIdSet]
except Exception:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (k, " | ".join(SkuIds.keys())))
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet and SkuIdentifier != 'ALL':
self.SkuIdSet.remove('DEFAULT')
self.SkuIdNumberSet.remove('0U')
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " | ".join(SkuIds.keys())))
def __SkuUsageType(self):
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
else:
return SkuClass.SINGLE
else:
return SkuClass.MULTIPLE
def __GetAvailableSkuIds(self):
return self.AvailableSkuIds
def __GetSystemSkuID(self):
if self.__SkuUsageType() == SkuClass.SINGLE:
return self.SkuIdSet[0]
else:
return 'DEFAULT'
def __GetAvailableSkuIdNumber(self):
return self.SkuIdNumberSet
SystemSkuId = property(__GetSystemSkuID)
AvailableSkuIdSet = property(__GetAvailableSkuIds)
SkuUsageType = property(__SkuUsageType)
AvailableSkuIdNumSet = property(__GetAvailableSkuIdNumber)
#
# Pack a registry format GUID
#
def PackRegistryFormatGuid(Guid):
Guid = Guid.split('-')
return pack('=LHHBBBBBBBB',
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
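# Illustrative use (example GUID only):
#   PackRegistryFormatGuid('12345678-9ABC-DEF0-1234-56789ABCDEF0')
# packs the five registry-format fields into the 16-byte binary EFI_GUID layout
# (native-endian Data1/Data2/Data3 followed by the eight Data4 bytes).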
def BuildOptionPcdValueFormat(TokenSpaceGuidCName, TokenCName, PcdDatumType, Value):
if PcdDatumType == 'VOID*':
if Value.startswith('L'):
            if len(Value) < 2:
                EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specifying the Value in the command line, please use the following format: "string", L"string", H"{...}"')
            Value = Value[0] + '"' + Value[1:] + '"'
elif Value.startswith('H'):
            if len(Value) < 2:
                EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specifying the Value in the command line, please use the following format: "string", L"string", H"{...}"')
            Value = Value[1:]
else:
            if not Value:
                EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specifying the Value in the command line, please use the following format: "string", L"string", H"{...}"')
            Value = '"' + Value + '"'
IsValid, Cause = CheckPcdDatum(PcdDatumType, Value)
if not IsValid:
EdkLogger.error("build", FORMAT_INVALID, Cause, ExtraData="%s.%s" % (TokenSpaceGuidCName, TokenCName))
if PcdDatumType == 'BOOLEAN':
Value = Value.upper()
if Value == 'TRUE' or Value == '1':
Value = '1'
elif Value == 'FALSE' or Value == '0':
Value = '0'
return Value
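# Illustrative conversions (assumed inputs, and assuming CheckPcdDatum accepts them):
#   BuildOptionPcdValueFormat(Guid, 'PcdFoo', 'VOID*', 'Lhello')  -> 'L"hello"'
#   BuildOptionPcdValueFormat(Guid, 'PcdBar', 'BOOLEAN', 'TRUE')  -> '1'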
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
pass
|
runtime.py
|
from metatools.imports import load_entrypoint
from Queue import Queue
import argparse
import datetime
import json
import os
import sys
import threading
import weakref
import traceback
NoResult = object()
def log(msg, *args, **kwargs):
if args or kwargs:
msg = msg.format(*args, **kwargs)
msg = '[mm52x2] {} {}\n'.format(datetime.datetime.now().isoformat('T'), msg.strip())
sys.stderr.write(msg)
sys.stderr.flush()
def send(msg=None, **kwargs):
msg = dict(msg or {}).copy()
msg.update(kwargs)
encoded = json.dumps(msg, sort_keys=True)
log('Send: ' + encoded)
sys.__stdout__.write(encoded + '\n')
sys.__stdout__.flush()
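# Illustrative wire format (example values only): one JSON object per line on
# stdin/stdout, dispatched on its 'type' field. The entrypoint string for 'call'
# is whatever metatools' load_entrypoint understands; a 'module:function' form
# is assumed here.
#   {"type": "hello", "file": "/path/to/runtime.py"}
#   {"type": "call", "id": 1, "func": "package.module:function", "args": [1, 2]}
#   {"type": "result", "id": 1, "result": 3}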
def reply(src, msg=None, **kwargs):
msg = dict(msg or {}).copy()
msg.update(kwargs)
id_ = src.get('id')
if id_ is not None:
msg.setdefault('id', id_)
send(msg)
def format_exception(e):
return dict(type='error', error_type=e.__class__.__name__, error=str(e))
_handlers = {}
def register(func):
name = func.__name__
if name.startswith('on_'):
name = name[3:]
_handlers[name] = func
return func
@register
def on_hello(**kw):
kw['type'] = 'elloh'
return kw
@register
def on_elloh(**kw):
return NoResult
@register
def on_ping(**kw):
kw['type'] = 'pong'
return kw
@register
def on_pong(**kw):
return NoResult
@register
def on_debug_raise_error(**kw):
raise ValueError(kw.get('message', 'This is a test.'))
def debug_environ():
return '\n'.join('{}: {!r}'.format(*x) for x in sorted(os.environ.iteritems()))
_call_count = 0
_call_threads = weakref.WeakValueDictionary()
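# Track call threads by call number without keeping them alive; entries vanish
# once a thread object is garbage collected.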
@register
def on_call(func, args=None, kwargs=None, **msg):
global _call_count
_call_count += 1
thread = _call_threads[_call_count] = threading.Thread(target=_call_thread, args=[msg, func, args, kwargs])
thread.daemon = True
thread.message = msg
thread.start()
del thread # Kill this reference immediately.
return NoResult
def _call_thread(msg, entrypoint, args, kwargs):
log('Calling: {}', entrypoint)
try:
func = load_entrypoint(entrypoint, reload=None)
except Exception as e:
reply(msg, format_exception(e))
return
if not func:
send(type='error', error='Could not load entrypoint.', detail=entrypoint)
return
args = args or ()
kwargs = kwargs or {}
try:
res = func(*args, **kwargs)
except Exception as e:
traceback.print_exc()
reply(msg, format_exception(e))
return
if res is NoResult:
return
reply(msg, type='result', result=res)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
# We need to take over both stdout and stderr so that print statements
# don't result in Premiere thinking it is getting a message back.
if args.verbose:
sys.stdout = sys.stderr
else:
sys.stdout = open('/tmp/mm52x2-premiere-runtime.log', 'a')
sys.stderr = sys.stdout
log('Starting.')
send(
type='hello',
file=__file__,
)
while True:
encoded = sys.stdin.readline()
if not encoded:
log('stdin closed.')
return
encoded = encoded.strip()
if not encoded:
continue
log('Recv: {}', encoded)
try:
msg = json.loads(encoded)
except ValueError as e:
send(type='error', error='malformed message', detail='{}: {}'.format(e.__class__.__name__, e))
continue
if not isinstance(msg, dict):
send(type='error', error='malformed message', detail='non-dict')
continue
type_ = msg.pop('type', None)
id_ = msg.get('id')
if not type_:
send(type='error', error='malformed message', detail='no type')
continue
func = _handlers.get(type_)
if not func:
send(type='error', error='unknown message type', detail=type_)
continue
try:
res = func(**msg)
except Exception as e:
send(format_exception(e))
continue
if res is NoResult:
continue
if not isinstance(res, dict):
res = {'value': res}
res.setdefault('type', 'result')
res.setdefault('id', id_)
send(res)
if __name__ == '__main__':
main()
|
recipe-577129.py
|
#!/usr/bin/python
"""
Run asynchronous tasks in gobject using coroutines. Terminology used:
* Job: A coroutine that yields tasks.
* Task: A function which returns a callable taking a single parameter
(task_return); the task calls task_return with its result when it finishes.
Tasks themselves must be asynchronous (they are run in the main thread
of the event loop), so you will probably use functions like gobject.idle_add/
timeout_add/io_add_watch to implement them. If you cannot write your
task in an asynchronous way (e.g. a blocking IO operation), you can
always use a generic threaded_task (see example below).
"""
import gobject
def start_job(generator):
"""Start a job (a coroutine that yield generic tasks)."""
def _task_return(result):
"""Function to be sent to tasks to be used as task_return."""
def _advance_generator():
try:
new_task = generator.send(result)
except StopIteration:
return
new_task(_task_return)
# make sure the generator is advanced in the main thread
gobject.idle_add(_advance_generator)
_task_return(None)
return generator
# 2 task examples: sleep_task, threaded_task
def sleep_task(secs):
"""Suspend job for the given number of seconds and return elapsed time."""
def _task(task_return):
start_time = time.time()
def _on_timeout():
task_return(time.time() - start_time)
gobject.timeout_add(int(secs * 1000), _on_timeout)
return _task
import threading
gobject.threads_init()
def threaded_task(function, *args, **kwargs):
"""Run function(*args, **kwargs) inside a thread and return the result."""
def _task(task_return):
def _thread():
result = function(*args, **kwargs)
gobject.idle_add(task_return, result)
thread = threading.Thread(target=_thread, args=())
thread.setDaemon(True)
thread.start()
return _task
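# A minimal sketch (illustrative only, not used in the example below; assumes
# fileobj is a Python file object) of a third kind of task built on
# gobject.io_add_watch: resume the job when the file becomes readable and
# return whatever could be read from it.
def readable_task(fileobj, size=4096):
    """Wait until fileobj is readable and return up to `size` bytes."""
    def _task(task_return):
        def _on_readable(source, condition):
            task_return(source.read(size))
            return False  # one-shot: remove the watch after it fires
        gobject.io_add_watch(fileobj, gobject.IO_IN, _on_readable)
    return _task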
# Example of usage
import sys
import time
import random
import urllib2
def myjob(url):
def download(url):
return urllib2.urlopen(url).read()
elapsed = yield sleep_task(random.uniform(0.0, 3.0))
sys.stderr.write("[slept_for:%0.2f]" % elapsed)
sys.stderr.write("[start_download:%s]" % url)
html = yield threaded_task(download, url)
sys.stderr.write("[done:%s:%d]" % (url, len(html)))
def basso_continuo():
sys.stderr.write(".")
return True
urls = ["http://www.google.com", "http://python.com", "http://www.pygtk.org"]
jobs = [start_job(myjob(url)) for url in urls]
# See how easily we can raise an exception in the job coroutine:
# gobject.timeout_add(1000, lambda: jobs[0].throw(JobStopped))
gobject.timeout_add(100, basso_continuo)
loop = gobject.MainLoop()
loop.run()
|
shotgunEventDaemonHttpd.py
|
#!/usr/bin/env python
#
# Init file for Shotgun event daemon
#
# chkconfig: 345 99 00
# description: Shotgun event daemon
#
### BEGIN INIT INFO
# Provides: shotgunEvent
# Required-Start: $network
# Should-Start: $remote_fs
# Required-Stop: $network
# Should-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Short-Description: Shotgun event daemon
# Description: Shotgun event daemon
### END INIT INFO
"""
For an overview of shotgunEvents, please see raw documentation in the docs
folder or an html compiled version at:
http://shotgunsoftware.github.com/shotgunEvents
"""
__version__ = '0.9'
__version_info__ = (0, 9)
import ConfigParser
import datetime
import imp
import logging
import logging.handlers
import SQLiteHandler
import os
import pprint
import socket
import sys
import time
import traceback
import threading
import Queue
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from distutils.version import StrictVersion
try:
import cPickle as pickle
except ImportError:
import pickle
import daemonizer
import shotgun_api3 as sg
CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0])
PYTHON_25 = StrictVersion('2.5')
PYTHON_26 = StrictVersion('2.6')
PYTHON_27 = StrictVersion('2.7')
if CURRENT_PYTHON_VERSION > PYTHON_25:
EMAIL_FORMAT_STRING = """Time: %(asctime)s
Logger: %(name)s
Path: %(pathname)s
Function: %(funcName)s
Line: %(lineno)d
%(message)s"""
else:
EMAIL_FORMAT_STRING = """Time: %(asctime)s
Logger: %(name)s
Path: %(pathname)s
Line: %(lineno)d
%(message)s"""
my_queue = Queue.Queue()
def _setFilePathOnLogger(logger, path):
# Remove any previous handler.
_removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler)
# Add the file handler
handler = logging.handlers.TimedRotatingFileHandler(path, 'midnight', backupCount=10)
handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(handler)
sh = SQLiteHandler.SQLiteHandler(db="test.db")
sh.setLevel(logging.INFO)
logger.addHandler(sh)
def _removeHandlersFromLogger(logger, handlerTypes=None):
"""
Remove all handlers or handlers of a specified type from a logger.
    @param logger: The logger whose handlers should be processed.
@type logger: A logging.Logger object
@param handlerTypes: A type of handler or list/tuple of types of handlers
that should be removed from the logger. If I{None}, all handlers are
removed.
@type handlerTypes: L{None}, a logging.Handler subclass or
I{list}/I{tuple} of logging.Handler subclasses.
"""
for handler in logger.handlers:
if handlerTypes is None or isinstance(handler, handlerTypes):
logger.removeHandler(handler)
def _addMailHandlerToLogger(logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None):
"""
Configure a logger with a handler that sends emails to specified
addresses.
The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}.
@note: Any SMTPHandler already connected to the logger will be removed.
@param logger: The logger to configure
@type logger: A logging.Logger instance
@param toAddrs: The addresses to send the email to.
@type toAddrs: A list of email addresses that will be passed on to the
SMTPHandler.
"""
if smtpServer and fromAddr and toAddrs and emailSubject:
mailHandler = CustomSMTPHandler(smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure)
mailHandler.setLevel(logging.ERROR)
mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING)
mailHandler.setFormatter(mailFormatter)
logger.addHandler(mailHandler)
class Config(ConfigParser.ConfigParser):
def __init__(self, path):
ConfigParser.ConfigParser.__init__(self)
self.read(path)
def getShotgunURL(self):
return self.get('shotgun', 'server')
def getEngineScriptName(self):
return self.get('shotgun', 'name')
def getEngineScriptKey(self):
return self.get('shotgun', 'key')
def getEngineProxyServer(self):
try:
proxy_server = self.get('shotgun', 'proxy_server').strip()
if not proxy_server:
return None
return proxy_server
except ConfigParser.NoOptionError:
return None
def getEventIdFile(self):
return self.get('daemon', 'eventIdFile')
def getEnginePIDFile(self):
return self.get('daemon', 'pidFile')
def getPluginPaths(self):
return [s.strip() for s in self.get('plugins', 'paths').split(',')]
def getSMTPServer(self):
return self.get('emails', 'server')
def getSMTPPort(self):
if self.has_option('emails', 'port'):
return self.getint('emails', 'port')
return 25
def getFromAddr(self):
return self.get('emails', 'from')
def getToAddrs(self):
return [s.strip() for s in self.get('emails', 'to').split(',')]
def getEmailSubject(self):
return self.get('emails', 'subject')
def getEmailUsername(self):
if self.has_option('emails', 'username'):
return self.get('emails', 'username')
return None
def getEmailPassword(self):
if self.has_option('emails', 'password'):
return self.get('emails', 'password')
return None
def getSecureSMTP(self):
if self.has_option('emails', 'useTLS'):
return self.getboolean('emails', 'useTLS') or False
return False
def getLogMode(self):
return self.getint('daemon', 'logMode')
def getLogLevel(self):
return self.getint('daemon', 'logging')
def getMaxEventBatchSize(self):
if self.has_option('daemon', 'max_event_batch_size'):
return self.getint('daemon', 'max_event_batch_size')
return 500
def getLogFile(self, filename=None):
if filename is None:
if self.has_option('daemon', 'logFile'):
filename = self.get('daemon', 'logFile')
else:
raise ConfigError('The config file has no logFile option.')
if self.has_option('daemon', 'logPath'):
path = self.get('daemon', 'logPath')
if not os.path.exists(path):
os.makedirs(path)
elif not os.path.isdir(path):
raise ConfigError('The logPath value in the config should point to a directory.')
path = os.path.join(path, filename)
else:
path = filename
return path
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
global my_queue
self._set_headers()
lstplug = my_queue.get()
self.wfile.write("<html><body><h1>hi! </h1><ol>")
for plugc in lstplug:
for plug in plugc:
self.wfile.write("<li> %s " % plug.getName())
self.wfile.write("</ol> </body></html>")
def do_HEAD(self):
self._set_headers()
def do_POST(self):
# Doesn't do anything with posted data
self._set_headers()
self.wfile.write("<html><body><h1>POST!</h1></body></html>")
def worker():
server_address = ('localhost', 5000)
httpd = HTTPServer(server_address, S)
print 'Starting httpd...'
httpd.serve_forever()
return
class Engine(object):
"""
The engine holds the main loop of event processing.
"""
def __init__(self, configPath):
"""
"""
self._continue = True
self._eventIdData = {}
# Read/parse the config
self.config = Config(configPath)
# Get config values
self._pluginCollections = [PluginCollection(self, s) for s in self.config.getPluginPaths()]
self._sg = sg.Shotgun(
self.config.getShotgunURL(),
self.config.getEngineScriptName(),
self.config.getEngineScriptKey()
)
self._max_conn_retries = self.config.getint('daemon', 'max_conn_retries')
self._conn_retry_sleep = self.config.getint('daemon', 'conn_retry_sleep')
self._fetch_interval = self.config.getint('daemon', 'fetch_interval')
self._use_session_uuid = self.config.getboolean('shotgun', 'use_session_uuid')
# Setup the logger for the main engine
if self.config.getLogMode() == 0:
# Set the root logger for file output.
rootLogger = logging.getLogger()
rootLogger.config = self.config
_setFilePathOnLogger(rootLogger, self.config.getLogFile())
print self.config.getLogFile()
# Set the engine logger for email output.
self.log = logging.getLogger('engine')
self.setEmailsOnLogger(self.log, True)
else:
# Set the engine logger for file and email output.
self.log = logging.getLogger('engine')
self.log.config = self.config
_setFilePathOnLogger(self.log, self.config.getLogFile())
self.setEmailsOnLogger(self.log, True)
self.log.setLevel(self.config.getLogLevel())
self.t = threading.Thread(target=worker)
self.t.start()
super(Engine, self).__init__()
def setLogger(self, logger):
self.log = logger
def setEmailsOnLogger(self, logger, emails):
# Configure the logger for email output
_removeHandlersFromLogger(logger, logging.handlers.SMTPHandler)
if emails is False:
return
smtpServer = self.config.getSMTPServer()
smtpPort = self.config.getSMTPPort()
fromAddr = self.config.getFromAddr()
emailSubject = self.config.getEmailSubject()
username = self.config.getEmailUsername()
password = self.config.getEmailPassword()
if self.config.getSecureSMTP():
secure = (None, None)
else:
secure = None
if emails is True:
toAddrs = self.config.getToAddrs()
elif isinstance(emails, (list, tuple)):
toAddrs = emails
else:
            msg = 'Argument emails should be True to use the default addresses, False to not send any emails, or a list of recipient addresses. Got %s.'
raise ValueError(msg % type(emails))
"""
_addMailHandlerToLogger(
logger, (smtpServer, smtpPort), fromAddr, toAddrs, emailSubject, username, password, secure
)
"""
def start(self):
"""
Start the processing of events.
The last processed id is loaded up from persistent storage on disk and
the main loop is started.
"""
# TODO: Take value from config
socket.setdefaulttimeout(60)
# Notify which version of shotgun api we are using
self.log.info('Using Shotgun version %s' % sg.__version__)
try:
for collection in self._pluginCollections:
collection.load()
self._loadEventIdData()
self._mainLoop()
except KeyboardInterrupt:
self.log.warning('Keyboard interrupt. Cleaning up...')
except Exception, err:
pass
#msg = 'Crash!!!!! Unexpected error (%s) in main loop.\n\n'
#self.log.critical(sg, str(type(err)).replace("'",""))
#self.log.critical(msg, type(err), traceback.format_exc(err))
def _loadEventIdData(self):
"""
Load the last processed event id from the disk
If no event has ever been processed or if the eventIdFile has been
deleted from disk, no id will be recoverable. In this case, we will try
contacting Shotgun to get the latest event's id and we'll start
processing from there.
"""
eventIdFile = self.config.getEventIdFile()
if eventIdFile and os.path.exists(eventIdFile):
try:
fh = open(eventIdFile)
try:
self._eventIdData = pickle.load(fh)
# Provide event id info to the plugin collections. Once
# they've figured out what to do with it, ask them for their
# last processed id.
noStateCollections = []
for collection in self._pluginCollections:
state = self._eventIdData.get(collection.path)
if state:
collection.setState(state)
else:
noStateCollections.append(collection)
# If we don't have a state it means there's no match
# in the id file. First we'll search to see the latest id a
# matching plugin name has elsewhere in the id file. We do
# this as a fallback in case the plugins directory has been
# moved. If there's no match, use the latest event id
# in Shotgun.
if noStateCollections:
maxPluginStates = {}
for collection in self._eventIdData.values():
for pluginName, pluginState in collection.items():
if pluginName in maxPluginStates.keys():
if pluginState[0] > maxPluginStates[pluginName][0]:
maxPluginStates[pluginName] = pluginState
else:
maxPluginStates[pluginName] = pluginState
lastEventId = self._getLastEventIdFromDatabase()
for collection in noStateCollections:
state = collection.getState()
for pluginName in state.keys():
if pluginName in maxPluginStates.keys():
state[pluginName] = maxPluginStates[pluginName]
else:
state[pluginName] = lastEventId
collection.setState(state)
except pickle.UnpicklingError:
fh.close()
# Backwards compatibility:
# Reopen the file to try to read an old-style int
fh = open(eventIdFile)
line = fh.readline().strip()
if line.isdigit():
# The _loadEventIdData got an old-style id file containing a single
# int which is the last id properly processed.
lastEventId = int(line)
self.log.debug('Read last event id (%d) from file.', lastEventId)
for collection in self._pluginCollections:
collection.setState(lastEventId)
fh.close()
except OSError, err:
raise EventDaemonError('Could not load event id from file.\n\n%s' % traceback.format_exc(err))
else:
# No id file?
# Get the event data from the database.
lastEventId = self._getLastEventIdFromDatabase()
if lastEventId:
for collection in self._pluginCollections:
collection.setState(lastEventId)
self._saveEventIdData()
def _getLastEventIdFromDatabase(self):
conn_attempts = 0
lastEventId = None
while lastEventId is None:
order = [{'column':'id', 'direction':'desc'}]
try:
result = self._sg.find_one("EventLogEntry", filters=[], fields=['id'], order=order)
except (sg.ProtocolError, sg.ResponseError, socket.error), err:
conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err))
except Exception, err:
msg = "Unknown error: %s" % str(err)
conn_attempts = self._checkConnectionAttempts(conn_attempts, msg)
#CV ADD WARNING
lastEventId = -1
else:
lastEventId = result['id']
self.log.info('Last event id (%d) from the Shotgun database.', lastEventId)
return lastEventId
def _mainLoop(self):
"""
Run the event processing loop.
General behavior:
- Load plugins from disk - see L{load} method.
- Get new events from Shotgun
- Loop through events
- Loop through each plugin
- Loop through each callback
- Send the callback an event
- Once all callbacks are done in all plugins, save the eventId
- Go to the next event
- Once all events are processed, wait for the defined fetch interval time and start over.
Caveats:
        - If a plugin is deemed "inactive" (an error occurred during
          registration), skip it.
        - If a callback is deemed "inactive" (an error occurred during callback
          execution), skip it.
- Each time through the loop, if the pidFile is gone, stop.
"""
global my_queue
self.log.debug('Starting the event processing loop.')
while self._continue:
# Process events
events = self._getNewEvents()
for event in events:
for collection in self._pluginCollections:
collection.process(event)
self._saveEventIdData()
my_queue.put(self._pluginCollections)
# if we're lagging behind Shotgun, we received a full batch of events
# skip the sleep() call in this case
if len(events) < self.config.getMaxEventBatchSize():
time.sleep(self._fetch_interval)
# Reload plugins
for collection in self._pluginCollections:
collection.load()
# Make sure that newly loaded events have proper state.
self._loadEventIdData()
        self.log.debug('Shutting down event processing loop.')
def stop(self):
self._continue = False
def _getNewEvents(self):
"""
Fetch new events from Shotgun.
@return: Recent events that need to be processed by the engine.
@rtype: I{list} of Shotgun event dictionaries.
"""
nextEventId = None
for newId in [coll.getNextUnprocessedEventId() for coll in self._pluginCollections]:
if newId is not None and (nextEventId is None or newId < nextEventId):
nextEventId = newId
if nextEventId is not None:
filters = [['id', 'greater_than', nextEventId - 1]]
fields = ['id', 'event_type', 'attribute_name', 'meta', 'entity', 'user', 'project', 'session_uuid', 'created_at']
order = [{'column':'id', 'direction':'asc'}]
conn_attempts = 0
while True:
try:
events = self._sg.find("EventLogEntry", filters, fields, order, limit=self.config.getMaxEventBatchSize())
if events:
self.log.debug('Got %d events: %d to %d.', len(events), events[0]['id'], events[-1]['id'])
return events
except (sg.ProtocolError, sg.ResponseError, socket.error), err:
conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err))
except Exception, err:
msg = "Unknown error: %s" % str(err)
conn_attempts = self._checkConnectionAttempts(conn_attempts, msg)
#CV ADD WARNING
break
return []
def _saveEventIdData(self):
"""
        Save an event Id to persistent storage.
Next time the engine is started it will try to read the event id from
this location to know at which event it should start processing.
"""
eventIdFile = self.config.getEventIdFile()
if eventIdFile is not None:
for collection in self._pluginCollections:
self._eventIdData[collection.path] = collection.getState()
for colPath, state in self._eventIdData.items():
if state:
try:
fh = open(eventIdFile, 'w')
pickle.dump(self._eventIdData, fh)
fh.close()
except OSError, err:
self.log.error('Can not write event id data to %s.\n\n%s', eventIdFile, traceback.format_exc(err))
break
else:
self.log.warning('No state was found. Not saving to disk.')
def _checkConnectionAttempts(self, conn_attempts, msg):
conn_attempts += 1
if conn_attempts == self._max_conn_retries:
self.log.error('Unable to connect to Shotgun (attempt %s of %s): %s', conn_attempts, self._max_conn_retries, msg)
conn_attempts = 0
time.sleep(self._conn_retry_sleep)
else:
self.log.warning('Unable to connect to Shotgun (attempt %s of %s): %s', conn_attempts, self._max_conn_retries, msg)
return conn_attempts
class PluginCollection(object):
"""
A group of plugin files in a location on the disk.
"""
def __init__(self, engine, path):
if not os.path.isdir(path):
raise ValueError('Invalid path: %s' % path)
self._engine = engine
self.path = path
self._plugins = {}
self._stateData = {}
def setState(self, state):
if isinstance(state, int):
for plugin in self:
plugin.setState(state)
self._stateData[plugin.getName()] = plugin.getState()
else:
self._stateData = state
for plugin in self:
pluginState = self._stateData.get(plugin.getName())
if pluginState:
plugin.setState(pluginState)
def getState(self):
for plugin in self:
self._stateData[plugin.getName()] = plugin.getState()
return self._stateData
def getNextUnprocessedEventId(self):
eId = None
for plugin in self:
if not plugin.isActive():
continue
newId = plugin.getNextUnprocessedEventId()
if newId is not None and (eId is None or newId < eId):
eId = newId
return eId
def process(self, event):
for plugin in self:
if plugin.isActive():
plugin.process(event)
else:
plugin.logger.debug('Skipping: inactive.')
def load(self):
"""
Load plugins from disk.
General behavior:
- Loop on all paths.
- Find all valid .py plugin files.
- Loop on all plugin files.
- For any new plugins, load them, otherwise, refresh them.
"""
newPlugins = {}
for basename in os.listdir(self.path):
if not basename.endswith('.py') or basename.startswith('.'):
continue
if basename in self._plugins:
newPlugins[basename] = self._plugins[basename]
else:
newPlugins[basename] = Plugin(self._engine, os.path.join(self.path, basename))
newPlugins[basename].load()
self._plugins = newPlugins
def __iter__(self):
for basename in sorted(self._plugins.keys()):
yield self._plugins[basename]
class Plugin(object):
"""
The plugin class represents a file on disk which contains one or more
callbacks.
"""
def __init__(self, engine, path):
"""
        @param engine: The engine that instantiated this plugin.
@type engine: L{Engine}
@param path: The path of the plugin file to load.
@type path: I{str}
@raise ValueError: If the path to the plugin is not a valid file.
"""
self._engine = engine
self._path = path
if not os.path.isfile(path):
raise ValueError('The path to the plugin is not a valid file - %s.' % path)
self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0]
self._active = True
self._callbacks = []
self._mtime = None
self._lastEventId = None
self._backlog = {}
# Setup the plugin's logger
self.logger = logging.getLogger('plugin.' + self.getName())
self.logger.config = self._engine.config
self._engine.setEmailsOnLogger(self.logger, True)
self.logger.setLevel(self._engine.config.getLogLevel())
if self._engine.config.getLogMode() == 1:
_setFilePathOnLogger(self.logger, self._engine.config.getLogFile('plugin.' + self.getName()))
def getName(self):
return self._pluginName
def setState(self, state):
if isinstance(state, int):
self._lastEventId = state
elif isinstance(state, tuple):
self._lastEventId, self._backlog = state
else:
raise ValueError('Unknown state type: %s.' % type(state))
def getState(self):
return (self._lastEventId, self._backlog)
def getNextUnprocessedEventId(self):
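        # NOTE: the unconditional return below makes the backlog-scanning code that
        # follows unreachable; it looks like a deliberate local shortcut.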
return self._lastEventId
if self._lastEventId:
nextId = self._lastEventId + 1
else:
nextId = None
now = datetime.datetime.now()
for k in self._backlog.keys():
v = self._backlog[k]
if v < now:
self.logger.warning('Timeout elapsed on backlog event id %d.', k)
del(self._backlog[k])
elif nextId is None or k < nextId:
nextId = k
return nextId
def isActive(self):
"""
        Is the current plugin active? Should its callbacks be run?
@return: True if this plugin's callbacks should be run, False otherwise.
@rtype: I{bool}
"""
return self._active
def setEmails(self, *emails):
"""
Set the email addresses to whom this plugin should send errors.
@param emails: See L{LogFactory.getLogger}'s emails argument for info.
@type emails: A I{list}/I{tuple} of email addresses or I{bool}.
"""
self._engine.setEmailsOnLogger(self.logger, emails)
def load(self):
"""
Load/Reload the plugin and all its callbacks.
If a plugin has never been loaded it will be loaded normally. If the
plugin has been loaded before it will be reloaded only if the file has
been modified on disk. In this event callbacks will all be cleared and
reloaded.
General behavior:
- Try to load the source of the plugin.
- Try to find a function called registerCallbacks in the file.
- Try to run the registration function.
At every step along the way, if any error occurs the whole plugin will
be deactivated and the function will return.
"""
# Check file mtime
mtime = os.path.getmtime(self._path)
if self._mtime is None:
self._engine.log.info('Loading plugin at %s' % self._path)
elif self._mtime < mtime:
self._engine.log.info('Reloading plugin at %s' % self._path)
else:
            # The file's mtime is unchanged or older; nothing needs to be done.
return
# Reset values
self._mtime = mtime
self._callbacks = []
self._active = True
try:
plugin = imp.load_source(self._pluginName, self._path)
except:
self._active = False
self.logger.error('Could not load the plugin at %s.\n\n%s', self._path, traceback.format_exc())
return
regFunc = getattr(plugin, 'registerCallbacks', None)
if callable(regFunc):
try:
regFunc(Registrar(self))
except Exception as e:
print(e.message)
self._engine.log.critical('Error running register callback function from plugin at %s.\n\n%s', self._path, traceback.format_exc())
self._active = False
else:
self._engine.log.critical('Did not find a registerCallbacks function in plugin at %s.', self._path)
self._active = False
def registerCallback(self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True):
"""
Register a callback in the plugin.
"""
#global sg
#sgConnection = sg.Shotgun(self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey,
# http_proxy=self._engine.config.getEngineProxyServer())
self._callbacks.append(Callback(callback, self, self._engine, None, matchEvents, args, stopOnError))
def process(self, event):
"""
if event['id'] in self._backlog:
if self._process(event):
self.logger.info('Processed id %d from backlog.' % event['id'])
del(self._backlog[event['id']])
self._updateLastEventId(event)
elif self._lastEventId is not None and event['id'] <= self._lastEventId:
msg = 'Event %d is too old. Last event processed was (%d).'
self.logger.debug(msg, event['id'], self._lastEventId)
else:
"""
if self._process(event):
self._updateLastEventId(event)
return self._active
def _process(self, event):
for callback in self:
if callback.isActive():
if callback.canProcess(event):
msg = 'Dispatching event %d to callback %s.'
self.logger.debug(msg, event['id'], str(callback))
if not callback.process(event):
# A callback in the plugin failed. Deactivate the whole
# plugin.
self._active = False
break
else:
msg = 'Skipping inactive callback %s in plugin.'
self.logger.debug(msg, str(callback))
return self._active
def _updateLastEventId(self, event):
BACKLOG_TIMEOUT = 5 # time in minutes after which we consider a pending event won't happen
if self._lastEventId is not None and event["id"] > self._lastEventId + 1:
event_date = event["created_at"].replace(tzinfo=None)
if datetime.datetime.now() > (event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT)):
# the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event
# with a lower id should have shown up in the EventLog by now if it actually happened
if event["id"]==self._lastEventId+2:
self.logger.info('Event %d never happened - ignoring.', self._lastEventId+1)
else:
self.logger.info('Events %d-%d never happened - ignoring.', self._lastEventId+1, event["id"]-1)
else:
                # in this case, we want to add the missing events to the backlog as they could show up in the
                # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range of
                # ids, waiting for them to show up, until they expire
expiration = datetime.datetime.now() + datetime.timedelta(minutes=BACKLOG_TIMEOUT)
for skippedId in range(self._lastEventId + 1, event["id"]):
self.logger.info('Adding event id %d to backlog.', skippedId)
self._backlog[skippedId] = expiration
self._lastEventId = event["id"]
def __iter__(self):
"""
A plugin is iterable and will iterate over all its L{Callback} objects.
"""
return self._callbacks.__iter__()
def __str__(self):
"""
Provide the name of the plugin when it is cast as string.
@return: The name of the plugin.
@rtype: I{str}
"""
return self.getName()
class Registrar(object):
"""
See public API docs in docs folder.
"""
def __init__(self, plugin):
"""
Wrap a plugin so it can be passed to a user.
"""
self._plugin = plugin
self._allowed = ['logger', 'setEmails', 'registerCallback']
def getConfig(self):
return self._plugin._engine.config
def getLogger(self):
"""
Get the logger for this plugin.
@return: The logger configured for this plugin.
@rtype: L{logging.Logger}
"""
# TODO: Fix this ugly protected member access
return self.logger
def __getattr__(self, name):
if name in self._allowed:
return getattr(self._plugin, name)
raise AttributeError("type object '%s' has no attribute '%s'" % (type(self).__name__, name))
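# A minimal sketch of a plugin module this daemon could load from one of the
# configured plugin paths (assumption: 'script_name'/'script_key' are
# placeholders). A real plugin must expose a module-level registerCallbacks
# function; underscore names are used here only so this inline illustration is
# never mistaken for an actual entry point.
def _example_registerCallbacks(reg):
    # Only dispatch newly created Version entities to the callback below.
    reg.registerCallback('script_name', 'script_key', _example_logEvent,
                         matchEvents={'Shotgun_Version_New': ['*']}, args=None)
def _example_logEvent(sg, logger, event, args, extra):
    # Note: this local variant passes an extra positional argument to callbacks.
    logger.info('Processing event %d (%s).', event['id'], event['event_type'])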
class Callback(object):
"""
A part of a plugin that can be called to process a Shotgun event.
"""
def __init__(self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True):
"""
@param callback: The function to run when a Shotgun event occurs.
@type callback: A function object.
@param engine: The engine that will dispatch to this callback.
@type engine: L{Engine}.
@param shotgun: The Shotgun instance that will be used to communicate
with your Shotgun server.
@type shotgun: L{sg.Shotgun}
@param matchEvents: The event filter to match events against before invoking callback.
@type matchEvents: dict
@param args: Any datastructure you would like to be passed to your
callback function. Defaults to None.
@type args: Any object.
@raise TypeError: If the callback is not a callable object.
"""
if not callable(callback):
raise TypeError('The callback must be a callable object (function, method or callable class instance).')
self._name = None
self._shotgun = shotgun
self._callback = callback
self._engine = engine
self._logger = None
self._matchEvents = matchEvents
self._args = args
self._stopOnError = stopOnError
self._active = True
# Find a name for this object
if hasattr(callback, '__name__'):
self._name = callback.__name__
elif hasattr(callback, '__class__') and hasattr(callback, '__call__'):
self._name = '%s_%s' % (callback.__class__.__name__, hex(id(callback)))
else:
raise ValueError('registerCallback should be called with a function or a callable object instance as callback argument.')
# TODO: Get rid of this protected member access
self._logger = logging.getLogger(plugin.logger.name + '.' + self._name)
self._logger.config = self._engine.config
def canProcess(self, event):
if not self._matchEvents:
return True
if '*' in self._matchEvents:
eventType = '*'
else:
eventType = event['event_type']
if eventType not in self._matchEvents:
return False
attributes = self._matchEvents[eventType]
if attributes is None or '*' in attributes:
return True
if event['attribute_name'] and event['attribute_name'] in attributes:
return True
return False
def process(self, event):
"""
Process an event with the callback object supplied on initialization.
If an error occurs, it will be logged appropriately and the callback
will be deactivated.
@param event: The Shotgun event to process.
@type event: I{dict}
"""
# set session_uuid for UI updates
#if self._engine._use_session_uuid:
# self._shotgun.set_session_uuid(event['session_uuid'])
try:
self._callback(self._shotgun, self._logger, event, self._args, "tptp")
except Exception as e:
print(e)
# Get the local variables of the frame of our plugin
tb = sys.exc_info()[2]
stack = []
while tb:
stack.append(tb.tb_frame)
tb = tb.tb_next
            msg = 'An error occurred processing an event.\n\n%s\n\nLocal variables at outermost frame in plugin:\n\n%s'
self._logger.critical(msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals))
if self._stopOnError:
self._active = False
return self._active
def isActive(self):
"""
Check if this callback is active, i.e. if events should be passed to it
for processing.
@return: True if this callback should process events, False otherwise.
@rtype: I{bool}
"""
return self._active
def __str__(self):
"""
The name of the callback.
@return: The name of the callback
@rtype: I{str}
"""
return self._name
class CustomSMTPHandler(logging.handlers.SMTPHandler):
"""
    A custom SMTPHandler subclass that will adapt its subject depending on the
error severity.
"""
LEVEL_SUBJECTS = {
logging.ERROR: 'ERROR - Shotgun event daemon.',
logging.CRITICAL: 'CRITICAL - Shotgun event daemon.',
}
def __init__(self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None):
args = [smtpServer, fromAddr, toAddrs, emailSubject]
if credentials:
# Python 2.6 implemented the credentials argument
if CURRENT_PYTHON_VERSION >= PYTHON_26:
args.append(credentials)
else:
if isinstance(credentials, tuple):
self.username, self.password = credentials
else:
self.username = None
# Python 2.7 implemented the secure argument
if CURRENT_PYTHON_VERSION >= PYTHON_27:
args.append(secure)
else:
self.secure = secure
logging.handlers.SMTPHandler.__init__(self, *args)
def getSubject(self, record):
subject = logging.handlers.SMTPHandler.getSubject(self, record)
if record.levelno in self.LEVEL_SUBJECTS:
return subject + ' ' + self.LEVEL_SUBJECTS[record.levelno]
return subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
# If the socket timeout isn't None, in Python 2.4 the socket read
# following enabling starttls() will hang. The default timeout will
# be reset to 60 later in 2 locations because Python 2.4 doesn't support
# except and finally in the same try block.
if CURRENT_PYTHON_VERSION >= PYTHON_25:
socket.setdefaulttimeout(None)
# Mostly copied from Python 2.7 implementation.
# Using email.Utils instead of email.utils for 2.4 compat.
try:
import smtplib
from email.Utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP()
smtp.connect(self.mailhost, port)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.close()
except (KeyboardInterrupt, SystemExit):
socket.setdefaulttimeout(60)
raise
except:
self.handleError(record)
socket.setdefaulttimeout(60)
class EventDaemonError(Exception):
"""
Base error for the Shotgun event system.
"""
pass
class ConfigError(EventDaemonError):
"""
Used when an error is detected in the config file.
"""
pass
class LinuxDaemon(daemonizer.Daemon):
"""
Linux Daemon wrapper or wrapper used for foreground operation on Windows
"""
def __init__(self):
self._engine = Engine(_getConfigPath())
super(LinuxDaemon, self).__init__('shotgunEvent', self._engine.config.getEnginePIDFile())
def start(self, daemonize=True):
if not daemonize:
# Setup the stdout logger
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(levelname)s:%(name)s:%(message)s"))
logging.getLogger().addHandler(handler)
super(LinuxDaemon, self).start(daemonize)
def _run(self):
"""
Start the engine's main loop
"""
self._engine.start()
def _cleanup(self):
self._engine.stop()
def main():
"""
"""
action = None
if len(sys.argv) > 1:
action = sys.argv[1]
if sys.platform == 'win32' and action != 'foreground':
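        # NOTE: win32serviceutil and WindowsService are not imported or defined in
        # this variant, so this branch will fail with a NameError on Windows.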
win32serviceutil.HandleCommandLine(WindowsService)
return 0
if action:
daemon = LinuxDaemon()
# Find the function to call on the daemon and call it
func = getattr(daemon, action, None)
if action[:1] != '_' and func is not None:
func()
return 0
print "Unknown command: %s" % action
print "usage: %s start|stop|restart|foreground" % sys.argv[0]
return 2
def _getConfigPath():
"""
Get the path of the shotgunEventDaemon configuration file.
"""
paths = ['/etc', os.path.dirname(__file__)]
# Get the current path of the daemon script
scriptPath = sys.argv[0]
if scriptPath != '' and scriptPath != '-c':
# Make absolute path and eliminate any symlinks if any.
scriptPath = os.path.abspath(scriptPath)
scriptPath = os.path.realpath(scriptPath)
# Add the script's directory to the paths we'll search for the config.
paths[:0] = [os.path.dirname(scriptPath)]
# Search for a config file.
for path in paths:
path = os.path.join(path, 'shotgunEventDaemon.conf')
if os.path.exists(path):
return path
# No config file was found
raise EventDaemonError('Config path not found, searched %s' % ', '.join(paths))
if __name__ == '__main__':
engine = Engine(_getConfigPath())
engine.start()
engine._mainLoop()
#sys.exit(main())
|
ajax.py
|
import json
import logging
import os
import threading
import time
import cherrypy
import datetime
import core
from core import config, library, searchresults, searcher, snatcher, notification, plugins, downloaders
from core.library import Metadata, Manage
from core.movieinfo import TheMovieDatabase, YouTube
from core.providers import torrent, newznab
from core.helpers import Conversions
import backup
from gettext import gettext as _
logging = logging.getLogger(__name__)
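# Note: the module-level name 'logging' now refers to this module's Logger
# instance, so calls below such as logging.info() go through the Logger, not
# the logging package.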
class Errors():
''' Namespace for common error messages used in AJAX responses '''
database_write = _('Unable to write to database.')
database_read = _('Unable to read {} details from database.')
tmdb_not_found = _('Unable to find {} on TheMovieDB.')
class Ajax(object):
''' These are all the methods that handle ajax post/get requests from the browser.
Except in special circumstances, all should return an 'ajax-style response', which is a
dict with a response key to indicate success, and additional keys for expected data output.
For example {'response': False, 'error': 'something broke'}
{'response': True, 'results': ['this', 'is', 'the', 'output']}
'''
@cherrypy.expose
@cherrypy.tools.json_out()
def library(self, sort_key, sort_direction, limit=50, offset=0, status=None, category=None):
''' Get 50 movies from library
sort_key (str): column name to sort by
sort_direction (str): direction to sort [ASC, DESC]
limit: int number of movies to get <optional - default 50>
        offset: int list index position to start slice <optional - default 0>
status (list): filter movies with these statuses only <optional>
category (str): filter movies with this category only <optional>
        Gets a slice of movies, of length limit, from the library sorted by sort_key
Returns list of dicts of movies
'''
if status and not isinstance(status, list):
status = [status]
if status and 'Finished' in status:
status.append('Disabled')
return core.sql.get_user_movies(sort_key, sort_direction.upper(), limit, offset, status, category)
@cherrypy.expose
@cherrypy.tools.json_out()
def library_counters(self, category=None):
        ''' Get movie counters grouped by status, filtered by category
category (str): Count movies with this category <optional>
'''
status_count = core.sql.get_library_count('status', 'category', category)
status_count['Finished'] = status_count.get('Finished', 0) + status_count.get('Disabled', 0)
if 'Disabled' in status_count:
del status_count['Disabled']
return status_count
@cherrypy.expose
@cherrypy.tools.json_out()
def search_tmdb(self, search_term):
''' Search tmdb for movies
search_term (str): title and year of movie (Movie Title 2016)
Returns list of dicts that contain tmdb's data.
'''
results = TheMovieDatabase.search(search_term)
if results:
Manage.add_status_to_search_movies(results)
else:
logging.info('No Results found for {}'.format(search_term))
return results
@cherrypy.expose
@cherrypy.tools.json_out()
def tmdb_categories(self, cat, tmdbid=None):
''' Get categories of movies from TMDB
Returns list of dicts of movies
'''
results = TheMovieDatabase.get_category(cat, tmdbid)[:8]
if results:
Manage.add_status_to_search_movies(results)
else:
logging.info('No Results found for {}'.format(cat))
return results
@cherrypy.expose
@cherrypy.tools.json_out()
def quick_titles(self):
return core.sql.quick_titles()
@cherrypy.expose
@cherrypy.tools.json_out()
def get_search_results(self, imdbid, quality=None):
''' Gets search results for movie
imdbid (str): imdb id #
quality (str): quality profile for movie <optional - default None>
Passes request to sql.get_search_results() then filters out unused download methods.
Returns dict ajax-style response
'''
results = Manage.search_results(imdbid, quality=quality)
if not results:
ne = core.scheduler_plugin.task_list['Movie Search'].next_execution
ne = Conversions.human_datetime(ne) if ne else '[Disabled]'
return {'response': False, 'next': ne}
else:
for i in results:
i['size'] = Conversions.human_file_size(i['size'])
return {'response': True, 'results': results}
@cherrypy.expose
def get_trailer(self, title, year):
''' Gets trailer embed url from youtube
title (str): title of movie
year (str/int): year of movie release
Returns str
'''
return YouTube.trailer('{} {}'.format(title, year))
@cherrypy.expose
@cherrypy.tools.json_out()
def add_wanted_movie(self, data):
''' Adds movie to library
data (str): json-formatted dict of known movie data
Calls library.Manage.add_movie to add to library.
Returns dict ajax-style response
'''
movie = json.loads(data)
response = Manage.add_movie(movie, full_metadata=False)
if response['response'] and core.CONFIG['Search']['searchafteradd'] and movie['year'] != 'N/A':
threading.Thread(target=searcher._t_search_grab, args=(movie,)).start()
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def save_settings(self, data):
''' Saves settings to config file
data (dict): of Section with nested dict of keys and values:
{'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}
All dicts must contain the full tree or data will be lost.
        Fires off additional methods if necessary, i.e. scheduler restart/reloads
Returns dict ajax-style response
'''
logging.info('Saving settings.')
data = json.loads(data)
save_data = {}
for key in data:
if data[key] != core.CONFIG[key]:
save_data[key] = data[key]
if not save_data:
return {'response': True, 'message': _('Settings saved.')}
try:
config.write(save_data)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Writing config.', exc_info=True)
return {'response': False, 'error': _('Unable to write to config file.')}
return {'response': True, 'message': _('Settings saved.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def remove_movie(self, imdbid):
''' Removes movie
imdbid (str): imdb id #
Returns dict ajax-style response
'''
return Manage.remove_movie(imdbid)
@cherrypy.expose
@cherrypy.tools.json_out()
def delete_movie_file(self, imdbid):
''' Deletes movie file for imdbid
imdbid (str): imdb id #
Returns dict ajax-style response
'''
logging.info('Deleting file for {}.'.format(imdbid))
f = core.sql.get_movie_details('imdbid', imdbid).get('finished_file')
try:
logging.debug('Finished file for {} is {}'.format(imdbid, f))
if os.path.exists(f):
os.unlink(f)
core.sql.update_multiple_values('MOVIES', {'finished_date': None, 'finished_score': None, 'finished_file': None}, 'imdbid', imdbid)
return {'response': True, 'message': _('Deleted movie file {}.').format(f)}
except Exception as e:
logging.error('Unable to delete file {}'.format(f), exc_info=True)
return {'response': False, 'error': str(e)}
@cherrypy.expose
@cherrypy.tools.json_out()
def search(self, imdbid):
''' Search indexers for specific movie.
imdbid (str): imdb id #
Gets movie data from database and sends to searcher.search()
Returns dict ajax-style response
'''
logging.info('Starting user-requested backlog search for {}'.format(imdbid))
movie = core.sql.get_movie_details('imdbid', imdbid)
if not movie:
return {'response': False, 'error': Errors.database_read.format(imdbid)}
else:
success = searcher.search(movie)
status = core.sql.get_movie_details('imdbid', imdbid)['status']
if success:
results = core.sql.get_search_results(imdbid, movie['quality'], rejected=True)
for i in results:
i['size'] = Conversions.human_file_size(i['size'])
r = {'response': True, 'results': results, 'movie_status': status}
if len(results) == 0:
ne = core.scheduler_plugin.task_list['Movie Search'].next_execution
r['next'] = Conversions.human_datetime(ne) if ne else '[Disabled]'
return r
else:
return {'response': False, 'error': Errors.database_read.format(imdbid), 'movie_status': status}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_download(self, year, guid, kind):
''' Sends search result to downloader manually
guid (str): download link for nzb/magnet/torrent file.
kind (str): type of download (torrent, magnet, nzb)
Returns dict ajax-style response
'''
torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled']
usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled']
if kind == 'nzb' and not usenet_enabled:
            return {'response': False, 'error': _('Link is NZB but no Usenet client is enabled.')}
elif kind in ('torrent', 'magnet') and not torrent_enabled:
return {'response': False, 'error': _('Link is Torrent/Magnet but no Torrent client is enabled.')}
data = dict(core.sql.get_single_search_result('guid', guid))
if data:
data['year'] = year
return snatcher.download(data)
else:
return {'response': False, 'error': Errors.database_read.format(kind)}
@cherrypy.expose
@cherrypy.tools.json_out()
def mark_bad(self, guid, imdbid, cancel_download=False):
''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS
guid (str): guid of download to mark
imdbid (str): imdb id # of movie
cancel_download (bool): send command to download client to cancel download
Returns dict ajax-style response
'''
sr_orig = core.sql.get_single_search_result('guid', guid)
sr = Manage.searchresults(guid, 'Bad')
Manage.markedresults(guid, 'Bad', imdbid=imdbid)
if sr:
response = {'response': True, 'message': _('Marked release as Bad.')}
else:
response = {'response': False, 'error': Errors.database_write}
response['movie_status'] = Manage.movie_status(imdbid)
if not response['movie_status']:
response['error'] = (Errors.database_write)
response['response'] = False
if cancel_download:
cancelled = False
if sr_orig.get('status') != 'Snatched':
return response
client = sr_orig['download_client'] if sr_orig else None
downloadid = sr_orig['downloadid'] if sr_orig else None
if not client:
logging.info('Download client not found, cannot cancel download.')
return response
else:
cancelled = getattr(downloaders, client).cancel_download(downloadid)
if not cancelled:
response['response'] = False
response['error'] = response.get('error', '') + _(' Could not remove download from client.')
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def unmark_bad(self, guid, imdbid):
''' Removes bad mark for guid in SEARCHRESULTS and MARKEDRESULTS
guid (str): guid of download to mark
imdbid (str): imdb id # of movie
Returns dict ajax-style response
'''
logging.info('Removing {} from MARKEDRESULTS.'.format(guid.split('&')[0]))
if not core.sql.delete('MARKEDRESULTS', 'guid', guid):
logging.info('Removing MARKEDRESULTS {} failed.'.format(guid.split('&')[0]))
return {'response': False, 'error': Errors.database_write}
else:
logging.info('Successfully removed {} from MARKEDRESULTS.'.format(guid.split('&')[0]))
sr = Manage.searchresults(guid, 'Available')
if sr:
response = {'response': True, 'message': _('Marked release as Available.')}
else:
response = {'response': False, 'error': Errors.database_write}
response['movie_status'] = Manage.movie_status(imdbid)
if not response['movie_status']:
response['error'] = (Errors.database_write)
response['response'] = False
return response
@cherrypy.expose
def notification_remove(self, index):
''' Removes notification from core.notification
index (str/int): index of notification to remove
'index' will be of type string since it comes from ajax request.
Therefore we convert to int here before passing to Notification
Simply calls Notification module.
Does not return
'''
notification.remove(int(index))
return
@cherrypy.expose
@cherrypy.tools.json_out()
def update_check(self):
''' Manually check for updates
Returns list:
[0] dict ajax-style response
[1] dict of core notifications
'''
response = core.updater.update_check()
if response['status'] == 'current':
n = [[{'message': _('No updates available.')}, {'type': 'primary'}]]
return [response, n]
else:
return [response, core.NOTIFICATIONS]
@cherrypy.expose
@cherrypy.tools.json_out()
def test_downloader_connection(self, mode, data):
''' Test connection to downloader.
mode (str): which downloader to test.
data (dict): connection information (url, port, login, etc)
Executes staticmethod in the chosen downloader's class.
Returns dict ajax-style response
'''
response = {}
data = json.loads(data)
test = getattr(downloaders, mode).test_connection(data)
if test is True:
response['response'] = True
response['message'] = _('Connection successful.')
else:
response['response'] = False
response['error'] = test
return response
@cherrypy.expose
def server_status(self, mode):
''' Check or modify status of CherryPy server_status
mode (str): command or request of state
Restarts or Shuts Down server in separate thread.
Delays by one second to allow browser to redirect.
If mode == 'online', asks server for status.
(ENGINE.started, ENGINE.stopped, etc.)
Returns nothing for mode == restart || shutdown
Returns str server state if mode == online
'''
if mode == 'restart':
threading.Timer(1, core.restart).start()
return
elif mode == 'shutdown':
threading.Timer(1, core.shutdown).start()
return
elif mode == 'online':
return str(cherrypy.engine.state)
@cherrypy.expose
def update_server(self, mode):
''' Starts and executes update process.
mode (str): 'set_true' or 'update_now'
This method has two major functions based on mode
set_true:
Sets core.UPDATING to True, the browser should then automatically redirect
the user to the update page that calls update_server('update_now')
update_now:
Starts update process:
* Stops task scheduler to cancel all Timers
* Waits for in-process tasks to finish. Yields to browser a list of
currently-running tasks every 1.5 seconds
* Yields updating message to browser. Calls update method
* Sets core.UPDATING to False
* Yields response from update method to browser
If False, starts scheduler plugin again to get back to a normal state
If True, calls restart method. Browser is responsible for redirecting
after the server is back up.
Returns dict ajax-style response
'''
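# The browser reads this endpoint as a stream of JSON objects, one per yield below.
# Illustrative payloads only (shapes match the yields; values are examples):
#   {"response": true, "status": "waiting", "active_tasks": ["Movie Search"]}
#   {"response": true, "status": "updating"}
#   {"response": true, "status": "complete"}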
if mode == 'set_true':
core.UPDATING = True
yield json.dumps({'response': True})
if mode == 'update_now':
logging.info('Update process started.')
core.scheduler_plugin.stop()
active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
while len(active_tasks) > 0:
yield json.dumps({'response': True, 'status': 'waiting', 'active_tasks': active_tasks})
active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
time.sleep(1.5)
yield json.dumps({'response': True, 'status': 'updating'})
update_status = core.updater.execute_update()
core.UPDATING = False
if update_status is False:
logging.error('Update Failed.')
yield json.dumps({'response': False, 'error': _('Unable to complete update.')})
core.scheduler_plugin.restart()
elif update_status is True:
yield json.dumps({'response': True, 'status': 'complete'})
self.server_status('restart')
else:
yield json.dumps({'response': False})
update_server._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def update_movie_options(self, quality, category, status, language, title, filters, imdbid):
''' Updates quality settings for individual title
quality (str): name of new quality
category (str): name of new category
status (str): management state ('Automatic' or 'Disabled')
language (str): name of language to download movie
title (str): movie title
filters (str): JSON.stringified dict of filter words
imdbid (str): imdb identification number
Returns dict ajax-style response
'''
success = {'response': True, 'message': _('Movie options updated.')}
if not Manage.update_movie_options(imdbid, quality, category, language, title, filters):
return {'response': False, 'error': Errors.database_write}
logging.info('Updating status to {} for {}.'.format(status, imdbid))
if status == 'Automatic':
if not core.sql.update('MOVIES', 'status', 'Waiting', 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
new_status = Manage.movie_status(imdbid)
if not new_status:
return {'response': False, 'error': Errors.database_write}
else:
success['status'] = new_status
return success
elif status == 'Disabled':
if not core.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
else:
success['status'] = 'Disabled'
return success
@cherrypy.expose
def get_log_text(self, logfile):
''' Gets log file contents
logfile (str): name of log file to read
logfile should be filename only, not the path to the file
Returns str
'''
logging.info('Dumping log file {} to text.'.format(logfile))
with open(os.path.join(core.LOG_DIR, logfile), 'r') as f:
log_text = ''.join(reversed(f.readlines()))
return log_text
@cherrypy.expose
@cherrypy.tools.json_out()
def indexer_test(self, indexer, apikey, mode):
''' Tests connection to newznab indexer
indexer (str): url of indexer
apikey (str): indexer's api key
mode (str): newznab or torznab
Returns dict ajax-style response
'''
if mode == 'newznab':
return newznab.NewzNab.test_connection(indexer, apikey)
elif mode == 'torznab':
return torrent.Torrent.test_connection(indexer, apikey)
else:
return {'response': False, 'error': _('Invalid test mode.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_plugin_conf(self, folder, conf):
''' Reads plugin config file and renders settings html
folder (str): folder to read config file from
conf (str): filename of config file (ie 'my_plugin.conf')
Returns string
'''
c = os.path.join(core.PLUGIN_DIR, folder, conf)
logging.info('Reading plugin config {}'.format(c))
try:
with open(c) as f:
config = json.load(f)
except Exception as e:
logging.error('Unable to read config file.', exc_info=True)
return ''
return plugins.render_config(config)
@cherrypy.expose
@cherrypy.tools.json_out()
def save_plugin_conf(self, folder, filename, config):
''' Saves plugin config file to disk
folder (str): folder to store config file
filename (str): filename of config file (ie 'my_plugin.conf')
config (str): json data to store in conf file
Returns dict ajax-style response
'''
conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, filename)
logging.info('Saving plugin config as {}'.format(conf_file))
config = json.loads(config)
response = {'response': True, 'message': _('Settings saved.')}
try:
with open(conf_file, 'w') as output:
json.dump(config, output, indent=2)
except Exception as e:
response = {'response': False, 'error': str(e)}
return response
@cherrypy.expose
def scan_library_directory(self, directory, minsize, recursive, skipduplicatedirs, maxresults):
''' Calls library to scan directory for movie files
directory (str): directory to scan
minsize (str/int): minimum file size in mb, coerced to int
recursive (bool): whether or not to search subdirs
skipduplicatedirs (bool): whether or not to skip duplicate dirs
maxresults (str/int): maximum result count, coerced to int
Finds all files larger than minsize in directory.
Removes all movies from gathered list that are already in library.
If error, yields {'error': reason} and stops Iteration
If movie has all metadata, yields:
{'complete': {<metadata>}}
If missing imdbid or resolution, yields:
{'incomplete': {<known metadata>}}
All metadata dicts include:
'path': 'absolute path to file'
'progress': '10 of 250'
Yields dict ajax-style response
'''
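# Illustrative examples of the streamed payloads (values are examples; shapes match the yields below):
#   {"progress": [1, 120], "response": "complete", "movie": {<full metadata>}}
#   {"progress": [2, 120], "response": "incomplete", "movie": {"title": "Example", "imdbid": ""}}
#   {"progress": [3, 120], "response": "in_library", "movie": {"title": "Example"}}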
recursive = json.loads(recursive)
minsize = int(minsize)
# Note - do not limit the result set here, or we might get stuck looking at files we already have
files = core.library.ImportDirectory.scan_dir(directory, minsize, recursive)
if files.get('error'):
yield json.dumps({'error': files['error']})
return  # end the generator; raising StopIteration here is a RuntimeError under PEP 479
user_movies = core.sql.get_user_movies()
library_files = [i['finished_file'] for i in user_movies]
library = [i['imdbid'] for i in user_movies]
files = [file for file in files['files'] if file not in library_files]
skipduplicatedirs = json.loads(skipduplicatedirs)
if skipduplicatedirs:
# Build dict of dir:[files]
library_file_dirs = {}
for f in library_files:
if f:
fd = os.path.dirname(f)
library_file_dirs.setdefault(fd, []).append(f)
# Log all possible duplicate dirs to help with manual maintenance
for f in files:
fd = os.path.dirname(f)
if fd in library_file_dirs:
logging.info('## {} directory already in library'.format(f))
for x in library_file_dirs[fd]:
logging.info('## {}'.format(x))
# Remove the files which have duplicate dirs (likely to be the same imdbid)
# This avoids doing a metadata probe which is then ignored
files = [f for f in files if os.path.dirname(f) not in library_file_dirs]
# We do not need the dict any more, so release the memory
del library_file_dirs
# Limit the number of results
# We do this here instead of at the scan so we skip files we have already imported
if maxresults:
maxresults = int(maxresults)
if maxresults > 0:
files = files[0:maxresults]
length = len(files)
if length == 0:
yield json.dumps({'response': None})
return
logging.info('Parsing {} directory scan results.'.format(length))
for index, path in enumerate(files):
logging.info('Gathering metadata for {}'.format(path))
metadata = {}
response = {'progress': [index + 1, length]}
try:
metadata = Metadata.from_file(path)
if not metadata.get('imdbid'):
metadata['imdbid'] = ''
logging.info('IMDB unknown for import {}'.format(metadata['title']))
response['response'] = 'incomplete'
elif metadata['imdbid'] in library:
logging.info('{} ({}) already in library, ignoring.'.format(metadata['title'], path))
response['response'] = 'in_library'
# Log all possible duplicate files to help with manual maintenance
for i in user_movies:
if i['imdbid'] == metadata['imdbid']:
logging.info('## {} {}'.format(i['imdbid'], i['finished_file']))
elif not metadata.get('resolution'):
logging.info('Resolution/Source unknown for import {}'.format(metadata['title']))
response['response'] = 'incomplete'
else:
logging.info('All data found for import {}'.format(metadata['title']))
response['response'] = 'complete'
if response['response'] == 'complete':
p = metadata.get('poster_path')
r = metadata.get('resolution')
metadata = Metadata.convert_to_db(metadata)
metadata['poster_path'] = p
metadata['resolution'] = r
metadata['size'] = os.path.getsize(path)
metadata['human_size'] = Conversions.human_file_size(metadata['size'])
metadata['finished_file'] = path
if response['response'] == 'in_library':
metadata = {'title': metadata['title']}
response['movie'] = metadata
yield json.dumps(response)
except Exception as e:
logging.warning('Error gathering metadata.', exc_info=True)
yield json.dumps({'response': 'incomplete', 'movie': metadata})
continue
scan_library_directory._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def import_dir(self, movies, corrected_movies):
''' Imports list of movies from a directory scan
movies (str): JSON-encoded list of dicts of movie info ready to import
corrected_movies (str): JSON-encoded list of dicts of user-corrected movie info
corrected_movies must be [{'/path/to/file': {'known': 'metadata'}}]
Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields dict ajax-style response
'''
logging.info('Adding directory scan movies to library.')
today = str(datetime.date.today())
movie_data = json.loads(movies)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
logging.info('{} corrected movies, gathering metadata.'.format(len(corrected_movies)))
for data in corrected_movies:
tmdbdata = TheMovieDatabase._search_tmdbid(data['tmdbid'])
if tmdbdata:
tmdbdata = tmdbdata[0]
data['year'] = tmdbdata['release_date'][:4]
data.update(tmdbdata)
movie_data.append(data)
else:
logging.error('Unable to find {} on TMDB.'.format(data['tmdbid']))
yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(data['tmdbid'])})
progress += 1
logging.info('Adding {} directory scan movies to library.'.format(len(movie_data)))
for movie in movie_data:
if movie.get('imdbid'):
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['origin'] = 'Directory Import'
movie['finished_date'] = today
movie['id'] = movie['tmdbid']
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
else:
logging.error('Unable to find {} on TMDB.'.format(movie['title']))
logging.debug(movie)
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['title'])})
progress += 1
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_dir._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def list_files(self, current_dir, move_dir):
''' Lists files in directory
current_dir (str): base path
move_dir (str): child path to read
Joins and normalizes paths:
('/home/user/movies', '..')
Becomes /home/user
Returns dict ajax-style response
'''
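# Worked example of the normalization below:
#   os.path.normpath(os.path.join('/home/user/movies', '..')) -> '/home/user'
#   os.path.normpath(os.path.join('/home/user', 'movies'))    -> '/home/user/movies'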
current_dir = current_dir.strip()
move_dir = move_dir.strip()
response = {}
new_path = os.path.normpath(os.path.join(current_dir, move_dir))
response['new_path'] = new_path
try:
response['list'] = [i for i in os.listdir(new_path) if os.path.isdir(os.path.join(new_path, i)) and not i.startswith('.')]
response['list'].sort()
except Exception as e:
response = {'error': str(e)}
logging.error('Error listing directory.', exc_info=True)
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def update_metadata(self, imdbid, tmdbid=None):
''' Re-downloads metadata for imdbid
imdbid (str): imdbid of movie
tmdbid (str): tmdbid of movie <optional - default None>
If tmdbid is None, looks in database for tmdbid using imdbid.
If that fails, looks on tmdb api for imdbid
If that fails returns error message
Returns dict ajax-style response
'''
r = Metadata.update(imdbid, tmdbid)
if r['response'] is True:
return {'response': True, 'message': _('Metadata updated.')}
else:
return r
@cherrypy.expose
@cherrypy.tools.json_out()
def movie_metadata(self, imdbid, tmdbid=None, language=None):
''' Gets TMDB metadata for a movie
imdbid (str): imdbid of movie
tmdbid (str): tmdbid of movie <optional - default None>
If tmdbid is None, looks in database for tmdbid using imdbid.
If that fails, looks on tmdb api for imdbid
If that fails returns error message
Returns dict ajax-style response
'''
result = Metadata.tmdb_data(imdbid, tmdbid=tmdbid, language=language)
if result:
return {'response': True, 'tmdb_data': result}
else:
return {'response': False, 'error': 'Unable to find {} on TMDB.'.format(tmdbid or imdbid)}
@cherrypy.expose
@cherrypy.tools.json_out()
def single_movie_details(self, key, value):
''' Gets single movie's details from database
key (str): key for sql.get_movie_details
value (str): value for sql.get_movie_details
Returns dict
'''
return core.sql.get_movie_details(key, value)
@cherrypy.expose
@cherrypy.tools.json_out()
def set_movie_details(self, data):
''' Updates movie in database
data (dict): movie fields and values to update
data *must* include valid tmdbid
Returns dict
'''
data = json.loads(data)
tmdbid = data.pop('tmdbid')
if not core.sql.update_multiple_values('MOVIES', data, 'tmdbid', tmdbid):
return {'response': False, 'error': Errors.database_write}
else:
return {'response': True, 'message': 'Database Updated'}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_kodi_movies(self, url):
''' Gets list of movies from kodi server
url (str): url of kodi server
Calls Kodi import method to gather list.
Returns dict ajax-style response
'''
return library.ImportKodiLibrary.get_movies(url)
@cherrypy.expose
def import_kodi_movies(self, movies):
''' Imports list of movies from Kodi library
movies (str): JSON-encoded list of dicts of movies
Iterates through movies and gathers all required metadata.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields dict ajax-style response
'''
movies = json.loads(movies)
fake_results = []
success = []
length = len(movies)
progress = 1
logging.info('Adding {} Kodi movies to library.'.format(length))
for movie in movies:
if not movie['imdbid']:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format("NONE")})
progress += 1
continue
tmdb_data = TheMovieDatabase._search_imdbid(movie['imdbid'])
if not tmdb_data or not tmdb_data[0].get('id'):
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
continue
tmdb_data = tmdb_data[0]
movie['id'] = tmdb_data['id']
movie['size'] = 0
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['finished_file'] = (movie.get('finished_file') or '').strip()
movie['origin'] = 'Kodi Import'
response = Manage.add_movie(movie)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'title': movie['title'], 'imdbid': movie['imdbid'], 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_kodi_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def upload_plex_csv(self, file_input):
''' Receives upload of csv from browser
file_input (file): uploaded csv file to read
Reads/parses csv file into a usable dict
Returns dict ajax-style response
'''
try:
csv_text = file_input.file.read().decode('utf-8')
file_input.file.close()
except Exception as e:
logging.error('Unable to parse Plex CSV', exc_info=True)
return {'response': False, 'error': str(e)}
if csv_text:
return library.ImportPlexLibrary.read_csv(csv_text)
else:
return {'response': True, 'complete': [], 'incomplete': []}
@cherrypy.expose
def import_plex_csv(self, movies, corrected_movies):
''' Imports list of movies generated by csv import
movies (str): JSON-encoded list of dicts of movie info ready to import
corrected_movies (str): JSON-encoded list of dicts of user-corrected movie info
Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yields dict ajax-style response
'''
movie_data = json.loads(movies)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
logging.info('Adding {} Plex movies to library.'.format(len(corrected_movies)))
for movie in corrected_movies:
tmdbdata = TheMovieDatabase._search_imdbid(movie['imdbid'])
if tmdbdata:
tmdbdata = tmdbdata[0]
movie['year'] = tmdbdata['release_date'][:4]
movie.update(tmdbdata)
movie_data.append(movie)
else:
logging.error(Errors.tmdb_not_found.format(movie['imdbid']))
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
logging.info('Adding {} Plex movies to library.'.format(length))
for movie in movie_data:
logging.info('Importing Plex movie {} {}'.format(movie.get('title', ''), movie.get('year', '')))
fm = False
if not movie.get('imdbid') and movie.get('tmdbid'):
tmdb_data = TheMovieDatabase._search_tmdbid(movie['tmdbid'])
if tmdb_data:
movie.update(tmdb_data[0])
fm = True
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['tmdbid'])})
progress += 1
continue
if movie.get('imdbid'):
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['origin'] = 'Plex Import'
if not movie.get('id'):
tmdb_data = TheMovieDatabase._search_imdbid(movie['imdbid'])
if tmdb_data:
movie.update(tmdb_data[0])
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
continue
response = Manage.add_movie(movie, full_metadata=fm)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'error': response['error'], 'title': movie['title']})
progress += 1
continue
else:
logging.error(Errors.tmdb_not_found.format(movie['title']))
yield json.dumps({'response': False, 'progress': [progress, length], 'error': _('Unable to find IMDB ID for {} on TheMovieDB.').format(movie['title']), 'title': movie['title']})
progress += 1
continue
if fake_results:
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
if fake_results:
core.sql.write_search_results(fake_results)
import_plex_csv._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_cp_movies(self, url, apikey):
''' Gets movies from CP server
url (str): url to cp server
apikey (str): cp api key
Reads/parses cp api response
Returns dict ajax-style response
'''
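# e.g. url='localhost:5050', apikey='abc123' (illustrative values) produces
# 'http://localhost:5050/api/abc123/movie.list/'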
url = '{}/api/{}/movie.list/'.format(url, apikey)
if not url.startswith('http'):
url = 'http://{}'.format(url)
return library.ImportCPLibrary.get_movies(url)
@cherrypy.expose
def import_cp_movies(self, wanted, finished):
''' Imports movies from CP list to library
wanted (list): dicts of wanted movies
finished (list): dicts of finished movies
Yields dict ajax-style response
'''
wanted = json.loads(wanted)
finished = json.loads(finished)
fake_results = []
success = []
length = len(wanted) + len(finished)
progress = 1
logging.info('Adding {} Wanted CouchPotato movies to library.'.format(len(wanted)))
for movie in wanted:
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
logging.info('Adding {} Finished CouchPotato movies to library.'.format(len(finished)))
for movie in finished:
movie['predb'] = 'found'
movie['status'] = 'Disabled'
movie['origin'] = 'CouchPotato Import'
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_cp_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_backlog_search(self, movies):
''' Bulk manager action for backlog search
movies (list): dicts of movies, must contain keys imdbid and tmdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Performing bulk backlog search for {} movies.'.format(len(movies)))
ids = [i['imdbid'] for i in movies]
movies = [i for i in core.sql.get_user_movies() if i['imdbid'] in ids]
for i, movie in enumerate(movies):
logging.info('Performing backlog search for {} {}.'.format(movie['title'], movie['year']))
if not searcher.search(movie):
response = {'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_backlog_search._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_update_metadata(self, movies):
''' Bulk manager action for metadata update
movies (list): dicts of movies, must contain keys imdbid and tmdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Performing bulk metadata update for {} movies.'.format(len(movies)))
for i, movie in enumerate(movies):
r = Metadata.update(movie.get('imdbid'), movie.get('tmdbid'))
if r['response'] is False:
response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_update_metadata._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_change_quality(self, movies, quality):
''' Bulk manager action to change movie quality profile
movies (list): dicts of movies, must contain keys imdbid
quality (str): quality to set movies to
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Setting quality to {} for: {}'.format(quality, ', '.join(i['imdbid'] for i in movies)))
for i, movie in enumerate(movies):
if not core.sql.update('MOVIES', 'quality', quality, 'imdbid', movie['imdbid']):
response = {'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_change_quality._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_change_category(self, movies, category):
''' Bulk manager action to change movie category
movies (list): dicts of movies, must contain keys imdbid
category (str): category to set movies to
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Setting category to {} for: {}'.format(category, ', '.join(i['imdbid'] for i in movies)))
for i, movie in enumerate(movies):
if not core.sql.update('MOVIES', 'category', category, 'imdbid', movie['imdbid']):
response = {'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_change_category._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_reset_movies(self, movies):
''' Bulk manager action to reset movies
movies (list): dicts of movies, must contain key imdbid
Removes all search results
Updates database row with db_reset dict
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Resetting status for {} movies.'.format(len(movies)))
for i, movie in enumerate(movies):
logging.debug('Resetting {}'.format(movie['imdbid']))
imdbid = movie['imdbid']
if not core.sql.purge_search_results(imdbid):
yield json.dumps({'response': False, 'error': _('Unable to purge search results.'), 'imdbid': imdbid, 'index': i + 1})
continue
db_reset = {'quality': config.default_profile(),
'status': 'Waiting',
'finished_date': None,
'finished_score': None,
'backlog': 0,
'finished_file': None,
'predb': None,
'predb_backlog': None
}
if not core.sql.update_multiple_values('MOVIES', db_reset, 'imdbid', imdbid):
yield json.dumps({'response': False, 'error': Errors.database_write, 'imdbid': imdbid, 'index': i + 1})
continue
yield json.dumps({'response': True, 'index': i + 1})
manager_reset_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_remove_movies(self, movies):
''' Bulk action to remove movies
movies (list): dicts of movies, must contain key imdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Removing {} movies from library.'.format(len(movies)))
for i, movie in enumerate(movies):
r = self.remove_movie(movie['imdbid'])
if r['response'] is False:
response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield(json.dumps(response))
manager_remove_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def generate_stats(self, category=None):
''' Gets library stats for graphing page
Returns dict of library stats
'''
return Manage.get_stats(category)
@cherrypy.expose
@cherrypy.tools.json_out()
def create_backup(self):
''' Creates backup zip file ./watcher.zip
Returns dict ajax-style response
'''
logging.info('Creating backup of Watcher as {}'.format(os.path.join(core.PROG_PATH, 'watcher.zip')))
try:
backup.backup(require_confirm=False)
except Exception as e:
logging.error('Unable to create backup.', exc_info=True)
return {'response': False, 'error': str(e)}
return {'response': True, 'message': _('Backup created as {}').format(os.path.join(core.PROG_PATH, 'watcher.zip'))}
@cherrypy.expose
@cherrypy.tools.json_out()
def restore_backup(self, fileUpload):
logging.info('Restoring backup from uploaded zip.')
n = datetime.datetime.today().microsecond
tmp_zip = os.path.join(core.PROG_PATH, 'restore_{}.zip'.format(n))
try:
with open(tmp_zip, 'wb') as f:
f.seek(0)
f.write(fileUpload.file.read())
logging.info('Restore zip temporarily stored as {}.'.format(tmp_zip))
backup.restore(require_confirm=False, file=tmp_zip)
logging.info('Removing temporary zip {}'.format(tmp_zip))
os.unlink(tmp_zip)
except Exception as e:
logging.error('Unable to restore backup.', exc_info=True)
return {'response': False}
threading.Timer(3, core.restart).start()
return {'response': True}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_task_execute(self, name):
''' Calls task's now() function to execute task now
name (str): name of scheduled task to run
Response includes core.NOTIFICATIONS so the browser can display any
notifications generated during the task.
Returns dict ajax-style response
'''
try:
logging.info('Manually executing task {}.'.format(name))
task = core.scheduler_plugin.task_list[name]
task.now()
le = task.last_execution
return {'response': True, 'message': _('Finished task {}.').format(name), 'last_execution': le, 'notifications': core.NOTIFICATIONS}
except Exception as e:
return {'response': False, 'error': str(e)}
elg_demo.py
#!/usr/bin/env python3
"""Main script for gaze direction inference from webcam feed."""
import argparse
import os
import queue
import threading
import time
import coloredlogs
import cv2 as cv
import numpy as np
import tensorflow as tf
from datasources import Video, Webcam
from models import ELG
import util.gaze
if __name__ == '__main__':
# Set global log level
parser = argparse.ArgumentParser(description='Demonstration of landmarks localization.')
parser.add_argument('-v', type=str, help='logging level', default='info',
choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--from_video', type=str, help='Use this video path instead of webcam')
parser.add_argument('--record_video', type=str, help='Output path of video of demonstration.')
parser.add_argument('--fullscreen', action='store_true')
parser.add_argument('--headless', action='store_true')
parser.add_argument('--fps', type=int, default=60, help='Desired sampling rate of webcam')
parser.add_argument('--camera_id', type=int, default=0, help='ID of webcam to use')
args = parser.parse_args()
coloredlogs.install(
datefmt='%d/%m %H:%M',
fmt='%(asctime)s %(levelname)s %(message)s',
level=args.v.upper(),
)
# Check if GPU is available
from tensorflow.python.client import device_lib
session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
gpu_available = False
try:
gpus = [d for d in device_lib.list_local_devices(session_config=session_config)
if d.device_type == 'GPU']
gpu_available = len(gpus) > 0
except Exception:
# Device enumeration can fail on some TensorFlow builds; fall back to CPU-only.
pass
# Initialize Tensorflow session
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Session(config=session_config) as session:
# Declare some parameters
batch_size = 2
# Define webcam stream data source
# Change data_format='NHWC' if not using CUDA
if args.from_video:
assert os.path.isfile(args.from_video)
data_source = Video(args.from_video,
tensorflow_session=session, batch_size=batch_size,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(108, 180))
else:
data_source = Webcam(tensorflow_session=session, batch_size=batch_size,
camera_id=args.camera_id, fps=args.fps,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(36, 60))
# Define model
if args.from_video:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=3,
num_modules=3,
num_feature_maps=64,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
else:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=1,
num_modules=2,
num_feature_maps=32,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
# Record output frames to file if requested
if args.record_video:
video_out = None
video_out_queue = queue.Queue()
video_out_should_stop = False
video_out_done = threading.Condition()
def _record_frame():
global video_out
last_frame_time = None
out_fps = 30
out_frame_interval = 1.0 / out_fps
while not video_out_should_stop:
frame_index = video_out_queue.get()
if frame_index is None:
break
assert frame_index in data_source._frames
frame = data_source._frames[frame_index]['bgr']
h, w, _ = frame.shape
if video_out is None:
video_out = cv.VideoWriter(
args.record_video, cv.VideoWriter_fourcc(*'H264'),
out_fps, (w, h),
)
now_time = time.time()
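# Keep the output file at a constant out_fps: write the current frame as many
# times as needed to cover the wall-clock time elapsed since the last write.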
if last_frame_time is not None:
time_diff = now_time - last_frame_time
while time_diff > 0.0:
video_out.write(frame)
time_diff -= out_frame_interval
last_frame_time = now_time
video_out.release()
with video_out_done:
video_out_done.notify_all()
record_thread = threading.Thread(target=_record_frame, name='record')
record_thread.daemon = True
record_thread.start()
# Begin visualization thread
inferred_stuff_queue = queue.Queue()
def _visualize_output():
last_frame_index = 0
last_frame_time = time.time()
fps_history = []
all_gaze_histories = []
if args.fullscreen:
cv.namedWindow('vis', cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty('vis', cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)
while True:
# If no output to visualize, show unannotated frame
if inferred_stuff_queue.empty():
next_frame_index = last_frame_index + 1
if next_frame_index in data_source._frames:
next_frame = data_source._frames[next_frame_index]
if 'faces' in next_frame and len(next_frame['faces']) == 0:
if not args.headless:
cv.imshow('vis', next_frame['bgr'])
if args.record_video:
video_out_queue.put_nowait(next_frame_index)
last_frame_index = next_frame_index
if cv.waitKey(1) & 0xFF == ord('q'):
return
continue
# Get output from neural network and visualize
output = inferred_stuff_queue.get()
bgr = None
for j in range(batch_size):
frame_index = output['frame_index'][j]
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
# Decide which landmarks are usable
heatmaps_amax = np.amax(output['heatmaps'][j, :].reshape(-1, 18), axis=0)
can_use_eye = np.all(heatmaps_amax > 0.7)
can_use_eyelid = np.all(heatmaps_amax[0:8] > 0.75)
can_use_iris = np.all(heatmaps_amax[8:16] > 0.8)
start_time = time.time()
eye_index = output['eye_index'][j]
bgr = frame['bgr']
eye = frame['eyes'][eye_index]
eye_image = eye['image']
eye_side = eye['side']
eye_landmarks = output['landmarks'][j, :]
eye_radius = output['radius'][j][0]
if eye_side == 'left':
eye_landmarks[:, 0] = eye_image.shape[1] - eye_landmarks[:, 0]
eye_image = np.fliplr(eye_image)
# Embed eye image and annotate for picture-in-picture
eye_upscale = 2
eye_image_raw = cv.cvtColor(cv.equalizeHist(eye_image), cv.COLOR_GRAY2BGR)
eye_image_raw = cv.resize(eye_image_raw, (0, 0), fx=eye_upscale, fy=eye_upscale)
eye_image_annotated = np.copy(eye_image_raw)
if can_use_eyelid:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[0:8]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[8:16]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
eye_image_annotated,
tuple(np.round(eye_upscale*eye_landmarks[16, :]).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
face_index = int(eye_index / 2)
eh, ew, _ = eye_image_raw.shape
v0 = face_index * 2 * eh
v1 = v0 + eh
v2 = v1 + eh
u0 = 0 if eye_side == 'left' else ew
u1 = u0 + ew
bgr[v0:v1, u0:u1] = eye_image_raw
bgr[v1:v2, u0:u1] = eye_image_annotated
# Visualize preprocessing results
frame_landmarks = (frame['smoothed_landmarks']
if 'smoothed_landmarks' in frame
else frame['landmarks'])
for f, face in enumerate(frame['faces']):
for landmark in frame_landmarks[f][:-1]:
cv.drawMarker(bgr, tuple(np.round(landmark).astype(np.int32)),
color=(0, 0, 255), markerType=cv.MARKER_STAR,
markerSize=2, thickness=1, line_type=cv.LINE_AA)
cv.rectangle(
bgr, tuple(np.round(face[:2]).astype(np.int32)),
tuple(np.round(np.add(face[:2], face[2:])).astype(np.int32)),
color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
# Transform predictions
eye_landmarks = np.concatenate([eye_landmarks,
[[eye_landmarks[-1, 0] + eye_radius,
eye_landmarks[-1, 1]]]])
eye_landmarks = np.asmatrix(np.pad(eye_landmarks, ((0, 0), (0, 1)),
'constant', constant_values=1.0))
eye_landmarks = (eye_landmarks *
eye['inv_landmarks_transform_mat'].T)[:, :2]
eye_landmarks = np.asarray(eye_landmarks)
eyelid_landmarks = eye_landmarks[0:8, :]
iris_landmarks = eye_landmarks[8:16, :]
iris_centre = eye_landmarks[16, :]
eyeball_centre = eye_landmarks[17, :]
eyeball_radius = np.linalg.norm(eye_landmarks[18, :] -
eye_landmarks[17, :])
# Smooth and visualize gaze direction
num_total_eyes_in_frame = len(frame['eyes'])
if len(all_gaze_histories) != num_total_eyes_in_frame:
all_gaze_histories = [list() for _ in range(num_total_eyes_in_frame)]
gaze_history = all_gaze_histories[eye_index]
if can_use_eye:
# Visualize landmarks
cv.drawMarker( # Eyeball centre
bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
color=(0, 255, 0), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
# cv.circle( # Eyeball outline
# bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
# int(np.round(eyeball_radius)), color=(0, 255, 0),
# thickness=1, lineType=cv.LINE_AA,
# )
# Draw "gaze"
# from models.elg import estimate_gaze_from_landmarks
# current_gaze = estimate_gaze_from_landmarks(
# iris_landmarks, iris_centre, eyeball_centre, eyeball_radius)
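# The landmark-based estimate below uses a simple spherical eyeball model:
# the iris-centre offset from the eyeball centre, divided by the eyeball
# radius, is taken as the sine of the gaze angles, i.e.
#   theta (pitch) = -arcsin((i_y0 - e_y0) / r)
#   phi (yaw)     =  arcsin((i_x0 - e_x0) / (r * -cos(theta)))
# np.clip keeps the arcsin arguments within [-1, 1] when landmarks are noisy.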
i_x0, i_y0 = iris_centre
e_x0, e_y0 = eyeball_centre
theta = -np.arcsin(np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
phi = np.arcsin(np.clip((i_x0 - e_x0) / (eyeball_radius * -np.cos(theta)),
-1.0, 1.0))
current_gaze = np.array([theta, phi])
gaze_history.append(current_gaze)
gaze_history_max_len = 10
if len(gaze_history) > gaze_history_max_len:
gaze_history = gaze_history[-gaze_history_max_len:]
util.gaze.draw_gaze(bgr, iris_centre, np.mean(gaze_history, axis=0),
length=120.0, thickness=1)
else:
gaze_history.clear()
if can_use_eyelid:
cv.polylines(
bgr, [np.round(eyelid_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
bgr, [np.round(iris_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
bgr, tuple(np.round(iris_centre).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
dtime = 1e3*(time.time() - start_time)
if 'visualization' not in frame['time']:
frame['time']['visualization'] = dtime
else:
frame['time']['visualization'] += dtime
def _dtime(before_id, after_id):
return int(1e3 * (frame['time'][after_id] - frame['time'][before_id]))
def _dstr(title, before_id, after_id):
return '%s: %dms' % (title, _dtime(before_id, after_id))
if eye_index == len(frame['eyes']) - 1:
# Calculate timings
frame['time']['after_visualization'] = time.time()
fps = int(np.round(1.0 / (time.time() - last_frame_time)))
fps_history.append(fps)
if len(fps_history) > 60:
fps_history = fps_history[-60:]
fps_str = '%d FPS' % np.mean(fps_history)
last_frame_time = time.time()
fh, fw, _ = bgr.shape
cv.putText(bgr, fps_str, org=(fw - 110, fh - 20),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.8,
color=(0, 0, 0), thickness=1, lineType=cv.LINE_AA)
cv.putText(bgr, fps_str, org=(fw - 111, fh - 21),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.79,
color=(255, 255, 255), thickness=1, lineType=cv.LINE_AA)
if not args.headless:
cv.imshow('vis', bgr)
last_frame_index = frame_index
# Record frame?
if args.record_video:
video_out_queue.put_nowait(frame_index)
# Quit?
if cv.waitKey(1) & 0xFF == ord('q'):
return
# Print timings
if frame_index % 60 == 0:
latency = _dtime('before_frame_read', 'after_visualization')
processing = _dtime('after_frame_read', 'after_visualization')
timing_string = ', '.join([
_dstr('read', 'before_frame_read', 'after_frame_read'),
_dstr('preproc', 'after_frame_read', 'after_preprocessing'),
'infer: %dms' % int(frame['time']['inference']),
'vis: %dms' % int(frame['time']['visualization']),
'proc: %dms' % processing,
'latency: %dms' % latency,
])
print('%08d [%s] %s' % (frame_index, fps_str, timing_string))
visualize_thread = threading.Thread(target=_visualize_output, name='visualization')
visualize_thread.daemon = True
visualize_thread.start()
# Do inference forever
infer = model.inference_generator()
while True:
output = next(infer)
for frame_index in np.unique(output['frame_index']):
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
if 'inference' in frame['time']:
frame['time']['inference'] += output['inference_time']
else:
frame['time']['inference'] = output['inference_time']
inferred_stuff_queue.put_nowait(output)
if not visualize_thread.is_alive():
break
if not data_source._open:
break
# Close video recording
if args.record_video and video_out is not None:
video_out_should_stop = True
video_out_queue.put_nowait(None)
with video_out_done:
video_out_done.wait()
__init__.py
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleParserError, AnsibleUndefinedVariable
from ansible.executor import action_write_locks
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.module_utils.six.moves import queue as Queue
from ansible.module_utils.six import iteritems, itervalues, string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.conditional import Conditional
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task_include import TaskInclude
from ansible.plugins import loader as plugin_loader
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
display = Display()
__all__ = ['StrategyBase']
# This list can be an exact match, or start of string bound
# does not accept regex
ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
'discovered_interpreter_',
))
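# e.g. 'discovered_interpreter_python' starts with 'discovered_interpreter_' and is
# therefore always recorded against the delegated host (see _set_always_delegated_facts).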
class StrategySentinel:
pass
def SharedPluginLoaderObj():
'''This only exists for backwards compat, do not use.
'''
display.deprecated('SharedPluginLoaderObj is deprecated, please directly use ansible.plugins.loader',
version='2.11', collection_name='ansible.builtin')
return plugin_loader
_sentinel = StrategySentinel()
def post_process_whens(result, task, templar):
cond = None
if task.changed_when:
cond = Conditional(loader=templar._loader)
cond.when = task.changed_when
result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
if task.failed_when:
if cond is None:
cond = Conditional(loader=templar._loader)
cond.when = task.failed_when
failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
result['failed_when_result'] = result['failed'] = failed_when_result
def results_thread_main(strategy):
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
elif isinstance(result, TaskResult):
with strategy._results_lock:
# only handlers have the listen attr, so this must be a handler
# we split up the results into two queues here to make sure
# handler and regular result processing don't cross wires
if 'listen' in result._task_fields:
strategy._handler_results.append(result)
else:
strategy._results.append(result)
else:
display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
except (IOError, EOFError):
break
except Queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None, do_handlers=False):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator._host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes, do_handlers=do_handlers)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
iterator._host_states[host.name] = prev_host_state
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
# by default, strategies should support throttling but we allow individual
# strategies to disable this and either forego supporting it or managing
# the throttling internally (as `free` does)
ALLOW_BASE_THROTTLING = True
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm._workers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = context.CLIARGS.get('step', False)
self._diff = context.CLIARGS.get('diff', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._pending_handler_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
# this dictionary is used to keep track of hosts that have
# flushed handlers
self._flushed_hosts = dict()
self._results = deque()
self._handler_results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
# Caches for get_host calls, to avoid calling excessively
# These values should be set at the top of the ``run`` method of each
# strategy plugin. Use ``_set_hosts_cache`` to set these values
self._hosts_cache = []
self._hosts_cache_all = []
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def _set_hosts_cache(self, play, refresh=True):
"""Responsible for setting _hosts_cache and _hosts_cache_all
See comment in ``__init__`` for the purpose of these caches
"""
if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
return
if Templar(None).is_template(play.hosts):
_pattern = 'all'
else:
_pattern = play.hosts or 'all'
self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
def cleanup(self):
# close active persistent connections
for sock in itervalues(self._active_connections):
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be ITERATING_COMPLETE by
# this point, though the strategy may not advance the hosts itself.
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
iterator.get_next_task_for_host(self._inventory.hosts[host])
except KeyError:
iterator.get_next_task_for_host(self._inventory.get_host(host))
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = iterator.get_failed_hosts()
unreachable_hosts = self._tqm._unreachable_hosts.keys()
display.debug("running handlers")
handler_result = self.run_handlers(iterator, play_context)
if isinstance(handler_result, bool) and not handler_result:
result |= self._tqm.RUN_ERROR
elif not handler_result:
result |= handler_result
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
# return the appropriate code, depending on the status hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(unreachable_hosts) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(failed_hosts) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
self._set_hosts_cache(play, refresh=False)
ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
return [host for host in self._hosts_cache if host not in ignore]
def get_failed_hosts(self, play):
self._set_hosts_cache(play, refresh=False)
return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by three
# functions: __init__.py::_do_handler_run(), linear.py::run(), and
# free.py::run() so we'd have to add to all three to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# create a templar and template things we need later for the queuing process
templar = Templar(loader=self._loader, variables=task_vars)
try:
throttle = int(templar.template(task.throttle))
except Exception as e:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
# and then queue the new task
try:
# Determine the "rewind point" of the worker list. This means we start
# iterating over the list of workers until the end of the list is found.
# Normally, that is simply the length of the workers list (as determined
# by the forks or serial setting), however a task/block/play may "throttle"
# that limit down.
rewind_point = len(self._workers)
if throttle > 0 and self.ALLOW_BASE_THROTTLING:
if task.run_once:
display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
else:
if throttle <= rewind_point:
display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
rewind_point = throttle
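# Illustration (hypothetical numbers): with 10 forks and a task-level
# 'throttle: 3', rewind_point drops from 10 to 3, so the selection loop
# below only ever cycles through workers 0-2 for this task.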
queued = False
starting_worker = self._cur_worker
while True:
if self._cur_worker >= rewind_point:
self._cur_worker = 0
worker_prc = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader)
self._workers[self._cur_worker] = worker_prc
self._tqm.send_callback('v2_runner_on_start', host, task)
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= rewind_point:
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
if isinstance(task, Handler):
self._pending_handler_results += 1
else:
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
else:
host_list = [task_host.name]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
return [host_name or task.delegate_to]
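# Precedence note: the delegated host recorded by the executor in
# '_ansible_delegated_vars' (as resolved at execution time) wins; the raw
# task.delegate_to value is only used as a fallback.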
def _set_always_delegated_facts(self, result, task):
"""Sets host facts for ``delegate_to`` hosts for facts that should
always be delegated
This operation mutates ``result`` to remove the always delegated facts
See ``ALWAYS_DELEGATE_FACT_PREFIXES``
"""
if task.delegate_to is None:
return
facts = result['ansible_facts']
always_keys = set()
_add = always_keys.add
for fact_key in facts:
for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
if fact_key.startswith(always_key):
_add(fact_key)
if always_keys:
_pop = facts.pop
always_facts = {
'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
}
host_list = self.get_delegated_hosts(result, task)
_set_host_facts = self._variable_manager.set_host_facts
for target_host in host_list:
_set_host_facts(target_host, always_facts)
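# Illustration (hypothetical fact names): if ALWAYS_DELEGATE_FACT_PREFIXES holds
# 'discovered_interpreter_', a result such as
#   {'ansible_facts': {'discovered_interpreter_python': '/usr/bin/python3', 'foo': 1}}
# has the interpreter fact popped out of 'result' and recorded only on the
# delegate_to host(s), while 'foo' stays with the task host.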
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None, do_handlers=False):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
handler_templar = Templar(self._loader)
def get_original_host(host_name):
# FIXME: this should not need x2 _inventory
host_name = to_text(host_name)
if host_name in self._inventory.hosts:
return self._inventory.hosts[host_name]
else:
return self._inventory.get_host(host_name)
def search_handler_blocks_by_name(handler_name, handler_blocks):
# iterate in reversed order since last handler loaded with the same name wins
for handler_block in reversed(handler_blocks):
for handler_task in handler_block.block:
if handler_task.name:
if not handler_task.cached_name:
if handler_templar.is_template(handler_task.name):
handler_templar.available_variables = self._variable_manager.get_vars(play=iterator._play,
task=handler_task,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
handler_task.name = handler_templar.template(handler_task.name)
handler_task.cached_name = True
try:
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
candidates = (
handler_task.name,
handler_task.get_name(include_role_fqcn=False),
handler_task.get_name(include_role_fqcn=True),
)
if handler_name in candidates:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable):
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
continue
return None
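# Illustration (hypothetical role/handler names): a handler 'restart nginx'
# defined in role 'web' of collection 'acme.stack' would typically match any of
# the candidate forms built above: 'restart nginx', 'web : restart nginx', or
# 'acme.stack.web : restart nginx'.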
cur_pass = 0
while True:
try:
self._results_lock.acquire()
if do_handlers:
task_result = self._handler_results.popleft()
else:
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
# get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
original_host = get_original_host(task_result._host)
queue_cache_entry = (original_host.name, task_result._task)
found_task = self._queued_task_cache.get(queue_cache_entry)['task']
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._host = original_host
task_result._task = original_task
# send callbacks for 'non final' results
if '_ansible_retry' in task_result._result:
self._tqm.send_callback('v2_runner_retry', task_result)
continue
elif '_ansible_item_result' in task_result._result:
if task_result.is_failed() or task_result.is_unreachable():
self._tqm.send_callback('v2_runner_item_on_failed', task_result)
elif task_result.is_skipped():
self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
else:
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
self._tqm.send_callback('v2_runner_item_on_ok', task_result)
continue
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
state, _ = iterator.get_next_task_for_host(h, peek=True)
iterator.mark_host_failed(h)
state, new_task = iterator.get_next_task_for_host(h, peek=True)
else:
iterator.mark_host_failed(original_host)
# grab the current state and if we're iterating on the rescue portion
# of a block then we save the failed task in a special var for use
# within the rescue/always
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
# Use of get_active_state() here helps detect proper state if, say, we are in a rescue
# block from an included file (include_tasks). In a non-included rescue case, a rescue
# that starts with a new 'block' will have an active state of ITERATING_TASKS, so we also
# check the current state block tree to see if any blocks are rescuing.
if state and (iterator.get_active_state(state).run_state == iterator.ITERATING_RESCUE or
iterator.is_any_block_rescuing(state)):
self._tqm._stats.increment('rescued', original_host.name)
self._variable_manager.set_nonpersistent_facts(
original_host.name,
dict(
ansible_failed_task=original_task.serialize(),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('failures', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
ignore_unreachable = original_task.ignore_unreachable
if not ignore_unreachable:
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
else:
self._tqm._stats.increment('skipped', original_host.name)
task_result._result['skip_reason'] = 'Host %s is unreachable' % original_host.name
self._tqm._stats.increment('dark', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
for listening_handler_block in iterator._play.handlers:
for listening_handler in listening_handler_block.block:
listeners = getattr(listening_handler, 'listen', []) or []
if not listeners:
continue
listeners = listening_handler.get_validated_value(
'listen', listening_handler._valid_attrs['listen'], listeners, handler_templar
)
if handler_name not in listeners:
continue
else:
found = True
if listening_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._add_host(new_host_info, result_item)
post_process_whens(result_item, original_task, handler_templar)
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._add_group(original_host, result_item)
post_process_whens(result_item, original_task, handler_templar)
if 'ansible_facts' in result_item:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
# Set facts that should always be on the delegated hosts
self._set_always_delegated_facts(result_item, original_task)
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action in C._ACTION_INCLUDE_VARS:
for (var_name, var_value) in iteritems(result_item['ansible_facts']):
# find the host we're actually referring to here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
# so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
# to avoid issues with precedence and confusion with set_fact normal operation,
# we set BOTH fact and nonpersistent_facts (aka hostvar)
# when fact is retrieved from cache in subsequent operations it will have the lower precedence,
# but for playbook setting it the 'higher' precedence is kept
is_set_fact = original_task.action in C._ACTION_SET_FACT
if not is_set_fact or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if is_set_fact:
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
# register final results
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
if do_handlers:
self._pending_handler_results -= 1
else:
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role.get_name()]):
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_handler_results(self, iterator, handler, notified_hosts):
'''
Wait for the handler tasks to complete, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
handler_results = 0
display.debug("waiting for handler results...")
while (self._pending_handler_results > 0 and
handler_results < len(notified_hosts) and
not self._tqm._terminated):
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator, do_handlers=True)
ret_results.extend(results)
handler_results += len([
r._host for r in results if r._host in notified_hosts and
r.task_name == handler.name])
if self._pending_handler_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending handlers, returning what we have")
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _add_host(self, host_info, result_item):
'''
Helper function to add a new host to inventory based on a task result.
'''
changed = False
if host_info:
host_name = host_info.get('host_name')
# Check if host in inventory, add if not
if host_name not in self._inventory.hosts:
self._inventory.add_host(host_name, 'all')
self._hosts_cache_all.append(host_name)
changed = True
new_host = self._inventory.hosts.get(host_name)
# Set/update the vars for this host
new_host_vars = new_host.get_vars()
new_host_combined_vars = combine_vars(new_host_vars, host_info.get('host_vars', dict()))
if new_host_vars != new_host_combined_vars:
new_host.vars = new_host_combined_vars
changed = True
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if group_name not in self._inventory.groups:
group_name = self._inventory.add_group(group_name)
changed = True
new_group = self._inventory.groups[group_name]
if new_group.add_host(self._inventory.hosts[host_name]):
changed = True
# reconcile inventory, ensures inventory rules are followed
if changed:
self._inventory.reconcile_inventory()
result_item['changed'] = changed
def _add_group(self, host, result_item):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
changed = False
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
real_host = self._inventory.hosts.get(host.name)
if real_host is None:
if host.name == self._inventory.localhost.name:
real_host = self._inventory.localhost
else:
raise AnsibleError('%s cannot be matched in inventory' % host.name)
group_name = result_item.get('add_group')
parent_group_names = result_item.get('parent_groups', [])
if group_name not in self._inventory.groups:
group_name = self._inventory.add_group(group_name)
for name in parent_group_names:
if name not in self._inventory.groups:
# create the new group and add it to inventory
self._inventory.add_group(name)
changed = True
group = self._inventory.groups[group_name]
for parent_group_name in parent_group_names:
parent_group = self._inventory.groups[parent_group_name]
new = parent_group.add_child_group(group)
if new and not changed:
changed = True
if real_host not in group.get_hosts():
changed = group.add_host(real_host)
if group not in real_host.get_groups():
changed = real_host.add_group(group)
if changed:
self._inventory.reconcile_inventory()
result_item['changed'] = changed
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars.copy()
temp_vars.update(included_file._vars)
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
# pop tags out of the include args, if they were specified there, and assign
# them to the include. If the include already had tags specified, we raise an
# error so that users know not to specify them both ways
tags = included_file._task.vars.pop('tags', [])
if isinstance(tags, string_types):
tags = tags.split(',')
if len(tags) > 0:
if len(included_file._task.tags) > 0:
raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
"Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
obj=included_file._task._ds)
display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option",
version='2.12', collection_name='ansible.builtin')
included_file._task.tags = tags
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=ti_copy.build_parent_block(),
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleError as e:
if isinstance(e, AnsibleFileNotFound):
reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
else:
reason = to_text(e)
# mark all of the hosts including this file as failed, send callbacks,
# and increment the stats for this host
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
return []
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def run_handlers(self, iterator, play_context):
'''
Runs handlers on those hosts which have been notified.
'''
result = self._tqm.RUN_OK
for handler_block in iterator._play.handlers:
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
if handler.notified_hosts:
result = self._do_handler_run(handler, handler.get_name(), iterator=iterator, play_context=play_context)
if not result:
break
return result
def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
# FIXME: need to use iterator.get_failed_hosts() instead?
# if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
if notified_hosts is None:
notified_hosts = handler.notified_hosts[:]
# strategy plugins that filter hosts need access to the iterator to identify failed hosts
failed_hosts = self._filter_notified_failed_hosts(iterator, notified_hosts)
notified_hosts = self._filter_notified_hosts(notified_hosts)
notified_hosts += failed_hosts
if len(notified_hosts) > 0:
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
bypass_host_loop = False
try:
action = plugin_loader.action_loader.get(handler.action, class_only=True, collection_list=handler.collections)
if getattr(action, 'BYPASS_HOST_LOOP', False):
bypass_host_loop = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
host_results = []
for host in notified_hosts:
if not iterator.is_failed(host) or iterator._play.force_handlers:
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=handler,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
if not handler.cached_name:
handler.name = templar.template(handler.name)
handler.cached_name = True
self._queue_task(host, handler, task_vars, play_context)
if templar.template(handler.run_once) or bypass_host_loop:
break
# collect the results from the handler run
host_results = self._wait_on_handler_results(iterator, handler, notified_hosts)
included_files = IncludedFile.process_include_results(
host_results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
result = True
if len(included_files) > 0:
for included_file in included_files:
try:
new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
# for every task in each block brought in by the include, add the list
# of hosts which included the file to the notified_handlers dict
for block in new_blocks:
iterator._play.handlers.append(block)
for task in block.block:
task_name = task.get_name()
display.debug("adding task '%s' included in handler '%s'" % (task_name, handler_name))
task.notified_hosts = included_file._hosts[:]
result = self._do_handler_run(
handler=task,
handler_name=task_name,
iterator=iterator,
play_context=play_context,
notified_hosts=included_file._hosts[:],
)
if not result:
break
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
display.warning(to_text(e))
continue
# remove hosts from notification list
handler.notified_hosts = [
h for h in handler.notified_hosts
if h not in notified_hosts]
display.debug("done running handlers, result is: %s" % result)
return result
def _filter_notified_failed_hosts(self, iterator, notified_hosts):
return []
def _filter_notified_hosts(self, notified_hosts):
'''
Filter notified hosts accordingly to strategy
'''
# As main strategy is linear, we do not filter hosts
# We return a copy to avoid race conditions
return notified_hosts[:]
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
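# Illustration: a playbook entry such as "- meta: flush_handlers" typically
# arrives here with args == {'_raw_params': 'flush_handlers'}.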
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
# The top-level conditions should only compare meta_action
if meta_action == 'noop':
# FIXME: issue a callback for the noop here?
if task.when:
self._cond_not_supported_warn(meta_action)
msg = "noop"
elif meta_action == 'flush_handlers':
if task.when:
self._cond_not_supported_warn(meta_action)
self._flushed_hosts[target_host] = True
self.run_handlers(iterator, play_context)
self._flushed_hosts[target_host] = False
msg = "ran handlers"
elif meta_action == 'refresh_inventory':
if task.when:
self._cond_not_supported_warn(meta_action)
self._inventory.refresh_inventory()
self._set_hosts_cache(iterator._play)
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
msg = "cleared host errors"
else:
skipped = True
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
msg = "ending play"
elif meta_action == 'end_host':
if _evaluate_conditional(target_host):
iterator._host_states[target_host.name].run_state = iterator.ITERATING_COMPLETE
iterator._play._removed_hosts.append(target_host.name)
msg = "ending play for %s" % target_host.name
else:
skipped = True
msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
play_context.update_vars(all_vars)
if task.when:
self._cond_not_supported_warn(meta_action)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
play_context.set_attributes_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
else:
result['changed'] = False
display.vv("META: %s" % msg)
return [TaskResult(target_host, task, result)]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
hosts_left.append(self._inventory.hosts[host])
except KeyError:
hosts_left.append(self._inventory.get_host(host))
return hosts_left
def update_active_connections(self, results):
''' updates the current active persistent connections '''
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class Debugger(cmd.Cmd):
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
templar = Templar(None, shared_loader_obj=None, variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
self.scope['task'] = task
do_u = do_update_task
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
|
download.py
|
#!/usr/bin/env python3
"""
Usage: download.py host port client_id file
"""
import os
import sys
import time
import json
import threading
import _thread
import hashlib
import base64
import paho.mqtt.client as mqtt
summary = {}
chunks = []
file_array = b''
done = False
lock = threading.Lock()
def md5(data):
return hashlib.md5(data).hexdigest()
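# Expected summary payload shape, inferred from download() below (field values
# are hypothetical):
# {
#   "file": {"hash": "<md5 of the reassembled file>"},
#   "chunks": [{"number": 0, "hash": "<md5 of the base64-encoded chunk>"}, ...]
# }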
def download(client, file):
global done
global chunks
global file_array
print("downloading file:", file)
client_id = client._client_id.decode()
topic = client_id + "/" + file
# Subscribe to file info topic.
client.subscribe(topic, qos=1)
print("subscribed to firmware topic: " + topic)
# Wait to retrieve all chunks.
while True:
time.sleep(1)
if len(summary["chunks"]) == len(chunks):
break
# Check and decode all the chunks.
for z in summary["chunks"]:
for c in chunks:
if topic + "/" + str(z["number"]) == c["topic"]:
try:
fhash = md5(c["data"])
if z["hash"] == fhash:
data = base64.b64decode(c["data"])
file_array = b''.join([file_array, data])
else:
print("chunk corrupted:", c["topic"])
except Exception as e:
print("error decoding chunk", e)
os._exit(1)
# Check combined chunks hash and write it to file.
if summary["file"]["hash"] == md5(file_array):
try:
f = open('./'+file+"_download", 'wb')
f.write(file_array)
f.close()
except Exception as e:
print("error writing file", e)
os._exit(1)
print("download finished for file:", file)
os._exit(0)
def read_chunk(client, file, msg):
global summary
global chunks
client_id = client._client_id.decode()
# Get file summary chunk.
try:
if msg.topic == client_id + "/" + file:
summary = json.loads(msg.payload)
# print(json.dumps(summary, indent=4, sort_keys=True))
for c in summary["chunks"]:
client.subscribe(client_id+"/"+file+"/"+str(c["number"]), qos=1)
else:
lock.acquire()
chunks.append({
"topic": msg.topic,
"data": msg.payload,
})
client.unsubscribe(msg.topic)
print("read file chunk:", msg.topic)
lock.release()
except Exception as e:
print("error reading chunk:", e)
os._exit(1)
def main(host="localhost", port=1883, client_id="client_id", file="test.txt"):
if not os.path.isfile(file):
print("error no file:", file)
return 1
def on_connect(c, u, f, rc):
print("connected to", host, port, "with result code:", str(rc))
t = threading.Thread(target=download, args=(c, file,))
t.daemon = True
t.start()
def on_message(c, u, msg):
_thread.start_new_thread(read_chunk, (c, file, msg,))
client = mqtt.Client(client_id)
client.on_connect = on_connect
client.on_message = on_message
client.connect(host, port, 60)
client.loop_forever()
if __name__ == "__main__":
if len(sys.argv) == 5:
main(
host=sys.argv[1],
port=int(sys.argv[2]),
client_id=sys.argv[3],
file=sys.argv[4],
)
else:
print(__doc__)
|
beauty.py
|
import sys
import json
import traceback
import re
import random
import threading
from datetime import date, timedelta
import datetime
import time
from PyPtt import PTT
Woman = []
in_update = False
def get_pw():
try:
with open('account.txt') as AccountFile:
account = json.load(AccountFile)
id = account['ID']
password = account['Password']
except FileNotFoundError:
print('Please note PTT ID and Password in account.txt')
print('{"ID":"YourID", "Password":"YourPassword"}')
sys.exit()
return id, password
def update():
global Woman
global in_update
in_update = True
woman_temp = []
ptt_id, ptt_pw = get_pw()
ptt_bot = PTT.API(
# LogLevel=PTT.LogLevel.TRACE,
# LogLevel=PTT.LogLevel.DEBUG,
)
try:
ptt_bot.login(
ptt_id,
ptt_pw,
kick_other_login=True
)
except PTT.Exceptions.LoginError:
ptt_bot.log('Login failed')
return
crawl_list = [
('Beauty', PTT.data_type.post_search_type.PUSH, '50'),
]
max_picture = 2000
for (board, search_type, condition) in crawl_list:
try:
index = ptt_bot.get_newest_index(
PTT.data_type.index_type.BBS,
board,
search_type=search_type,
search_condition=condition,
)
# print(f'{board} newest post index {Index}')
random_post_index = [i for i in range(index - max_picture, index + 1)]
random.shuffle(random_post_index)
catch_pic = 0
for index in random_post_index:
# print(f'About to parse post {IndexList.index(index) + 1} (index {index}); {Piture} pictures collected so far')
post = ptt_bot.get_post(
board,
post_index=index,
search_type=search_type,
search_condition=condition
)
if post.delete_status != PTT.data_type.post_delete_status.NOT_DELETED:
continue
if '[正妹]' not in post.title and '[廣告]' not in post.title:
continue
# print(Post.getContent())
content = post.content
content = content[:content.find('--')]
# print(content)
all_pic_id = re.findall(
r'https://(.+).jpg',
content
)
for album in all_pic_id:
pic_url = f'https://{album}.jpg'
if pic_url not in woman_temp:
woman_temp.append(pic_url)
catch_pic += 1
if catch_pic >= max_picture:
break
if catch_pic >= max_picture:
break
# all_pic_id = re.findall(
# r'https://(.+).png',
# content
# )
#
# for album in all_pic_id:
# pic_url = f'https://{album}.png'
#
# if pic_url not in woman_temp:
# woman_temp.append(pic_url)
# catch_pic += 1
#
# if catch_pic >= max_picture:
# break
# if catch_pic >= max_picture:
# break
# all_pic_id = re.findall(
# r'https://(.+).gif',
# content
# )
#
# for album in all_pic_id:
# pic_url = f'https://{album}.gif'
#
# if pic_url not in woman_temp:
# woman_temp.append(pic_url)
# catch_pic += 1
#
# if catch_pic >= max_picture:
# break
# if catch_pic >= max_picture:
# break
# print(f'Collected {catch_pic} pictures')
except Exception as e:
traceback.print_tb(e.__traceback__)
print(e)
break
# print('=' * 50)
ptt_bot.logout()
Woman = woman_temp
in_update = False
print('Update finished')
# print(f'Woman length {len(Woman)}')
def timer():
while True:
update()
now = datetime.datetime.now()
tomorrow = date.today() + timedelta(days=1)
refresh_time = datetime.datetime(
tomorrow.year,
tomorrow.month,
tomorrow.day,
6,
0,
0)
interval = refresh_time - now
sec = interval.days * 24 * 3600 + interval.seconds
print(interval)
print(f'Refreshing Beauty board data in {sec} seconds')
time.sleep(sec)
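# Example: if update() finishes at 23:00, refresh_time is 06:00 the next day,
# so the thread sleeps roughly seven hours before crawling the board again.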
def start(test_mode=False):
t = threading.Thread(target=timer)
t.daemon = True
t.start()
if test_mode:
t.join()
temp_list = []
def pickup(n=1):
global temp_list
if len(temp_list) < n:
global Woman
temp_list = Woman.copy()
result = random.sample(temp_list, n)
# print(f'{len(WomanTemplist)} pictures left to draw from')
return result
if __name__ == "__main__":
# update()
# print(pickup())
start(test_mode=True)
|
bench.py
|
#!/usr/bin/env python
import collections
import os
import subprocess
import sys
import time
import threading
class TimeoutException(Exception):
pass
class ProcDiedException(Exception):
pass
def check_call(*args, **kw):
timeout = kw.pop("timeout", None)
kw['stdout'] = subprocess.PIPE
kw['stderr'] = subprocess.STDOUT
if timeout is None:
try:
return subprocess.check_call(*args, **kw)
except subprocess.CalledProcessError:
raise ProcDiedException()
done_by = time.time() + timeout
p = subprocess.Popen(*args, **kw)
def inner():
p.communicate()
p.wait()
t = threading.Thread(target=inner)
t.setDaemon(True)
t.start()
t.join(done_by - time.time())
if p.poll() is None:
p.terminate()
time.sleep(.01)
if p.poll() is None:
p.kill()
time.sleep(.01)
assert p.poll() is not None
raise TimeoutException()
if p.poll() != 0:
raise ProcDiedException()
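# Example (hypothetical command): check_call(["sleep", "10"], timeout=1.0)
# terminates (then kills) the child after ~1s and raises TimeoutException;
# a non-zero exit status instead raises ProcDiedException.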
MAX_RUNTIME = 5.0
MAX_CPYTHON_TIME = 30.0
MAX_BUILDTIME = 25.0
MAX_TIME = 10.0
MAX_RUNS = 5
field_width = 19
RUNNERS = []
def make_runners():
class ICBDRunner(object):
def make(self, fn):
assert fn.endswith(".py")
# check_call("rm -fv -- %(p)s.out* %(p)s*gen.ll %(p)s*opt.ll %(p)s*.s" % {'p':fn[:-3]}, shell=True)
check_call(["make", fn[:-3] + ".out3", "OPTLEVEL=-O3"], timeout=MAX_BUILDTIME)
def get_cmd(self, fn):
return os.path.join('.', fn[:-3] + ".out3")
class ShedskinRunner(object):
def make(self, fn):
assert fn.endswith(".py")
check_call(["make", fn[:-3] + ".shed"], timeout=MAX_BUILDTIME)
def get_cmd(self, fn):
return os.path.join('.', fn[:-3] + ".shed")
class CppRunner(object):
def make(self, fn):
assert fn.endswith(".py")
cpp = fn[:-3] + "_byhand.cpp"
if not os.path.exists(cpp):
raise ProcDiedException()
check_call(["g++", "-O3", "-funroll-loops", cpp, "-o", fn[:-3]], timeout=MAX_BUILDTIME)
def get_cmd(self, fn):
assert fn.endswith(".py")
cpp = fn[:-3] + "_byhand.cpp"
if not os.path.exists(cpp):
return None
return float("nan")
return os.path.join('.', fn[:-3])
class InterpretedRunner(object):
def __init__(self, interpreter):
self._int = interpreter
if interpreter == "python":
self.timeout = MAX_CPYTHON_TIME
else:
self.timeout = MAX_RUNTIME
def make(self, fn):
pass
def get_cmd(self, fn):
return "%s %s" % (self._int, fn)
class CPythonRunner(InterpretedRunner):
def __init__(self):
super(CPythonRunner, self).__init__("python")
class PypyRunner(InterpretedRunner):
def __init__(self):
super(PypyRunner, self).__init__("pypy")
RUNNERS.append(('cpython', CPythonRunner()))
RUNNERS.append(('pypy', PypyRunner()))
RUNNERS.append(('shedskin', ShedskinRunner()))
RUNNERS.append(('icbd', ICBDRunner()))
RUNNERS.append(('cpp', CppRunner()))
make_runners()
_cfgs = {}
def get_config(fn):
config_fn = os.path.join(os.path.dirname(fn), "tests.cfg")
if config_fn not in _cfgs:
if not os.path.exists(config_fn):
_cfgs[config_fn] = None
else:
_cfgs[config_fn] = open(config_fn).read().split('\n')
cfg = _cfgs[config_fn]
if cfg is None:
return "%(prog)s"
bn = os.path.basename(fn)
for l in cfg:
if l.startswith(bn + ":"):
cmd = l[len(bn) + 1:].strip()
return cmd
return "%(prog)s"
if __name__ == "__main__":
progs = sys.argv[1:]
if not progs:
progs = [
"benchmarks/micro/12.py",
"benchmarks/micro/25.py",
"benchmarks/micro/31.py",
"benchmarks/micro/33.py",
"benchmarks/micro/66.py",
"benchmarks/cj/2012_1a_b.py",
"benchmarks/cj/2012_2_a.py",
"benchmarks/modified/nbody.py",
"benchmarks/modified/raytrace.py",
"benchmarks/modified/pystone.py",
"benchmarks/modified/fannkuch.py",
"benchmarks/modified/float.py",
"benchmarks/us/tuple_gc_hell.py",
]
q = collections.deque(progs)
files = []
while q:
fn = q.popleft()
if os.path.isdir(fn):
for n in sorted(os.listdir(fn)):
q.appendleft(os.path.join(fn, n))
elif fn.endswith(".py"):
cmd = get_config(fn)
files.append((fn, cmd))
else:
print "Skipping", fn
# files.sort()
check_call(["make", "compiler", "-j4"])
fn_len = max([len(f[0]) for f in files])
sys.stdout.write(" " * fn_len)
for name, runner in RUNNERS:
sys.stdout.write(name.rjust(field_width))
print
for fn, runline in files:
sys.stdout.write(fn.rjust(fn_len))
sys.stdout.flush()
cpython_time = None
for name, runner in RUNNERS:
start = time.time()
try:
runner.make(fn)
except TimeoutException:
sys.stdout.write("\033[33m" + "BT".rjust(field_width) + "\033[0m")
sys.stdout.flush()
continue
except ProcDiedException:
sys.stdout.write("\033[31m" + "BE".rjust(field_width) + "\033[0m")
sys.stdout.flush()
continue
# print "%.1fms to build" % ((time.time() - start) * 1000.0)
failed = False
elapsed = []
for i in xrange(MAX_RUNS):
start = time.time()
try:
prog = runner.get_cmd(os.path.basename(fn))
cmd = runline % {
'prog':prog
}
timeout = MAX_CPYTHON_TIME if name == "cpython" else MAX_RUNTIME
check_call(cmd, timeout=timeout, cwd=os.path.dirname(fn), shell=True)
except TimeoutException:
if name == "cpython":
cpython_time = float("inf")
sys.stdout.write("\033[33m" + "RT".rjust(field_width) + "\033[0m")
sys.stdout.flush()
failed = True
break
except ProcDiedException:
if name == "cpython":
cpython_time = float("inf")
sys.stdout.write("\033[1;31m" + "RE".rjust(field_width) + "\033[0m")
sys.stdout.flush()
failed = True
break
elapsed.append(time.time() - start)
if sum(elapsed) > MAX_TIME and len(elapsed) >= 2:
break
if failed:
continue
runtime = min(elapsed)
if name == "cpython":
cpython_time = runtime
sys.stdout.write(("%.1fms (%.2fx)" % (runtime * 1000.0, cpython_time / runtime)).rjust(field_width))
sys.stdout.flush()
# print "%.1fms - %.1fms (%d trials)" % (min(elapsed) * 1000.0, max(elapsed) * 1000.0, len(elapsed))
print
|
channel_inner_service.py
|
"""Channel Inner Service."""
import json
import multiprocessing as mp
import signal
from asyncio import Condition
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from typing import Union, Dict, List, Tuple
from earlgrey import *
from pkg_resources import parse_version
from loopchain import configure as conf
from loopchain import utils as util
from loopchain.baseservice import (BroadcastCommand, BroadcastScheduler, BroadcastSchedulerFactory,
ScoreResponse)
from loopchain.baseservice.module_process import ModuleProcess, ModuleProcessProperties
from loopchain.blockchain.blocks import Block, BlockSerializer
from loopchain.blockchain.exception import *
from loopchain.blockchain.transactions import (Transaction, TransactionSerializer, TransactionVerifier,
TransactionVersioner)
from loopchain.blockchain.types import Hash32
from loopchain.blockchain.votes import Vote
from loopchain.channel.channel_property import ChannelProperty
from loopchain.jsonrpc.exception import JsonError
from loopchain.protos import message_code
from loopchain.qos.qos_controller import QosController, QosCountControl
from loopchain.utils.message_queue import StubCollection
if TYPE_CHECKING:
from loopchain.channel.channel_service import ChannelService
class ChannelTxCreatorInnerTask:
def __init__(self, channel_name: str, peer_target: str, tx_versioner: TransactionVersioner):
self.__channel_name = channel_name
self.__properties = dict()
self.__tx_versioner = tx_versioner
scheduler = BroadcastSchedulerFactory.new(channel=channel_name,
self_target=peer_target,
is_multiprocessing=True)
scheduler.start()
self.__broadcast_scheduler = scheduler
self.__qos_controller = QosController()
self.__qos_controller.append(QosCountControl(limit_count=conf.TPS_LIMIT_PER_SEC))
def __pre_validate(self, tx: Transaction):
if not util.is_in_time_boundary(tx.timestamp, conf.TIMESTAMP_BOUNDARY_SECOND):
raise TransactionOutOfTimeBound(tx, util.get_now_time_stamp())
def cleanup(self):
self.__broadcast_scheduler.stop()
self.__broadcast_scheduler.wait()
self.__broadcast_scheduler: BroadcastScheduler = None
@message_queue_task
async def update_properties(self, properties: dict):
self.__properties.update(properties)
@message_queue_task
async def create_icx_tx(self, kwargs: dict):
tx_hash = None
relay_target = None
if self.__qos_controller.limit():
util.logger.debug(f"Out of TPS limit. tx={kwargs}")
return message_code.Response.fail_out_of_tps_limit, tx_hash, relay_target
node_type = self.__properties.get('node_type', None)
if node_type is None:
util.logger.warning("Node type has not been set yet.")
return NodeInitializationError.message_code, tx_hash, relay_target
elif node_type != conf.NodeType.CommunityNode.value:
relay_target = self.__properties.get('relay_target', None)
return message_code.Response.fail_no_permission, tx_hash, relay_target
result_code = None
exception = None
tx = None
try:
tx_version, tx_type = self.__tx_versioner.get_version(kwargs)
ts = TransactionSerializer.new(tx_version, tx_type, self.__tx_versioner)
tx = ts.from_(kwargs)
nid = self.__properties.get('nid', None)
if nid is None:
util.logger.warning(f"NID has not been set yet.")
raise NodeInitializationError(tx.hash.hex())
tv = TransactionVerifier.new(tx_version, tx_type, self.__tx_versioner)
tv.pre_verify(tx, nid=nid)
self.__pre_validate(tx)
logging.debug(f"create icx input : {kwargs}")
self.__broadcast_scheduler.schedule_job(BroadcastCommand.CREATE_TX, (tx, self.__tx_versioner))
return message_code.Response.success, tx.hash.hex(), relay_target
except MessageCodeError as e:
result_code = e.message_code
exception = e
traceback.print_exc()
except BaseException as e:
result_code = TransactionInvalidError.message_code
exception = e
traceback.print_exc()
finally:
if exception:
logging.warning(f"create_icx_tx: tx restore fail.\n\n"
f"kwargs({kwargs})\n\n"
f"tx({tx})\n\n"
f"exception({exception})")
return result_code, tx_hash, relay_target
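# Return contract used above: (message_code, tx_hash hex string or None,
# relay_target or None). For non-community nodes, fail_no_permission is paired
# with 'relay_target', presumably so the caller can forward the transaction there.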
async def schedule_job(self, command, params):
self.__broadcast_scheduler.schedule_job(command, params)
class ChannelTxCreatorInnerService(MessageQueueService[ChannelTxCreatorInnerTask]):
TaskType = ChannelTxCreatorInnerTask
def __init__(self, broadcast_queue: mp.Queue, amqp_target, route_key, username=None, password=None, **task_kwargs):
super().__init__(amqp_target, route_key, username, password, **task_kwargs)
self.__is_running = True
self.__broadcast_queue = broadcast_queue
async def _stop_loop():
self.loop.stop()
def _schedule_job():
while True:
command, params = broadcast_queue.get()
if not self.__is_running or command is None:
break
asyncio.run_coroutine_threadsafe(self._task.schedule_job(command, params), self.loop)
while not broadcast_queue.empty():
broadcast_queue.get()
asyncio.run_coroutine_threadsafe(_stop_loop(), self.loop)
self.__broadcast_thread = threading.Thread(target=_schedule_job)
self.__broadcast_thread.start()
def _callback_connection_lost_callback(self, connection: RobustConnection):
util.exit_and_msg("MQ Connection lost.")
def stop(self):
self.__broadcast_queue.put((None, None))
self.__is_running = False # even if broadcast queue has some items, the loop will be stopped immediately.
def cleanup(self):
self.__broadcast_thread.join()
self._task.cleanup()
@staticmethod
def main(channel_name: str, amqp_target: str, amqp_key: str, peer_target: str,
tx_versioner: TransactionVersioner, broadcast_queue: mp.Queue, properties: ModuleProcessProperties=None):
if properties is not None:
ModuleProcess.load_properties(properties, "txcreator")
logging.info(f"Channel TX Creator start")
broadcast_queue.cancel_join_thread()
queue_name = conf.CHANNEL_TX_CREATOR_QUEUE_NAME_FORMAT.format(channel_name=channel_name, amqp_key=amqp_key)
service = ChannelTxCreatorInnerService(broadcast_queue,
amqp_target,
queue_name,
conf.AMQP_USERNAME,
conf.AMQP_PASSWORD,
channel_name=channel_name,
peer_target=peer_target,
tx_versioner=tx_versioner)
def _on_signal(signal_num):
logging.error(f"Channel TX Creator has been received signal({signal_num})")
service.stop()
service.loop.add_signal_handler(signal.SIGTERM, _on_signal, (signal.SIGTERM,))
service.loop.add_signal_handler(signal.SIGINT, _on_signal, (signal.SIGINT,))
service.serve(connection_attempts=conf.AMQP_CONNECTION_ATTEMPTS,
retry_delay=conf.AMQP_RETRY_DELAY, exclusive=True)
logging.info("ChannelTxCreatorInnerService: started")
service.serve_all()
service.cleanup()
service.loop.close()
logging.info("ChannelTxCreatorInnerService: stopped")
class ChannelTxCreatorInnerStub(MessageQueueStub[ChannelTxCreatorInnerTask]):
TaskType = ChannelTxCreatorInnerTask
def _callback_connection_lost_callback(self, connection: RobustConnection):
util.exit_and_msg("MQ Connection lost.")
class ChannelTxReceiverInnerTask:
def __init__(self, tx_versioner: TransactionVersioner, tx_queue: mp.Queue):
self.__nid: int = None
self.__tx_versioner = tx_versioner
self.__tx_queue = tx_queue
@message_queue_task
async def update_properties(self, properties: dict):
try:
self.__nid = properties['nid']
except KeyError:
pass
@message_queue_task(type_=MessageQueueType.Worker)
def add_tx_list(self, request) -> tuple:
if self.__nid is None:
response_code = message_code.Response.fail
message = "Node initialization is not completed."
return response_code, message
tx_list = []
for tx_item in request.tx_list:
tx_json = json.loads(tx_item.tx_json)
tx_version, tx_type = self.__tx_versioner.get_version(tx_json)
ts = TransactionSerializer.new(tx_version, tx_type, self.__tx_versioner)
tx = ts.from_(tx_json)
tv = TransactionVerifier.new(tx_version, tx_type, self.__tx_versioner)
tv.pre_verify(tx, nid=self.__nid)
tx.size(self.__tx_versioner)
tx_list.append(tx)
tx_len = len(tx_list)
if tx_len == 0:
response_code = message_code.Response.fail
message = "fail tx validate while AddTxList"
else:
self.__tx_queue.put(tx_list)
response_code = message_code.Response.success
message = f"success ({len(tx_list)})/({len(request.tx_list)})"
return response_code, message
class ChannelTxReceiverInnerService(MessageQueueService[ChannelTxReceiverInnerTask]):
TaskType = ChannelTxReceiverInnerTask
def __init__(self, amqp_target, route_key, username=None, password=None, **task_kwargs):
super().__init__(amqp_target, route_key, username, password, **task_kwargs)
def _callback_connection_lost_callback(self, connection: RobustConnection):
util.exit_and_msg("MQ Connection lost.")
@staticmethod
def main(channel_name: str, amqp_target: str, amqp_key: str,
tx_versioner: TransactionVersioner, tx_queue: mp.Queue, properties: ModuleProcessProperties=None):
if properties is not None:
ModuleProcess.load_properties(properties, "txreceiver")
logging.info(f"Channel TX Receiver start")
tx_queue.cancel_join_thread()
queue_name = conf.CHANNEL_TX_RECEIVER_QUEUE_NAME_FORMAT.format(channel_name=channel_name, amqp_key=amqp_key)
service = ChannelTxReceiverInnerService(amqp_target, queue_name,
conf.AMQP_USERNAME, conf.AMQP_PASSWORD,
tx_versioner=tx_versioner, tx_queue=tx_queue)
async def _stop_loop():
service.loop.stop()
def _on_signal(signal_num):
logging.error(f"Channel TX Receiver has been received signal({signal_num})")
asyncio.run_coroutine_threadsafe(_stop_loop(), service.loop)
service.loop.add_signal_handler(signal.SIGTERM, _on_signal, (signal.SIGTERM,))
service.loop.add_signal_handler(signal.SIGINT, _on_signal, (signal.SIGINT,))
service.serve(connection_attempts=conf.AMQP_CONNECTION_ATTEMPTS,
retry_delay=conf.AMQP_RETRY_DELAY, exclusive=True)
logging.info("ChannelTxReceiverInnerService: started")
service.serve_all()
service.loop.close()
logging.info("ChannelTxReceiverInnerService: stopped")
class ChannelTxReceiverInnerStub(MessageQueueStub[ChannelTxReceiverInnerTask]):
TaskType = ChannelTxReceiverInnerTask
def _callback_connection_lost_callback(self, connection: RobustConnection):
util.exit_and_msg("MQ Connection lost.")
class _ChannelTxCreatorProcess(ModuleProcess):
def __init__(self, tx_versioner: TransactionVersioner, broadcast_scheduler: BroadcastScheduler,
crash_callback_in_join_thread):
super().__init__()
self.__broadcast_queue = self.Queue()
self.__broadcast_queue.cancel_join_thread()
args = (ChannelProperty().name,
StubCollection().amqp_target,
StubCollection().amqp_key,
ChannelProperty().peer_target,
tx_versioner,
self.__broadcast_queue)
super().start(target=ChannelTxCreatorInnerService.main,
args=args,
crash_callback_in_join_thread=crash_callback_in_join_thread)
self.__broadcast_scheduler = broadcast_scheduler
commands = (BroadcastCommand.UPDATE_AUDIENCE,)
broadcast_scheduler.add_schedule_listener(self.__broadcast_callback, commands=commands)
def start(self, target, args=(), crash_callback_in_join_thread=None):
raise AttributeError("Doesn't support this function")
def join(self):
self.__broadcast_scheduler.remove_schedule_listener(self.__broadcast_callback)
super().join()
self.__broadcast_queue: mp.Queue = None
def __broadcast_callback(self, command, params):
self.__broadcast_queue.put((command, params))
class _ChannelTxReceiverProcess(ModuleProcess):
def __init__(self, tx_versioner: TransactionVersioner, add_tx_list_callback, loop, crash_callback_in_join_thread):
super().__init__()
self.__is_running = True
self.__tx_queue = self.Queue()
self.__tx_queue.cancel_join_thread()
async def _add_tx_list(tx_list):
add_tx_list_callback(tx_list)
def _receive_tx_list(tx_queue):
while True:
tx_list = tx_queue.get()
if not self.__is_running or tx_list is None:
break
asyncio.run_coroutine_threadsafe(_add_tx_list(tx_list), loop)
while not tx_queue.empty():
tx_queue.get()
self.__receive_thread = threading.Thread(target=_receive_tx_list, args=(self.__tx_queue,))
self.__receive_thread.start()
args = (ChannelProperty().name,
StubCollection().amqp_target,
StubCollection().amqp_key,
tx_versioner,
self.__tx_queue)
super().start(target=ChannelTxReceiverInnerService.main,
args=args,
crash_callback_in_join_thread=crash_callback_in_join_thread)
def start(self, target, args=(), crash_callback_in_join_thread=None):
raise AttributeError("Doesn't support this function")
def join(self):
super().join()
self.__tx_queue.put(None)
self.__is_running = False # even if tx queue has some items, the loop will be stopped immediately.
self.__receive_thread.join()
self.__tx_queue: mp.Queue = None
self.__receive_thread: threading.Thread = None
class ChannelInnerTask:
def __init__(self, channel_service: 'ChannelService'):
self._channel_service = channel_service
self._block_manager = None
self._blockchain = None
self._thread_pool = ThreadPoolExecutor(1, "ChannelInnerThread")
# Citizen
CitizenInfo = namedtuple("CitizenInfo", "peer_id target connected_time")
self._CitizenInfo = CitizenInfo
self._citizens: Dict[str, CitizenInfo] = dict()
self._citizen_condition_new_block: Condition = None
self._citizen_condition_unregister: Condition = None
self.__sub_processes = []
self.__loop_for_sub_services = None
def init_sub_service(self, loop):
if len(self.__sub_processes) > 0:
raise RuntimeError("Channel sub services have already been initialized")
if loop is None:
raise RuntimeError("Channel sub services need a loop")
self.__loop_for_sub_services = loop
self._block_manager = self._channel_service.block_manager
self._blockchain = self._channel_service.block_manager.blockchain
tx_versioner = self._blockchain.tx_versioner
def crash_callback_in_join_thread(process: ModuleProcess):
asyncio.run_coroutine_threadsafe(self.__handle_crash_sub_services(process), loop)
broadcast_scheduler = self._channel_service.broadcast_scheduler
tx_creator_process = _ChannelTxCreatorProcess(tx_versioner,
broadcast_scheduler,
crash_callback_in_join_thread)
self.__sub_processes.append(tx_creator_process)
logging.info(f"Channel({ChannelProperty().name}) TX Creator: initialized")
tx_receiver_process = _ChannelTxReceiverProcess(tx_versioner,
self.__add_tx_list,
loop,
crash_callback_in_join_thread)
self.__sub_processes.append(tx_receiver_process)
logging.info(f"Channel({ChannelProperty().name}) TX Receiver: initialized")
def update_sub_services_properties(self, **properties):
logging.info(f"properties {properties}")
stub = StubCollection().channel_tx_creator_stubs[ChannelProperty().name]
asyncio.run_coroutine_threadsafe(stub.async_task().update_properties(properties), self.__loop_for_sub_services)
stub = StubCollection().channel_tx_receiver_stubs[ChannelProperty().name]
asyncio.run_coroutine_threadsafe(stub.async_task().update_properties(properties), self.__loop_for_sub_services)
def cleanup_sub_services(self):
for process in self.__sub_processes:
process.terminate()
process.join()
self.__sub_processes = []
async def __handle_crash_sub_services(self, process: ModuleProcess):
try:
self.__sub_processes.remove(process)
process.join()
logging.critical(f"Channel sub process crash occurred. process={process}")
async def _close():
if not self.__loop_for_sub_services.is_closed():
self._channel_service.close()
asyncio.ensure_future(_close(), loop=self.__loop_for_sub_services)
except ValueError:
# Call this function by cleanup
pass
def __add_tx_list(self, tx_list):
for tx in tx_list:
if tx.hash.hex() in self._block_manager.get_tx_queue():
util.logger.debug(f"tx hash {tx.hash.hex_0x()} already exists in transaction queue.")
continue
if self._blockchain.find_tx_by_key(tx.hash.hex()):
util.logger.debug(f"tx hash {tx.hash.hex_0x()} already exists in blockchain.")
continue
self._block_manager.add_tx_obj(tx)
util.apm_event(ChannelProperty().peer_id, {
'event_type': 'AddTx',
'peer_id': ChannelProperty().peer_id,
'peer_name': conf.PEER_NAME,
'channel_name': ChannelProperty().name,
'data': {'tx_hash': tx.hash.hex()}})
if not conf.ALLOW_MAKE_EMPTY_BLOCK:
self._channel_service.start_leader_complain_timer_if_tx_exists()
@message_queue_task
async def hello(self):
return 'channel_hello'
@message_queue_task
async def announce_new_block(self, subscriber_block_height: int, subscriber_id: str):
while True:
my_block_height = self._blockchain.block_height
if subscriber_block_height > my_block_height:
logging.warning(f"subscriber's height({subscriber_block_height}) is higher "
f"than this node's height({my_block_height}).")
self._channel_service.inner_service.notify_unregister()
error_msg = {"error": "Invalid block height from citizen."}
return json.dumps(error_msg), b''
elif subscriber_block_height == my_block_height:
async with self._citizen_condition_new_block:
await self._citizen_condition_new_block.wait()
new_block_height = subscriber_block_height + 1
new_block = self._blockchain.find_block_by_height(new_block_height)
if new_block is None:
logging.warning(f"Cannot find block height({new_block_height})")
# To prevent excessive occupancy of the CPU in an infinite loop
await asyncio.sleep(2 * conf.INTERVAL_BLOCKGENERATION)
continue
confirm_info: bytes = self._blockchain.find_confirm_info_by_hash(new_block.header.hash)
logging.debug(f"announce_new_block: height({new_block.header.height}), to: {subscriber_id}")
bs = BlockSerializer.new(new_block.header.version, self._blockchain.tx_versioner)
return json.dumps(bs.serialize(new_block)), confirm_info
@message_queue_task
async def register_citizen(self, peer_id, target, connected_time):
register_condition = (len(self._citizens) < conf.SUBSCRIBE_LIMIT
and (peer_id not in self._citizens)
and not (conf.SAFE_BLOCK_BROADCAST and
self._channel_service.state_machine.state == 'BlockGenerate'))
if register_condition:
new_citizen = self._CitizenInfo(peer_id, target, connected_time)
self._citizens[peer_id] = new_citizen
logging.info(f"register new citizen: {new_citizen}")
logging.debug(f"remaining all citizens: {self._citizens}")
return register_condition
@message_queue_task
async def unregister_citizen(self, peer_id):
try:
logging.info(f"unregister citizen: {peer_id}")
del self._citizens[peer_id]
logging.debug(f"remaining all citizens: {self._citizens}")
except KeyError as e:
logging.warning(f"already unregistered citizen({peer_id})")
@message_queue_task
async def wait_for_unregister_signal(self, subscriber_id: str):
async with self._citizen_condition_unregister:
await self._citizen_condition_unregister.wait()
logging.debug(f"citizen({subscriber_id}) will be unregistered from this node")
return True
@message_queue_task
async def is_citizen_registered(self, peer_id) -> bool:
return peer_id in self._citizens
@message_queue_task
async def get_citizens(self) -> List[Dict[str, str]]:
return [{"id": ctz.peer_id, "target": ctz.target, "connected_time": ctz.connected_time}
for ctz in self._citizens.values()]
@message_queue_task
async def get_reps_by_hash(self, reps_hash: str) -> List[Dict[str, str]]:
new_reps_hash = Hash32.fromhex(reps_hash)
preps = self._blockchain.find_preps_by_roothash(new_reps_hash)
return preps
@message_queue_task(priority=255)
async def get_status(self):
status_data = dict()
status_data["made_block_count"] = self._blockchain.my_made_block_count
status_data["leader_made_block_count"] = self._blockchain.leader_made_block_count
block_height = 0
unconfirmed_block_height = None
peer_count = -1
last_block = self._blockchain.last_block
last_unconfirmed_block = self._blockchain.last_unconfirmed_block
if last_block:
block_height = last_block.header.height
peer_count = len(self._blockchain.find_preps_addresses_by_header(last_block.header))
if last_unconfirmed_block:
unconfirmed_block_height = last_unconfirmed_block.header.height
status_data["nid"] = ChannelProperty().nid
status_data["status"] = self._block_manager.service_status
status_data["state"] = self._channel_service.state_machine.state
status_data["service_available"]: bool = \
(status_data["state"] in self._channel_service.state_machine.service_available_states)
status_data["peer_type"] = \
str(1 if self._channel_service.state_machine.state == "BlockGenerate" else 0)
status_data["audience_count"] = "0"
status_data["consensus"] = str(conf.CONSENSUS_ALGORITHM.name)
status_data["peer_id"] = str(ChannelProperty().peer_id)
status_data["block_height"] = block_height
status_data["round"] = self._block_manager.epoch.round if self._block_manager.epoch else -1
status_data["epoch_height"] = self._block_manager.epoch.height if self._block_manager.epoch else -1
status_data["unconfirmed_block_height"] = unconfirmed_block_height or -1
status_data["total_tx"] = self._block_manager.get_total_tx()
status_data["unconfirmed_tx"] = self._block_manager.get_count_of_unconfirmed_tx()
status_data["peer_target"] = ChannelProperty().peer_target
status_data["leader_complaint"] = 1
status_data["peer_count"] = peer_count
status_data["leader"] = self._block_manager.epoch.leader_id if self._block_manager.epoch else ""
status_data["epoch_leader"] = self._block_manager.epoch.leader_id if self._block_manager.epoch else ""
status_data["versions"] = conf.ICON_VERSIONS
return status_data
@message_queue_task
def create_tx(self, data):
tx = Transaction()
score_id = ""
score_version = ""
try:
score_info = self._channel_service.score_info
score_id = score_info[message_code.MetaParams.ScoreInfo.score_id]
score_version = score_info[message_code.MetaParams.ScoreInfo.score_version]
except KeyError as e:
logging.debug(f"CreateTX : load score info fail\n"
f"cause : {e}")
send_tx_type = self._channel_service.get_channel_option()["send_tx_type"]
tx.init_meta(ChannelProperty().peer_id, score_id, score_version, ChannelProperty().name, send_tx_type)
tx.put_data(data)
tx.sign_hash(ChannelProperty().peer_auth)
self._channel_service.broadcast_scheduler.schedule_job(BroadcastCommand.CREATE_TX, tx)
try:
data_log = json.loads(data)
except Exception as e:
data_log = {'tx_hash': tx.tx_hash}
util.apm_event(ChannelProperty().peer_id, {
'event_type': 'CreateTx',
'peer_id': ChannelProperty().peer_id,
'peer_name': conf.PEER_NAME,
'channel_name': ChannelProperty().name,
'tx_hash': tx.tx_hash,
'data': data_log})
return tx.tx_hash
@message_queue_task(type_=MessageQueueType.Worker)
def add_tx(self, request) -> None:
tx_json = request.tx_json
tx_versioner = self._blockchain.tx_versioner
tx_version, tx_type = tx_versioner.get_version(tx_json)
ts = TransactionSerializer.new(tx_version, tx_type, tx_versioner)
tx = ts.from_(tx_json)
tv = TransactionVerifier.new(tx_version, tx_type, tx_versioner)
tv.verify(tx)
if tx is not None:
self._block_manager.add_tx_obj(tx)
util.apm_event(ChannelProperty().peer_id, {
'event_type': 'AddTx',
'peer_id': ChannelProperty().peer_id,
'peer_name': conf.PEER_NAME,
'channel_name': ChannelProperty().name,
'data': {'tx_hash': tx.tx_hash}})
if not conf.ALLOW_MAKE_EMPTY_BLOCK:
self._channel_service.start_leader_complain_timer_if_tx_exists()
@message_queue_task
def get_tx(self, tx_hash):
return self._block_manager.get_tx(tx_hash)
@message_queue_task
def get_tx_info(self, tx_hash):
tx = self._block_manager.get_tx_queue().get(tx_hash, None)
if tx:
tx_serializer = TransactionSerializer.new(tx.version, tx.type(), self._blockchain.tx_versioner)
tx_origin = tx_serializer.to_origin_data(tx)
logging.info(f"get_tx_info pending : tx_hash({tx_hash})")
tx_info = dict()
tx_info["transaction"] = tx_origin
tx_info["tx_index"] = None
tx_info["block_height"] = None
tx_info["block_hash"] = None
return message_code.Response.success, tx_info
else:
try:
return message_code.Response.success, self._block_manager.get_tx_info(tx_hash)
except KeyError as e:
logging.error(f"get_tx_info error : tx_hash({tx_hash}) not found error({e})")
response_code = message_code.Response.fail_invalid_key_error
return response_code, None
@message_queue_task(type_=MessageQueueType.Worker)
async def announce_unconfirmed_block(self, block_dumped, round_: int) -> None:
try:
unconfirmed_block = self._blockchain.block_loads(block_dumped)
except BlockError as e:
traceback.print_exc()
logging.error(f"announce_unconfirmed_block: {e}")
return
util.logger.debug(
f"announce_unconfirmed_block \n"
f"peer_id({unconfirmed_block.header.peer_id.hex()})\n"
f"height({unconfirmed_block.header.height})\n"
f"round({round_})\n"
f"hash({unconfirmed_block.header.hash.hex()})")
if self._channel_service.state_machine.state not in \
("Vote", "Watch", "LeaderComplain", "BlockGenerate"):
util.logger.debug(f"Can't add unconfirmed block in state({self._channel_service.state_machine.state}).")
return
last_block = self._blockchain.last_block
if last_block is None:
util.logger.debug("BlockChain has not been initialized yet.")
return
try:
self._block_manager.verify_confirm_info(unconfirmed_block)
except ConfirmInfoInvalid as e:
util.logger.warning(f"ConfirmInfoInvalid {e}")
except ConfirmInfoInvalidNeedBlockSync as e:
util.logger.debug(f"ConfirmInfoInvalidNeedBlockSync {e}")
if self._channel_service.state_machine.state == "BlockGenerate" and (
self._block_manager.consensus_algorithm and self._block_manager.consensus_algorithm.is_running):
self._block_manager.consensus_algorithm.stop()
else:
self._channel_service.state_machine.block_sync()
except ConfirmInfoInvalidAddedBlock as e:
util.logger.warning(f"ConfirmInfoInvalidAddedBlock {e}")
except NotReadyToConfirmInfo as e:
util.logger.warning(f"NotReadyToConfirmInfo {e}")
else:
self._channel_service.state_machine.vote(unconfirmed_block=unconfirmed_block, round_=round_)
@message_queue_task
def block_sync(self, block_hash, block_height):
response_code = None
block: Block = None
if block_hash != "":
block = self._blockchain.find_block_by_hash(block_hash)
elif block_height != -1:
block = self._blockchain.find_block_by_height(block_height)
else:
response_code = message_code.Response.fail_not_enough_data
if self._blockchain.last_unconfirmed_block is None:
unconfirmed_block_height = -1
else:
unconfirmed_block_height = self._blockchain.last_unconfirmed_block.header.height
if block is None:
if response_code is None:
response_code = message_code.Response.fail_wrong_block_hash
return response_code, -1, self._blockchain.block_height, unconfirmed_block_height, None, None
confirm_info = None
if 0 < block.header.height <= self._blockchain.block_height:
confirm_info = self._blockchain.find_confirm_info_by_hash(block.header.hash)
if not confirm_info and parse_version(block.header.version) >= parse_version("0.3"):
response_code = message_code.Response.fail_no_confirm_info
return response_code, -1, self._blockchain.block_height, unconfirmed_block_height, None, None
return (message_code.Response.success, block.header.height, self._blockchain.block_height,
unconfirmed_block_height, confirm_info, self._blockchain.block_dumps(block))
@message_queue_task(type_=MessageQueueType.Worker)
def vote_unconfirmed_block(self, vote_dumped: str) -> None:
try:
vote_serialized = json.loads(vote_dumped)
except json.decoder.JSONDecodeError:
util.logger.warning(f"This vote({vote_dumped}) may be from old version.")
else:
version = self._blockchain.block_versioner.get_version(int(vote_serialized["blockHeight"], 16))
vote = Vote.get_block_vote_class(version).deserialize(vote_serialized)
util.logger.debug(
f"Peer vote to: {vote.block_height}({vote.round}) {vote.block_hash} from {vote.rep.hex_hx()}"
)
self._block_manager.candidate_blocks.add_vote(vote)
if self._channel_service.state_machine.state == "BlockGenerate" and \
self._block_manager.consensus_algorithm:
self._block_manager.consensus_algorithm.vote(vote)
@message_queue_task(type_=MessageQueueType.Worker)
async def complain_leader(self, vote_dumped: str) -> None:
vote_serialized = json.loads(vote_dumped)
version = self._blockchain.block_versioner.get_version(int(vote_serialized["blockHeight"], 16))
vote = Vote.get_leader_vote_class(version).deserialize(vote_serialized)
self._block_manager.add_complain(vote)
@message_queue_task
def get_invoke_result(self, tx_hash):
try:
invoke_result = self._block_manager.get_invoke_result(tx_hash)
invoke_result_str = json.dumps(invoke_result)
response_code = message_code.Response.success
logging.debug('invoke_result : ' + invoke_result_str)
util.apm_event(ChannelProperty().peer_id, {
'event_type': 'GetInvokeResult',
'peer_id': ChannelProperty().peer_id,
'peer_name': conf.PEER_NAME,
'channel_name': ChannelProperty().name,
'data': {'invoke_result': invoke_result, 'tx_hash': tx_hash}})
if 'code' in invoke_result:
if invoke_result['code'] == ScoreResponse.NOT_EXIST:
logging.debug(f"get invoke result NOT_EXIST tx_hash({tx_hash})")
response_code = message_code.Response.fail_invalid_key_error
elif invoke_result['code'] == ScoreResponse.NOT_INVOKED:
logging.info(f"get invoke result NOT_INVOKED tx_hash({tx_hash})")
response_code = message_code.Response.fail_tx_not_invoked
return response_code, invoke_result_str
except BaseException as e:
logging.error(f"get invoke result error : {e}")
util.apm_event(ChannelProperty().peer_id, {
'event_type': 'Error',
'peer_id': ChannelProperty().peer_id,
'peer_name': conf.PEER_NAME,
'channel_name': ChannelProperty().name,
'data': {
'error_type': 'InvokeResultError',
'code': message_code.Response.fail,
'message': f"get invoke result error : {e}"}})
return message_code.Response.fail, None
@message_queue_task
async def get_block_v2(self, block_height, block_hash) -> Tuple[int, str, str]:
# This is a temporary function for v2 support of exchanges.
block, block_hash, _, fail_response_code = await self.__get_block(block_hash, block_height)
if fail_response_code:
return fail_response_code, block_hash, json.dumps({})
tx_versioner = self._blockchain.tx_versioner
bs = BlockSerializer.new(block.header.version, tx_versioner)
block_data_dict = bs.serialize(block)
if block.header.height == 0:
return message_code.Response.success, block_hash, json.dumps(block_data_dict)
confirmed_tx_list_without_fail = []
for tx in block.body.transactions.values():
invoke_result = self._block_manager.get_invoke_result(tx.hash)
if 'failure' in invoke_result:
continue
ts = TransactionSerializer.new(tx.version, tx.type(), tx_versioner)
full_data = ts.to_full_data(tx)
if tx.version == "0x3":
step_used, step_price = int(invoke_result["stepUsed"], 16), int(invoke_result["stepPrice"], 16)
full_data["fee"] = hex(step_used * step_price)
confirmed_tx_list_without_fail.append(full_data)
# Replace the existing confirmed_transactions with v2 ver.
if block.header.version == "0.1a":
block_data_dict["confirmed_transaction_list"] = confirmed_tx_list_without_fail
else:
block_data_dict["transactions"] = confirmed_tx_list_without_fail
block_data_json = json.dumps(block_data_dict)
if fail_response_code:
return fail_response_code, block_hash, json.dumps({})
return message_code.Response.success, block_hash, block_data_json
@message_queue_task
async def get_block(self, block_height, block_hash) -> Tuple[int, str, bytes, str]:
block, block_hash, confirm_info, fail_response_code = await self.__get_block(block_hash, block_height)
if fail_response_code:
return fail_response_code, block_hash, b"", json.dumps({})
tx_versioner = self._blockchain.tx_versioner
bs = BlockSerializer.new(block.header.version, tx_versioner)
block_dict = bs.serialize(block)
return message_code.Response.success, block_hash, confirm_info, json.dumps(block_dict)
async def __get_block(self, block_hash, block_height):
if block_hash == "" and block_height == -1 and self._blockchain.last_block:
block_hash = self._blockchain.last_block.header.hash.hex()
block = None
confirm_info = b''
fail_response_code = None
if block_hash:
block = self._blockchain.find_block_by_hash(block_hash)
if block is None:
fail_response_code = message_code.Response.fail_wrong_block_hash
confirm_info = bytes()
else:
confirm_info = self._blockchain.find_confirm_info_by_hash(Hash32.fromhex(block_hash, True))
elif block_height != -1:
block = self._blockchain.find_block_by_height(block_height)
if block is None:
fail_response_code = message_code.Response.fail_wrong_block_height
confirm_info = bytes()
else:
confirm_info = self._blockchain.find_confirm_info_by_hash(block.header.hash)
else:
fail_response_code = message_code.Response.fail_wrong_block_hash
return block, block_hash, bytes(confirm_info), fail_response_code
@message_queue_task
def get_precommit_block(self, last_block_height: int):
precommit_block = self._blockchain.get_precommit_block()
if precommit_block is None:
return message_code.Response.fail, "there is no precommit block.", b""
if precommit_block.height != last_block_height + 1:
return message_code.Response.fail, "need block height sync.", b""
block_dumped = self._blockchain.block_dumps(precommit_block)
return message_code.Response.success, "success", block_dumped
@message_queue_task
def get_tx_by_address(self, address, index):
tx_list, next_index = self._blockchain.get_tx_list_by_address(address=address, index=index)
return tx_list, next_index
@message_queue_task
async def get_tx_proof(self, tx_hash: str) -> Union[list, dict]:
try:
proof = self._blockchain.get_transaction_proof(Hash32.fromhex(tx_hash))
except Exception as e:
return make_error_response(JsonError.INVALID_PARAMS, str(e))
try:
return make_proof_serializable(proof)
except Exception as e:
return make_error_response(JsonError.INTERNAL_ERROR, str(e))
@message_queue_task
async def prove_tx(self, tx_hash: str, proof: list) -> Union[str, dict]:
try:
proof = make_proof_deserializable(proof)
except Exception as e:
return make_error_response(JsonError.INTERNAL_ERROR, str(e))
try:
return "0x1" if self._blockchain.prove_transaction(Hash32.fromhex(tx_hash), proof) else "0x0"
except Exception as e:
return make_error_response(JsonError.INVALID_PARAMS, str(e))
@message_queue_task
async def get_receipt_proof(self, tx_hash: str) -> Union[list, dict]:
try:
proof = self._blockchain.get_receipt_proof(Hash32.fromhex(tx_hash))
except Exception as e:
return make_error_response(JsonError.INVALID_PARAMS, str(e))
try:
return make_proof_serializable(proof)
except Exception as e:
return make_error_response(JsonError.INTERNAL_ERROR, str(e))
@message_queue_task
async def prove_receipt(self, tx_hash: str, proof: list) -> Union[str, dict]:
try:
proof = make_proof_deserializable(proof)
except Exception as e:
return make_error_response(JsonError.INTERNAL_ERROR, str(e))
try:
return "0x1" if self._blockchain.prove_receipt(Hash32.fromhex(tx_hash), proof) else "0x0"
except Exception as e:
return make_error_response(JsonError.INVALID_PARAMS, str(e))
@message_queue_task
def reset_timer(self, key):
self._channel_service.timer_service.reset_timer(key)
@message_queue_task(type_=MessageQueueType.Worker)
def stop(self, message):
logging.info(f"channel_inner_service:stop message({message})")
self._channel_service.close()
class ChannelInnerService(MessageQueueService[ChannelInnerTask]):
TaskType = ChannelInnerTask
def __init__(self, amqp_target, route_key, username=None, password=None, **task_kwargs):
super().__init__(amqp_target, route_key, username, password, **task_kwargs)
self._task._citizen_condition_new_block = Condition(loop=self.loop)
self._task._citizen_condition_unregister = Condition(loop=self.loop)
def _callback_connection_lost_callback(self, connection: RobustConnection):
util.exit_and_msg("MQ Connection lost.")
def notify_new_block(self):
async def _notify_new_block():
condition = self._task._citizen_condition_new_block
async with condition:
condition.notify_all()
asyncio.run_coroutine_threadsafe(_notify_new_block(), self.loop)
def notify_unregister(self):
async def _notify_unregister():
condition = self._task._citizen_condition_unregister
async with condition:
condition.notify_all()
asyncio.run_coroutine_threadsafe(_notify_unregister(), self.loop)
def init_sub_services(self):
if self.loop != asyncio.get_event_loop():
raise Exception("Must call this function in thread of self.loop")
self._task.init_sub_service(self.loop)
def update_sub_services_properties(self, **properties):
self._task.update_sub_services_properties(**properties)
def cleanup(self):
if self.loop != asyncio.get_event_loop():
raise Exception("Must call this function in thread of self.loop")
self._task.cleanup_sub_services()
class ChannelInnerStub(MessageQueueStub[ChannelInnerTask]):
TaskType = ChannelInnerTask
def _callback_connection_lost_callback(self, connection: RobustConnection):
util.exit_and_msg("MQ Connection lost.")
def make_proof_serializable(proof: list):
proof_serializable = []
for item in proof:
try:
left = Hash32(item["left"])
proof_serializable.append({"left": left.hex_0x()})
except KeyError:
right = Hash32(item["right"])
proof_serializable.append({"right": right.hex_0x()})
return proof_serializable
def make_proof_deserializable(proof: list):
proof_deserializable = []
for item in proof:
try:
left: str = item["left"]
proof_deserializable.append({"left": Hash32.fromhex(left)})
except KeyError:
right: str = item["right"]
proof_deserializable.append({"right": Hash32.fromhex(right)})
return proof_deserializable
def make_error_response(code: int, message: str):
return {
"error": {
"code": code,
"message": message
}
}
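# --- Editor's illustrative sketch (not part of the original module) ---
# A hedged example of the error payload the proof helpers above return on
# failure; the numeric code follows the JSON-RPC convention and is assumed to
# correspond to the matching JsonError constant.
if __name__ == "__main__":
    print(make_error_response(-32602, "tx hash not found"))
    # -> {'error': {'code': -32602, 'message': 'tx hash not found'}}
    # Round trip: make_proof_serializable() turns [{"left": Hash32}, ...] into
    # [{"left": "0x..."}, ...]; make_proof_deserializable() reverses it.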
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack, ReRunDialog
from electrum.network import Network
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout, SeedLayoutDisplay, SeedConfirmDisplay
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit, PasswordLineEdit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from .bip39_recovery_dialog import Bip39RecoveryDialog
from electrum.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
from electrum.wallet_db import WalletDB
from . import ElectrumGui
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:T8vPbnoUs5Ci... \t-> MWLEbTAW6...\n' +
'p2wpkh-p2sh:T8vPbnoUs5Ci... \t-> PStEWT3Zs...\n' +
'p2wpkh:T8vPbnoUs5Ci... \t-> mona1q7cg...')
# note: full key is T8vPbnoUs5CiEBHcnne1wXuR9V5ft16vRpuvqWTH83tFxT8Uacvn
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16 * 360 * i / self.n)
alpha2 = int(16 * 360 / self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
while True:
#wizard.logger.debug(f"dialog stack. len: {len(wizard._stack)}. stack: {wizard._stack}")
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
# current dialog
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
except GoBack:
if not wizard.can_go_back():
wizard.close()
raise UserCancelled
else:
# to go back from the current dialog, we just let the caller unroll the stack:
raise
# next dialog
try:
while True:
try:
run_next(*out)
except ReRunDialog:
# restore state, and then let the loop re-run next
wizard.go_back(rerun_previous=False)
else:
break
except GoBack as e:
# to go back from the next dialog, we ask the wizard to restore state
wizard.go_back(rerun_previous=False)
# and we re-run the current dialog
if wizard.can_go_back():
# also rerun any calculations that might have populated the inputs to the current dialog,
# by going back to just after the *previous* dialog finished
raise ReRunDialog() from e
else:
continue
else:
break
return func_wrapper
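# Editor's note (illustrative, hedged): every @wizard_dialog method below receives a
# 'run_next' callback from BaseWizard. The decorator calls the wrapped dialog, feeds
# its return value(s) into run_next(*out), and maps the Back button / ReRunDialog
# exceptions onto the GoBack-driven stack handling implemented above. A hypothetical
# minimal dialog would look like:
#
#     @wizard_dialog
#     def example_dialog(self, title, message, run_next):
#         self.confirm(message, title)   # blocks until the user presses Next/Back
#         return True                    # handed to run_next(True)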
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins', *, gui_object: 'ElectrumGui'):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
self.gui_thread = gui_object.gui_thread
self.setMinimumSize(600, 490)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum-ravencoin.png')
self.show()
self.raise_()
self.refresh_gui()  # Needed for Qt on macOS. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
name_e = QLineEdit()
hbox.addWidget(name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
msg_label = WWLabel('')
vbox.addWidget(msg_label)
hbox2 = QHBoxLayout()
pw_e = PasswordLineEdit('', self)
pw_e.setFixedWidth(17 * char_width_in_lineedit())
pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(pw_label)
hbox2.addWidget(pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
vbox.addSpacing(50)
vbox_create_new = QVBoxLayout()
vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
button_create_new = QPushButton(_('Create New Wallet'))
button_create_new.setMinimumWidth(120)
vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
widget_create_new = QWidget()
widget_create_new.setLayout(vbox_create_new)
vbox_create_new.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(widget_create_new)
self.set_layout(vbox, title=_('Electrum wallet'))
temp_storage = None # type: Optional[WalletStorage]
wallet_folder = os.path.dirname(path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
name_e.setText(path)
def on_filename(filename):
# FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
nonlocal temp_storage
temp_storage = None
msg = None
if filename:
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
temp_storage = wallet_from_memory.storage # type: Optional[WalletStorage]
else:
temp_storage = WalletStorage(path)
except (StorageReadWriteError, WalletFileException) as e:
msg = _('Cannot read file') + f'\n{repr(e)}'
except Exception as e:
self.logger.exception('')
msg = _('Cannot read file') + f'\n{repr(e)}'
else:
msg = ""
self.next_button.setEnabled(temp_storage is not None)
user_needs_to_enter_password = False
if temp_storage:
if not temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
if msg is None:
msg = _('Cannot read file')
msg_label.setText(msg)
widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
if user_needs_to_enter_password:
pw_label.show()
pw_e.show()
pw_e.setFocus()
else:
pw_label.hide()
pw_e.hide()
button.clicked.connect(on_choose)
button_create_new.clicked.connect(
partial(
name_e.setText,
get_new_wallet_name(wallet_folder)))
name_e.textChanged.connect(on_filename)
name_e.setText(os.path.basename(path))
def run_user_interaction_loop():
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled()
assert temp_storage
if temp_storage.file_exists() and not temp_storage.is_encrypted():
break
if not temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if temp_storage.file_exists() and temp_storage.is_encrypted():
if temp_storage.is_encrypted_with_user_pw():
password = pw_e.text()
try:
temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
elif temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except (UserCancelled, GoBack):
raise
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
if temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
try:
run_user_interaction_loop()
finally:
try:
pw_e.clear()
except RuntimeError: # wrapped C/C++ object has been deleted.
pass # happens when decrypting with hw device
return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
def run_upgrades(self, storage: WalletStorage, db: 'WalletDB') -> None:
path = storage.path
if db.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = db.split_accounts(path)
msg = _('Your accounts have been moved to') + ':\n' + '\n'.join(file_list) + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = db.get_action()
if action and db.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = json.loads(storage.read())
self.run(action)
for k, v in self.data.items():
db.put(k, v)
db.write(storage)
return
if db.requires_upgrade():
self.upgrade_db(storage, db)
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True, focused_widget=None):
self.set_layout(layout, title, next_enabled)
if focused_widget:
focused_widget.setFocus()
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled()
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi, config=self.config)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options, full=True):
slayout = SeedConfirmDisplay(
title=message,
is_seed=is_seed,
options=options,
parent=self,
config=self.config,
full_check=full
)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.seed_type, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
#if self.opt_slip39:
# options.append('slip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, seed, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, seed_type, is_ext = self.seed_input(title, message, test, None, False)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text, electrum_seed_type):
title = _("Your wallet generation seed is:")
slayout = SeedLayoutDisplay(
seed=seed_text,
title=title,
options=['ext'],
msg=True,
config=self.config,
electrum_seed_type=electrum_seed_type
)
self.exec_layout(slayout)
if slayout.seed_type == 'electrum':
self.opt_bip39 = False
self.opt_ext = True
else:
self.opt_bip39 = True
self.opt_ext = True
self.seed = slayout.get_seed()
self.seed_type = slayout.seed_type
return slayout.is_ext #and slayout.seed_type == 'electrum'
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
pw_layout = PasswordLayout(
msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
pw_layout.encrypt_cb.setChecked(True)
try:
self.exec_layout(pw_layout.layout(), focused_widget=pw_layout.new_pw)
return pw_layout.new_password(), pw_layout.encrypt_cb.isChecked()
finally:
pw_layout.clear_password_fields()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
def run_task_without_blocking_gui(self, task, *, msg=None):
assert self.gui_thread == threading.current_thread(), 'must be called from GUI thread'
if msg is None:
msg = _("Please wait...")
exc = None # type: Optional[Exception]
res = None
def task_wrapper():
nonlocal exc
nonlocal res
try:
res = task()
except Exception as e:
exc = e
self.waiting_dialog(task_wrapper, msg=msg)
if exc is None:
return res
else:
raise exc
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def derivation_and_script_type_gui_specific_dialog(
self,
*,
title: str,
message1: str,
choices: List[Tuple[str, str, str]],
hide_choices: bool = False,
message2: str,
test_text: Callable[[str], int],
run_next,
default_choice_idx: int = 0,
get_account_xpub=None,
) -> Tuple[str, str]:
vbox = QVBoxLayout()
if get_account_xpub:
button = QPushButton(_("Detect Existing Accounts"))
def on_account_select(account):
script_type = account["script_type"]
if script_type == "p2pkh":
script_type = "standard"
button_index = c_values.index(script_type)
button = clayout.group.buttons()[button_index]
button.setChecked(True)
line.setText(account["derivation_path"])
button.clicked.connect(lambda: Bip39RecoveryDialog(self, get_account_xpub, on_account_select))
vbox.addWidget(button, alignment=Qt.AlignLeft)
vbox.addWidget(QLabel(_("Or")))
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
if not hide_choices:
vbox.addLayout(clayout.layout())
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
#TODO: Implement
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(
xpub,
title=msg,
icon=False,
for_seed_words=False,
config=self.config,
)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network: 'Network'):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
self.config.set_key('auto_connect', network.auto_connect, True)
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
backup_warning_label.setVisible(cw.m != cw.n)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
backup_warning_label.setVisible(cw.m != cw.n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
vbox.addSpacing(2 * char_width_in_lineedit())
backup_warning_label = WWLabel(_("Warning: to be able to restore a multisig wallet, "
"you should include the master public key for each cosigner "
"in all of your backups."))
vbox.addWidget(backup_warning_label)
on_n(2)
on_m(2)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
grab_api.py
|
# coding: utf-8
from grab import GrabMisuseError, GrabError
from grab.error import GrabTooManyRedirectsError
from grab.base import reset_request_counter
from test.util import build_grab
from test.util import BaseGrabTestCase
import six
import tempfile
import os
class GrabApiTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_incorrect_option_name(self):
g = build_grab()
self.assertRaises(GrabMisuseError, g.setup,
save_the_word=True)
def test_clone(self):
g = build_grab()
self.server.response['get.data'] = 'Moon'
g.go(self.server.get_url())
self.assertTrue(b'Moon' in g.response.body)
self.server.response['post.data'] = 'Foo'
g2 = g.clone(method='post', post='')
g2.go(self.server.get_url())
self.assertTrue(b'Foo' in g2.response.body)
def test_empty_clone(self):
g = build_grab()
g.clone()
def test_adopt(self):
g = build_grab()
self.server.response['get.data'] = 'Moon'
g.go(self.server.get_url())
g2 = build_grab()
self.assertEqual(g2.config['url'], None)
g2.adopt(g)
self.assertTrue(b'Moon' in g2.response.body)
self.assertEqual(g2.config['url'], self.server.get_url())
def test_empty_adopt(self):
g = build_grab()
g2 = build_grab()
g2.adopt(g)
def test_default_content_for_fake_response(self):
content = b'<strong>test</strong>'
g = build_grab(document_body=content)
self.assertEqual(g.response.body, content)
def test_inheritance(self):
from grab import Grab
class SimpleExtension(object):
data = {'counter': 0}
@classmethod
def get_data(cls):
return cls.data
class CustomGrab(Grab, SimpleExtension):
pass
SimpleExtension.get_data()['counter'] = 0
CustomGrab()
# self.assertEqual(SimpleExtension.get_data()['counter'], 1)
class VeryCustomGrab(CustomGrab):
pass
SimpleExtension.get_data()['counter'] = 0
VeryCustomGrab()
# self.assertEqual(SimpleExtension.get_data()['counter'], 1)
def test_request_counter(self):
import threading
reset_request_counter()
g = build_grab()
g.go(self.server.get_url())
self.assertEqual(g.request_counter, 1)
g.go(self.server.get_url())
self.assertEqual(g.request_counter, 2)
def func():
g = build_grab()
g.go(self.server.get_url())
# Make 10 requests in concurrent threads
threads = []
for x in six.moves.range(10):
th = threading.Thread(target=func)
threads.append(th)
th.start()
for th in threads:
th.join()
g.go(self.server.get_url())
self.assertEqual(g.request_counter, 13)
def test_download(self):
fd, path = tempfile.mkstemp()
g = build_grab()
self.server.response['get.data'] = 'FOO'
length = g.download(self.server.get_url(), path)
self.assertEqual(3, length)
os.unlink(path)
def test_make_url_absolute(self):
g = build_grab()
self.server.response['get.data'] = '<base href="http://foo/bar/">'
g.go(self.server.get_url())
absolute_url = g.make_url_absolute('/foobar', resolve_base=True)
self.assertEqual(absolute_url, 'http://foo/foobar')
g = build_grab()
absolute_url = g.make_url_absolute('/foobar')
self.assertEqual(absolute_url, '/foobar')
def test_error_request(self):
g = build_grab()
g.setup(post={'foo': 'bar'})
self.assertRaises(GrabError, g.go,
url='Could-not-resolve-host-address')
self.assertEqual(g.config['post'], None)
self.assertEqual(g.config['multipart_post'], None)
self.assertEqual(g.config['method'], None)
self.assertEqual(g.config['body_storage_filename'], None)
def test_setup_document(self):
data = b'''
<h1>test</h1>
'''
g = build_grab(data)
self.assertTrue(b'test' in g.doc.body)
def test_setup_document_invalid_input(self):
data = u'''
<h1>test</h1>
'''
self.assertRaises(GrabMisuseError, build_grab, data)
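if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the test suite, hedged): the same
    # Grab calls exercised above, pointed at a real URL instead of the local test
    # server. Requires network access; the URL is a placeholder.
    from grab import Grab
    g = Grab()
    g.go('http://example.com')
    print(g.request_counter, len(g.response.body))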
|
dataset.py
|
import tensorflow as tf
import numpy as np
import threading, time
import h5py
class Dataset():
def __init__(self, data_path, batch_size):
self.data_path = data_path
# Patch size for training
self.input_size = 41
self.label_size = 41
self.batch_size = batch_size
self.queue_size = 3000
self.open_h5py_file()
self.make_queue()
def open_h5py_file(self):
self.h5py_file = h5py.File('{}.h5'.format(self.data_path), 'r')
self.data_size = self.h5py_file['data'].shape[0]
self.data_index = self.data_size // self.batch_size  # number of full batches per epoch
def make_queue(self):
self.input_t = tf.placeholder(tf.float32, [None, self.input_size, self.input_size, 1])
self.label_t = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, 1])
queue = tf.RandomShuffleQueue(
capacity=self.queue_size,
min_after_dequeue=self.batch_size,
dtypes=(tf.float32, tf.float32),
shapes=((self.input_size, self.input_size, 1), (self.label_size, self.label_size, 1)),
name='random_shuffle_queue'
)
self.enqueue_many = queue.enqueue_many([self.input_t, self.label_t])
self.dequeue_many = queue.dequeue_many(self.batch_size)
def start_enqueue_deamon(self, sess):
def enqueue_thread(sess):
while True:
for (input_t, label_t) in self.generator():
sess.run([self.enqueue_many], feed_dict={
self.input_t: input_t,
self.label_t: label_t
})
time.sleep(0.0001)
thread_number = 1
threads = []
for i in range(thread_number):
t = threading.Thread(target=enqueue_thread, args=(sess,), daemon=True)
t.start()
threads.append(t)
return threads
def generator(self):
for i in range(self.data_index):
input_t = self.h5py_file['data'][i * self.batch_size : (i+1) * self.batch_size]
label_t = self.h5py_file['label'][i * self.batch_size : (i+1) * self.batch_size]
yield (input_t, label_t)
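if __name__ == '__main__':
    # Editor's illustrative sketch (hedged): wiring the queue into a TF1 session.
    # Assumes 'train.h5' exists with 'data' and 'label' datasets shaped
    # (N, 41, 41, 1); the path and batch size here are hypothetical.
    dataset = Dataset(data_path='train', batch_size=64)
    input_batch, label_batch = dataset.dequeue_many
    with tf.Session() as sess:
        dataset.start_enqueue_deamon(sess)  # name spelled as defined above
        x, y = sess.run([input_batch, label_batch])
        print(x.shape, y.shape)  # -> (64, 41, 41, 1) (64, 41, 41, 1)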
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import error_handling
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
  The purpose of using a variable, rather than a constant, is to allow
  TPUEstimator to adapt the TPU training iterations according to the final
  steps specified by users. For example, if the user sets iterations_per_loop
  to 4 in TPUConfig and steps to 10 in TPUEstimator.train(), the
  iterations_per_loop variable will have the following values before each TPU
  training loop:
  - 1st TPU execution: iterations_per_loop = 4
  - 2nd TPU execution: iterations_per_loop = 4
  - 3rd TPU execution: iterations_per_loop = 2
  (The plain-Python sketch after this function reproduces this schedule.)
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
    RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
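# Illustrative sketch (not part of the original file): the schedule described
# in the docstring above, computed in plain Python. With total_steps=10 and
# iterations_per_loop=4 this yields [4, 4, 2].
def _demo_iterations_schedule(total_steps, iterations_per_loop):
  schedule = []
  remaining = total_steps
  while remaining > 0:
    schedule.append(min(remaining, iterations_per_loop))
    remaining -= schedule[-1]
  return schedule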
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU
system before returning to CPU host for each `Session.run`.
Returns:
    An operation that adds (iterations_per_loop - 1) to the eval step.
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
  # Estimator.evaluate() increases the eval step by 1 per run by default, so
  # we add the remaining difference here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
@{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
  update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify
  `eval_metrics`, and the illustrative sketch after this class.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
  `host_call` is a tuple of a `function` and a list or dictionary of `tensors`
  to pass to that function; the function returns a list of Tensors. `host_call`
  currently works for train() and evaluate(). The function is executed on the
  CPU on every step, so there is communication overhead when sending tensors
  from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with @{tf.contrib.summary.create_file_writer}.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=hooks,
evaluation_hooks=hooks,
prediction_hooks=hooks)
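# Illustrative sketch (not part of the original file): one way a model_fn might
# build the `eval_metrics` and `host_call` tuples described in the class
# docstring above. `labels`, `logits` and `learning_rate` stand for tensors
# produced inside the caller's model_fn; the local import exists only for the
# sketch.
def _demo_build_host_calls(labels, logits, learning_rate):
  import tensorflow as tf

  def metric_fn(labels, logits):
    # Runs on the CPU host; returns a dict of (metric_tensor, update_op) pairs.
    predictions = tf.argmax(logits, axis=-1)
    return {'accuracy': tf.metrics.accuracy(labels=labels,
                                            predictions=predictions)}

  def host_call_fn(lr):
    # Runs on the CPU host; `lr` arrives batch-major (rank >= 1).
    return [tf.identity(lr)]

  eval_metrics = (metric_fn, [labels, logits])
  host_call = (host_call_fn, [tf.reshape(learning_rate, [1])])
  return eval_metrics, host_call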
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.' % self._name)
self.stop()
self._thread.join()
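# Illustrative sketch (not part of the original file): the queue/signal
# protocol used by _OpQueueContext, exercised with a no-op worker. Positive
# values are iteration counts; _SIGNAL.STOP shuts the worker down.
def _demo_op_queue_context():
  def _worker(ctx):
    for iterations in ctx.read_iteration_counts():
      logging.debug('demo worker would run %d iterations', iterations)

  ctx = _OpQueueContext(name='DemoController', target=_worker, args=())
  ctx.send_next_batch_signal(4)   # worker receives 4
  ctx.join()                      # sends STOP and waits for the thread to exit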
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True,
rendezvous=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._feed_error = None
self._finished = False
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
    if self._initial_infeed_sleep_secs:
      # Note: this hook has no `_name` attribute, so the original `self._name`
      # here would raise AttributeError; log a fixed thread name instead.
      logging.info('Infeed thread sleeping for %d seconds.',
                   self._initial_infeed_sleep_secs)
      time.sleep(self._initial_infeed_sleep_secs)
      logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info('Init TPU system')
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
def before_run(self, run_context):
self._feed_error = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
    logging.info('Stop outfeed thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, rendezvous=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
    # This is not strictly necessary, as we do not run the infeed enqueue and
    # outfeed dequeue in side threads for the prediction model, but it makes
    # TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
      # where the final batch (id = 100) is appended by TPUEstimator, so we
      # should drop it before returning the predictions to the user (see the
      # plain-Python sketch after this class).
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
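# Illustrative sketch (not part of the original file): the stopping-signal
# protocol described in _StoppingPredictHook, in plain Python. User batches
# carry stop=0; TPUEstimator appends one batch with stop=1, which is dropped
# before predictions are surfaced.
def _demo_stopping_signal_flow(user_batches):
  padded = list(user_batches) + [{'stop': 1}]  # appended by TPUEstimator
  results = []
  for batch in padded:
    if batch['stop']:
      break  # after_run raises OutOfRangeError here; the batch is discarded
    results.append(batch)
  return results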
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal
)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
          raise TypeError(
              '`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment. Please set '
              'TPUConfig.per_host_input_for_training to True or return '
              '`features` and `labels` from `input_fn`.')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels, signals)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
# TODO(b/XXX): Add predict support for PER_HOST_V2
      raise TypeError('Mode PREDICT is not yet supported in PER_HOST_V2 mode.')
hooks.append(inputs.dataset_initializer_hook())
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
num_replicas_per_host = ctx.num_of_replicas_per_host
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
hooks = []
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
raise TypeError('Mode PREDICT not yet supported in BROADCAST mode.')
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
def device_function_impl(replica_id):
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
          # broadcasted to other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
return enqueue_ops
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
  In addition, flattening/unflattening is handled by `_InputPipeline`. Model
  inputs returned by the `input_fn` can have one of the following forms:
  1. features
  2. (features, labels)
  Internally, form 1 is reformed to `(features, None)` as features and labels
  are passed separately to underlying methods. For TPU training, TPUEstimator
  may expect multiple `features` and `labels` tuples, one for each core.
  TPUEstimator allows various structures for inputs (namely `features` and
  `labels`). `features` can be a `Tensor` or a dict of string name to `Tensor`,
  and `labels` could be `None`, a `Tensor`, or a dict of string name to
  `Tensor`. The TPU infeed/outfeed library expects a flattened tensor list, so
  `features` and `labels` need to be flattened before infeed enqueue, and their
  structure needs to be recorded in order to restore them after infeed dequeue
  (see the plain-Python sketch after this class).
  """
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self):
# Holds the structure of inputs
self._feature_names = []
self._label_names = []
self._has_labels = False
self._signals_helper = None
# Internal state.
self._initialized = False
def has_labels(self):
return self._has_labels
def validate_and_record_structure(self, features, labels, signals=None):
"""Validates and records the structure of features` and `labels`."""
def _extract_key_names(tensor_or_dict):
if tensor_or_dict is None:
return []
return sorted(tensor_or_dict.keys()) if isinstance(
tensor_or_dict, dict) else []
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if signals is not None and self._signals_helper is None:
# Record signals helper.
self._signals_helper = _SignalsHelper(signals)
if self._initialized:
        # Verify the structure is the same. The following should never happen.
assert feature_names == self._feature_names, 'feature keys mismatched'
assert label_names == self._label_names, 'label keys mismatched'
assert has_labels == self._has_labels, 'label presence mismatched'
else:
# Record structure.
self._initialized = True
self._feature_names = feature_names
self._label_names = label_names
self._has_labels = has_labels
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
flattened_inputs = []
if self._feature_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend(
[features[name] for name in self._feature_names])
else:
flattened_inputs.append(features)
if labels is not None:
if self._label_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend([labels[name] for name in self._label_names])
else:
flattened_inputs.append(labels)
if signals is not None:
flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
return flattened_inputs
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
expected_num_features = (
len(self._feature_names) if self._feature_names else 1)
if self._has_labels:
expected_num_labels = (
len(self._label_names) if self._label_names else 1)
else:
expected_num_labels = 0
expected_num_signals = (
self._signals_helper.num_signals if self._signals_helper else 0)
expected_num_tensors = (
expected_num_features + expected_num_labels + expected_num_signals)
if expected_num_tensors != len(flattened_inputs):
        raise ValueError(
            'The number of flattened tensors mismatches the expected number. '
            'Expected {}, got {}.'.format(expected_num_tensors,
                                          len(flattened_inputs)))
if self._feature_names:
unflattened_features = dict(
zip(self._feature_names, flattened_inputs[:expected_num_features]))
else:
# Single tensor case
unflattened_features = flattened_inputs[0]
if expected_num_labels == 0:
unflattened_label = None
elif self._label_names:
label_list = flattened_inputs[
expected_num_features:expected_num_features + expected_num_labels]
unflattened_label = dict(zip(self._label_names, label_list))
else:
# Single tensor case.
unflattened_label = flattened_inputs[expected_num_features]
signals = None
if expected_num_signals != 0:
tensor_list_for_signals = flattened_inputs[
expected_num_features + expected_num_labels:]
signals = self._signals_helper.unflatten(tensor_list_for_signals)
return _Inputs(unflattened_features, unflattened_label, signals=signals)
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
    # When tf.while_loop is called, the body function, which invokes the
    # `enqueue_fn` passed in, is called to construct the graph, so the
    # input_fn structure is recorded at graph-construction time.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_hooks = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
all_hooks.extend(hooks)
if is_dataset:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
all_hooks.extend(hooks)
            # NOTE(xiejw): We dispatch here based on the return type of the
            # user's `input_fn`.
            #
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if is_dataset:
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
    # infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and shapes, which are identical across the queues, so
    # any one can be used. Here, grab the first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
    Performs some sanity checks and logs user-friendly information. Ideally we
    should error out to give users a better error message, but if
    _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
    user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
      err_msg = ('Input pipeline contains one or more QueueRunners. '
                 'It could be slow and not scalable. Please consider '
                 'converting your input pipeline to use `tf.data` instead (see '
                 'https://www.tensorflow.org/guide/datasets for '
                 'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
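# Illustrative sketch (not part of the original file): the flatten/unflatten
# bookkeeping described in the _InputPipeline docstring, in plain Python with
# dicts standing in for Tensors. Keys are sorted for a fixed ordering, the flat
# list crosses the infeed, and the recorded key order restores the dicts.
def _demo_flatten_unflatten(features, labels):
  feature_names = sorted(features.keys())
  label_names = sorted(labels.keys())
  flattened = ([features[k] for k in feature_names] +
               [labels[k] for k in label_names])
  # ... flattened tensors would go through infeed enqueue/dequeue here ...
  restored_features = dict(zip(feature_names, flattened[:len(feature_names)]))
  restored_labels = dict(zip(label_names, flattened[len(feature_names):]))
  return restored_features, restored_labels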
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
  This makes calling the model_fn on CPU and TPU easier and more consistent,
  and performs the necessary checks and mutations required by TPU training and
  evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input should be taken from TPU infeed rather
than input pipeline (input_fn) directly. To fit TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of train_fn, host_calls, and captured scaffold_fn. train_fn
      represents the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return train_step, host_call, captured_scaffold_fn
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, a eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of eval_fn, host_calls, and captured scaffold_fn. eval_fn
      represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of predict_fn, host_calls, and captured scaffold_fn. predict_fn
      represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return predict_step, host_calls, captured_scaffold_fn
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
    # TODO(xiejw): Add validation for the prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(
key, tensor))
return predictions
def _validate_model_features_and_labels(self,
features,
labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: Tensor or a dictionary of Tensors
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if not isinstance(obj, ops.Tensor) and not isinstance(obj, dict):
raise TypeError(
'The {} to the model returned by input_fn must be either a Tensor '
'or a dictionary of Tensors. {}: {}'.format(obj_name, obj_name,
obj))
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for (key, tensor) in obj.items():
if not tensor.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static '
'shape. Key: \'{}\', Tensor: {}'.format(
obj_name, key, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
    # Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(err_msg.format('training_chief_hooks'))
if estimator_spec.training_hooks:
raise ValueError(err_msg.format('training_hooks'))
if estimator_spec.evaluation_hooks:
raise ValueError(err_msg.format('evaluation_hooks'))
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
    The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensors are passed to the host_fn,
    which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
    # Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
    # per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
pos += len(self._tensors[name])
# It is assumed evaluation always happens on single host TPU system. So,
# place all ops on tpu host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size.
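For example (illustrative): with `train_batch_size=1024` and 8 replicas,
`model_fn` receives `params['batch_size'] == 128` per shard.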
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return
`EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case
the following discussion on TPU evaluation does not apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker).
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.precision(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`,
and another with `tag_constants.SERVING` and `tag_constants.TPU`.
At serving time, these tags are used to select metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If
not, please call `session.run(tpu.initialize_system())`.
`tpu.outside_compilation` can be used to wrap TPU incompatible ops in
`model_fn`.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = export_output_lib.ClassificationOutput(classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
warm_start_from=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator`. For training, the
returned `EstimatorSpec` cannot have hooks as it is not supported in
`TPUEstimator`. Instead, the user can pass the training hooks as
an argument to `TPUEstimator.train()`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval. See below.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`.
Must be divisible by total number of replicas.
eval_batch_size: An int representing evaluation batch size.
Must be divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size.
Must be divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU besides the one on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings`
object to fully configure warm-starting. If the string
filepath is provided instead of a `WarmStartSettings`,
then all variables are warm-started, and it is assumed
that vocabularies and Tensor names are unchanged.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to prevent LoggingTensorHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# adds equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Passing non-None params as wrapped model_fn has it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size,
eval_batch_size, predict_batch_size,
use_tpu,
eval_on_tpu)
self._export_to_tpu = export_to_tpu
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True):
if mode != model_fn_lib.ModeKeys.PREDICT:
raise NotImplementedError(
'TPUEstimator only handles mode PREDICT for export_savedmodel(); '
'got {}.'.format(mode))
(super(TPUEstimator, self).
_add_meta_graph_for_mode(builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables))
if self._export_to_tpu:
input_receiver_fn_map = {_REWRITE_FOR_INFERENCE_MODE:
input_receiver_fn_map[mode]}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
# See b/110052256 for why `check_variables` is `False`.
(super(TPUEstimator, self).
_add_meta_graph_for_mode(builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=False,
mode=mode,
export_tags=export_tags,
check_variables=False))
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(
features, labels, mode, config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _REWRITE_FOR_INFERENCE_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
mode = model_fn_lib.ModeKeys.PREDICT
estimator_spec = self._call_model_fn(features, labels, mode, config)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
tensors_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs)
)
tensors = nest.flatten(tensors_dict)
tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)]
# We cannot return anything other than `tpu_tensors` here so we capture
# the rest for later use.
capture.capture((estimator_spec, tensors_dict, tensors))
return tpu_tensors
tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation)
estimator_spec, tensors_dict, tensors = capture.get()
# Reconstruct `tensors`, but with `tpu_tensors` replaced with
# `tpu_tensors_on_cpu`.
new_tensors = []
for t in tensors:
if _is_tpu_tensor(t):
new_tensors.append(tpu_tensors_on_cpu.pop(0))
elif t is None:
new_tensors.append(None)
else:
# Fetching a tensor that is not in `tpu_tensors_on_cpu` would not, by
# itself, trigger the TPU computation or block on it, so we add the
# control dependency here.
control_inputs = (tpu_tensors_on_cpu
if isinstance(tpu_tensors_on_cpu, (list, tuple))
else (tpu_tensors_on_cpu,))
with ops.control_dependencies(control_inputs):
new_tensors.append(array_ops.identity(t))
# Reconstruct `tensors_dict`.
new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors)
# Reconstruct `export_outputs`.
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_tensors_dict)
)
return estimator_spec._replace(export_outputs=new_export_outputs)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps users have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'],
_BATCH_SIZE_KEY, batch_size_for_input_fn)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning
message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn, hooks=hooks, steps=steps, max_steps=max_steps,
saving_listeners=saving_listeners
)
except Exception as e: # pylint: disable=broad-except
rendezvous.record_error('training_loop', e)
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn, steps=steps, hooks=hooks, checkpoint_path=checkpoint_path,
name=name
)
except Exception as e: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', e)
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception as e: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', e)
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
if mode != model_fn_lib.ModeKeys.PREDICT:
is_export_mode = False
else:
# For export_savedmodel, input_fn is never passed to Estimator. So, when
# mode == PREDICT, the self._is_input_fn_invoked bit tells us whether this
# call came from the .predict API rather than the export_savedmodel API.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
output_dir=self.model_dir,
every_n_steps=self._log_every_n_steps)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'shutdown_worker')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(timeout_ms=60*1000),
]
elif shutdown_mode == 'shutdown_computation':
finalizer_hooks = [
session_support.RestartComputation(timeout_ms=60*1000),
]
else:
raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' %
shutdown_mode)
shutdown_hooks.append(session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks
))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
logging_hook_frequency = ( # Divide and round up
(self._log_every_n_steps +
self._config.tpu_config.iterations_per_loop - 1) //
self._config.tpu_config.iterations_per_loop)
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
),
InstallSignalHandlerHook(),
training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=logging_hook_frequency)
])
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops()
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(total_loss,
math_ops.cast(
iterations_per_loop_var,
dtype=total_loss.dtype))
# Creates a dummy metric update_op for all metrics. Estimator expects
# all metrics in eval_metric_ops to have an update_op and calls them one
# by one. The real metric update_ops are invoked in a separate thread.
# So, here we give Estimator the dummy op for all metrics.
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops()
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
for k, v in host_call_ret.get('eval_metrics', {}).items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode]),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops()
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the element (via generator) to the call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue that we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions, message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode]),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _is_tpu_tensor(tensor):
if not isinstance(tensor, ops.Tensor):
return False
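# If the op lacks the outside-compilation attribute, get_attr raises
# ValueError, which means the tensor was produced inside the TPU
# computation, so we treat it as a TPU tensor.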
try:
tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) # pylint: disable=protected-access
except ValueError:
return True
else:
return False
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output.outputs.values()
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
An `ExportOutput` object of the same type as `export_output`, but
constructed from `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_eval_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_eval_step, [_ZERO_LOSS])
(loss,) = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_train_step, host_call, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_train_step, [_INITIAL_LOSS])
(loss,) = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
num_cores = ctx.num_cores
single_tpu_predict_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=num_cores,
outputs_from_all_shards=False)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check that there is at least one CrossReplicaSum operation in the graph
# This should be introduced by using the CrossShardOptimizer wrapper
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the TensorFlow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_util._DatasetInitializerHook(iterator)
# pylint: enable=protected-access
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must call dataset_initializer_hook '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self, dataset, batch_size, add_padding=False):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, so that each mapped element
is now a dictionary in which `features`, `labels`, and `signals` are three
distinct keys. This provides a better structure, which eases the process of
decomposing the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(
features, labels, batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(
scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL)
else:
# For the non-Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor, real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(
real_batch_size, missing_count, batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors across all TPU cores and concat them back, it is
# important to ensure the real data is placed before the padded data, i.e.,
# order is preserved. Given that, the sliced padding mask should be all 0's.
# If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [x for x in nest.flatten(batch_features)
if isinstance(x, ops.Tensor)]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat(
[
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
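# Worked example: real_batch_size=3, missing_count=2, batch_size=5 gives
# padding_mask == [0, 0, 0, 1, 1] (0 marks real rows, 1 marks padding).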
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
for key in sorted(iter(signals.keys())):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
return [signals[key] for key in sorted(iter(signals.keys()))]
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if isinstance(params, hparam.HParams):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
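# Illustrative (hypothetical values): _add_item_to_params(params, 'batch_size', 128)
# works whether `params` is a plain dict or an HParams object (the latter via
# add_hparam/set_hparam).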
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and
returns a `ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
assets_extra,
as_text,
checkpoint_path,
strip_default_attrs)
|
test_07_concurrency.py
|
#!/usr/bin/python
from common import *
import logging
import sys
import time
import threading
import swampyer
logging.basicConfig(stream=sys.stdout, level=30)
# We want to see the protocol information
# being exchanged
#logging.basicConfig(stream=sys.stdout, level=1)
"""
The following function keeps track of how many concurrent
invocations are active for each `queue_name` keyword.
As TRACKER and TRACKER_MAX are global, we simply increment
when entering and decrement when leaving. TRACKER_MAX records
the highest value TRACKER has reached, and we use that as an
external means of detecting when more than the allowed number
of concurrent invocations were active.
"""
TRACKER = {}
TRACKER_MAX = {}
def simple_invoke(event, queue_name):
global TRACKER
TRACKER.setdefault(queue_name,0)
TRACKER_MAX.setdefault(queue_name,0)
TRACKER[queue_name] += 1
if TRACKER[queue_name] > TRACKER_MAX[queue_name]:
TRACKER_MAX[queue_name] = TRACKER[queue_name]
time.sleep(0.5)
TRACKER[queue_name] -= 1
return queue_name
CALL_ERRORS = {}
def simple_call(client, method):
def make_call():
try:
call_result = client.call('com.izaber.wamp.hello.'+method,method)
assert call_result == method
except swampyer.ExInvocationError as ex:
CALL_ERRORS.setdefault(method,0)
CALL_ERRORS[method] += 1
return make_call
def invoke_a_bunch(methods, iterations):
""" This will invoke each of the methods `iterations` times
in quick succession. Then waits for the threads to complete
"""
thread_list = []
for i in range(iterations):
for method in methods:
thr = threading.Thread(target=method)
thr.start()
thread_list.append(thr)
# Wait till it's done
for thr in thread_list:
thr.join()
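# Because every call above runs in its own thread, the peak recorded in
# TRACKER_MAX reflects the concurrency limit enforced by the registration's
# queue rather than any client-side serialization.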
def reset_trackers():
TRACKER.clear()
TRACKER_MAX.clear()
CALL_ERRORS.clear()
def test_connection():
# For concurrency, the queues must be defined in advance.
# This is partly because it forces the queue size to be set before
# things can get messy.
# For instance, if we allowed queue sizes to be defined at registration
# time we could have conflicts like
#
# client.reg('foo',call,maxsize=1000000,concurrency_queue='default')
# client.reg('foo2',call,maxsize=1,concurrency_queue='default')
#
# Which is correct?
# So while the policy is a bit blunt, we force the queue size to be
# defined at session creation.
client = connect_service(
concurrency_max=2,
timeout=60
)
client2 = connect_service(timeout=60)
# --------------------------------------------------------------
# Check if we can register
reg_result = client.register(
'com.izaber.wamp.hello.default',
simple_invoke,
details={
"force_reregister": True,
},
)
assert swampyer.WAMP_REGISTERED == reg_result
assert reg_result == swampyer.WAMP_REGISTERED
# Let's create a burst of data
invoke_a_bunch([simple_call(client2, 'default')],10)
# What was the maximum number of concurrent invocations?
assert TRACKER_MAX['default'] == 2
# Let's unregister then
unreg_result = client.unregister(reg_result.registration_id)
assert unreg_result == swampyer.WAMP_UNREGISTERED
# --------------------------------------------------------------
# Okay, so let's register a new entry that can have unlimited concurrency
reg_result = client.register(
'com.izaber.wamp.hello.unlimited',
simple_invoke,
details={"force_reregister": True},
concurrency_queue="unlimited",
)
assert swampyer.WAMP_REGISTERED == reg_result
assert reg_result == swampyer.WAMP_REGISTERED
# Let's create a burst of data
invoke_a_bunch([simple_call(client2, 'unlimited')],10)
# What was the maximum number of concurrent invocations?
# Since this went to an unlimited call queue, it should hit 10
assert TRACKER_MAX['unlimited'] == 10
# Unregister the previous function
unreg_result = client.unregister(reg_result.registration_id)
assert unreg_result == swampyer.WAMP_UNREGISTERED
# --------------------------------------------------------------
# Clear the trackers since we're going to do
# a new set of runs
reset_trackers()
# Let's create another client with multiple queues
concurrency_configs = {
'just2': {
'concurrency_max': 2,
'queue_max': 10,
},
'just5': {
'concurrency_max': 5,
},
'just10': {
'concurrency_max': 10,
},
}
client3 = connect_service(concurrency_configs=concurrency_configs,timeout=60)
for k in concurrency_configs.keys():
reg_result = client3.register(
'com.izaber.wamp.hello.'+k,
simple_invoke,
details={"force_reregister": True},
concurrency_queue=k,
)
assert swampyer.WAMP_REGISTERED == reg_result
assert reg_result == swampyer.WAMP_REGISTERED
# Let's create a burst of data
invoke_a_bunch([
simple_call(client2,k) for k in concurrency_configs.keys()
],50)
# Match the max concurrency amounts with what we expect them to be
for k,v in concurrency_configs.items():
expected = v['concurrency_max']
assert TRACKER_MAX[k] <= expected, "Expected less than or equal to {} got {}".format(expected, TRACKER_MAX[k])
# Since we've put a queue_max on the just2 queue, we expect some
# errors as well
assert CALL_ERRORS['just2'] > 0
assert CALL_ERRORS.get('just5',0) == 0
assert CALL_ERRORS.get('just10',0) == 0
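# (Why 'just2' errors out: its queue_max is 10 while its concurrency_max is
# only 2, so the burst of 50 calls presumably overflows the backlog and
# swampyer rejects the excess invocations with ExInvocationError, which is
# what CALL_ERRORS counts. The other queues have no queue_max, so nothing
# is dropped.)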
# --------------------------------------------------------------
# Let's amend the concurrency limits to new ones
concurrency_updates = {
'just2': 20,
'just5': 10,
'just10': 30,
}
for queue_name, new_limit in concurrency_updates.items():
concurrency_queue = client3.concurrency_queue_get(queue_name)
concurrency_queue.configure(concurrency_max=new_limit)
# Let's create a burst of data
invoke_a_bunch([
simple_call(client2,k) for k in concurrency_configs.keys()
],50)
# Match the max concurrency amounts with what we expect them to be
for k,v in concurrency_updates.items():
assert TRACKER_MAX[k] <= v, "Expected less than or equal to {} got {}".format(v, TRACKER_MAX[k])
# --------------------------------------------------------------
# By default we don't allow auto creation of new concurrency queues
def unmadequeue():
return client3.register(
'com.izaber.wamp.hello.fake',
simple_invoke,
details={"force_reregister": True},
concurrency_queue='unmadequeue',
)
try:
unmadequeue()
raise Exception("What, this shouldn't happen")
except Exception as ex:
assert isinstance(ex, swampyer.ExNotImplemented)
# --------------------------------------------------------------
# But if we update the client to allow it, the registration should succeed
client3.configure(concurrency_strict_naming=False)
reg_result = unmadequeue()
assert swampyer.WAMP_REGISTERED == reg_result
assert reg_result == swampyer.WAMP_REGISTERED
# Then shutdown
client.shutdown()
client2.shutdown()
client3.shutdown()
if __name__ == '__main__':
test_connection()
|
youtubequeue.py
|
import os
import settings
settings.generateConfigFile()
import soundfile as sf
from pydub import AudioSegment
import generatorclient
from time import sleep
from subprocess import *
import videouploader
from threading import Thread
import pickle
import datetime
from datetime import timedelta
import subprocess
import videoscript
import random
from moviepy.editor import *
# 18:00 19:00 20:00 23:00 00:00 01:00
waitTill = None
scriptIBuffer = []
def loadVideoScripts():
vidsaves = os.listdir(settings.rawvideosaves)
print(vidsaves)
for vid in vidsaves:
path = settings.rawvideosaves + "/" + vid
with open(path, 'rb') as pickle_file:
script = pickle.load(pickle_file)
videoscript.videoscripts.append(script)
def parseScripts():
if scriptIBuffer:
for script in scriptIBuffer:
scriptno = script[0]
print("Parsing Raw Script %s" % scriptno)
scripttitle = script[1]
author = script[2]
ups = script[3]
payload = script[4]
final_script = payload[0]
videotype = payload[1]
video_settings = payload[2]
music_type = payload[3]
thumbnail = payload[4]
characters_amount = payload[5]
youtube_title = payload[6]
youtube_description = payload[7]
youtube_tags = payload[8]
videoscript.VideoScriptEngine(scriptno, scripttitle, author, ups, final_script, videotype, video_settings,
music_type, thumbnail, characters_amount, youtube_title, youtube_description,
youtube_tags)
scriptIBuffer.clear()
else:
print("VIDEO GENERATOR no scripts to parse")
def uploadVids():
pass
"""
if renderedVids:
for vid in renderedVids:
vid.generateMovie()
renderedVids.clear()
loadVideoScripts()
"""
def canUpload():
if generatorclient.last_upload_times is not None:
if generatorclient.last_upload_times == 0:
return settings.uploads_a_day
now = datetime.datetime.now()
vids_within_day = 0
for time in generatorclient.last_upload_times:
time = time[0]
if now.hour >= settings.youtube_api_quota_reset_hour:
if time > now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0):
vids_within_day += 1
else:
if time >= now - timedelta(days=1):
vids_within_day += 1
print("%s Videos uploaded since %s:00" % (vids_within_day, settings.youtube_api_quota_reset_hour))
print("Estimated quote usage %s" % (vids_within_day * 1658))
return settings.uploads_a_day - vids_within_day
return False
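# Rough worked example of the window logic above, assuming (hypothetically)
# youtube_api_quota_reset_hour = 8 and uploads_a_day = 5: at 10:00 only
# uploads made after today's 08:00 count against the daily limit, while at
# 06:00 any upload in the previous 24 hours counts. The per-upload figure
# of 1658 quota units is taken as-is from the print statement above.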
def tickThread():
global waitTill
while True:
sleep(30)
if generatorclient.last_upload_times is None and not generatorclient.isRequestingScripts:
print("No update times available... requesting more")
generatorclient.getLastUploadedScripts()
sleep(5)
if videoscript.videoscripts:
print("Rendering all video scripts...")
for script in videoscript.videoscripts:
script.renderVideo()
if waitTill is not None:
if datetime.datetime.now() > waitTill:
waitTill = None
else:
print("Out of Quote Response... waiting till %s" % waitTill)
if waitTill is None:
amount_to_upload = canUpload()
if type(amount_to_upload) is int:
scripts_available_to_upload = [script for i, script in enumerate(videoscript.videoscripts) if
script.isRendered]
print("Allowed to upload %s videos" % amount_to_upload)
if amount_to_upload > len(scripts_available_to_upload):
amount_to_upload = len(scripts_available_to_upload)
print("Only %s scripts available to upload" % amount_to_upload)
print("Uploading %s video scripts... %s ready to upload (total %s)" % (
amount_to_upload, amount_to_upload, len(videoscript.videoscripts)))
for i in range(0, amount_to_upload, 1):
upload = scripts_available_to_upload[i].uploadVideo()
try:
if upload is False:
now = datetime.datetime.now()
if now.hour > settings.youtube_api_quota_reset_hour:
waitTill = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0) + timedelta(days=1)
else:
waitTill = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0)
except Exception as e:
print(e)
pass
generatorclient.last_upload_times = None
elif type(amount_to_upload) is bool:
print("Can't get last update times")
else:
print("Estimated out of quotes waiting till %s" % waitTill)
else:
print("No video scripts, just chilling...")
if not generatorclient.isRequestingScripts:
generatorclient.requestScripts([script.scriptno for script in videoscript.videoscripts])
def initQueue():
## process = subprocess.call("wine /home/royalreddit/Desktop/balcon/balcon.exe -t supnerds -w /home/royalreddit/Desktop/test2.wav", shell = True)
if not os.path.exists(settings.videoqueue_directory):
os.mkdir(settings.videoqueue_directory)
if not os.path.exists(settings.rawvideosaves):
os.mkdir(settings.rawvideosaves)
if not os.path.exists(settings.finishedvideosdirectory):
os.mkdir(settings.finishedvideosdirectory)
loadVideoScripts()
generatorclient.connectToServer()
sleep(2)
generatorclient.requestScripts([script.scriptno for script in videoscript.videoscripts])
thread = Thread(target=tickThread)
thread.start()
# uploadVids()
if __name__ == "__main__":
"""
main_vid_combined = concatenate_videoclips([VideoFileClip("%s/Intervals/interval1.mp4" % settings.assetPath), VideoFileClip("%s/Intervals/interval2.mp4" % settings.assetPath)])
asd = main_vid_combined.set_audio(CompositeAudioClip([AudioFileClip("%s/music-loop.wav" % settings.tempPath), AudioFileClip("%s/music-loop2.wav" % settings.tempPath)]))
asd.write_videofile("%s/lol.mp4" % settings.currentPath, threads=4,
fps=settings.movieFPS, temp_audiofile=settings.currentPath + "\\temp.mp3")
#print(asd)
#audio_clip = AudioSegment.from_wav("C:/Users/Thomas Shaer/Desktop/Youtube Bot Experimental/Youtube Bot Video Generator/Temp/tempaudio21.wav")
"""
if not settings.exportOffline:
videouploader.get_credentials()
else:
print("Video Generator launching in export offline mode")
initQueue()
|
test_functools.py
|
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
import os
import weakref
import gc
from weakref import proxy
import contextlib
from test.support.script_helper import assert_python_ok
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
support.gc_collect() # For PyPy or other GCs.
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod(func=capture, a=1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
def test_total_ordering_for_metaclasses_issue_44605(self):
@functools.total_ordering
class SortableMeta(type):
def __new__(cls, name, bases, ns):
return super().__new__(cls, name, bases, ns)
def __lt__(self, other):
if not isinstance(other, SortableMeta):
pass
return self.__name__ < other.__name__
def __eq__(self, other):
if not isinstance(other, SortableMeta):
pass
return self.__name__ == other.__name__
class B(metaclass=SortableMeta):
pass
class A(metaclass=SortableMeta):
pass
self.assertTrue(A < B)
self.assertFalse(A > B)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestCache:
# This tests that the pass-through is working as designed.
# The underlying functionality is tested in TestLRU.
def test_cache(self):
@self.module.cache
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
# This caused the cache to have orphan links not referenced
# by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
# LRU cache guarantees that it will call __hash__
# only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
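# (Reason: DoubleEq(2).__eq__ below calls test_func() again while the
# cache is in the middle of a lookup for DoubleEq(2), so the same thread
# re-acquires the cache's internal lock; that only works if the lock is
# reentrant, i.e. an RLock rather than a plain Lock.)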
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
def test_lru_cache_parameters(self):
@self.module.lru_cache(maxsize=2)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 2, "typed": False})
@self.module.lru_cache(maxsize=1000, typed=True)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 1000, "typed": True})
def test_lru_cache_weakrefable(self):
@self.module.lru_cache
def test_function(x):
return x
class A:
@self.module.lru_cache
def test_method(self, x):
return (self, x)
@staticmethod
@self.module.lru_cache
def test_staticmethod(x):
return (self, x)
refs = [weakref.ref(test_function),
weakref.ref(A.test_method),
weakref.ref(A.test_staticmethod)]
for ref in refs:
self.assertIsNotNone(ref())
del A
del test_function
gc.collect()
for ref in refs:
self.assertIsNone(ref())
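# A minimal standalone sketch (hypothetical helper, not part of the upstream
# test suite) of the reentrancy that test_need_for_rlock relies on: the same
# thread can re-acquire a threading.RLock it already holds, which is exactly
# what happens when DoubleEq.__eq__ re-enters the cached function, and why a
# plain threading.Lock inside lru_cache would deadlock there.
def _rlock_reentrancy_sketch():
    import threading
    lock = threading.RLock()
    with lock:
        with lock:  # re-acquired by the same thread: fine for RLock, deadlock for Lock
            return True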
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
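# A small sketch (hypothetical helper, not used by the tests) of why the two
# cached functions above are defined at module scope: pickle serialises a
# function by its qualified name, so the round trip in test_pickle resolves
# back to the very same object.
def _pickle_identity_sketch():
    import pickle
    restored = pickle.loads(pickle.dumps(py_cached_func))
    return restored is py_cached_func  # expected: True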
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above, what dispatch(object) returns is the original
# undecorated function, not g itself: @singledispatch returns a wrapper,
# and the name g now refers to that wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(metaclass=abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.__dict__['add'].__isabstractmethod__)
with self.assertRaises(TypeError):
Abstract()
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_staticmethod_type_ann_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register
@staticmethod
def _(arg: int):
return isinstance(arg, int)
@t.register
@staticmethod
def _(arg: str):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_type_ann_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register
@classmethod
def _(cls, arg: int):
return cls("int")
@t.register
@classmethod
def _(cls, arg: str):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_method_wrapping_attributes(self):
class A:
@functools.singledispatchmethod
def func(self, arg: int) -> str:
"""My function docstring"""
return str(arg)
@functools.singledispatchmethod
@classmethod
def cls_func(cls, arg: int) -> str:
"""My function docstring"""
return str(arg)
@functools.singledispatchmethod
@staticmethod
def static_func(arg: int) -> str:
"""My function docstring"""
return str(arg)
for meth in (
A.func,
A().func,
A.cls_func,
A().cls_func,
A.static_func,
A().static_func
):
with self.subTest(meth=meth):
self.assertEqual(meth.__doc__, 'My function docstring')
self.assertEqual(meth.__annotations__['arg'], int)
self.assertEqual(A.func.__name__, 'func')
self.assertEqual(A().func.__name__, 'func')
self.assertEqual(A.cls_func.__name__, 'cls_func')
self.assertEqual(A().cls_func.__name__, 'cls_func')
self.assertEqual(A.static_func.__name__, 'static_func')
self.assertEqual(A().static_func.__name__, 'static_func')
def test_double_wrapped_methods(self):
def classmethod_friendly_decorator(func):
wrapped = func.__func__
@classmethod
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
return wrapped(*args, **kwargs)
return wrapper
class WithoutSingleDispatch:
@classmethod
@contextlib.contextmanager
def cls_context_manager(cls, arg: int) -> str:
try:
yield str(arg)
finally:
return 'Done'
@classmethod_friendly_decorator
@classmethod
def decorated_classmethod(cls, arg: int) -> str:
return str(arg)
class WithSingleDispatch:
@functools.singledispatchmethod
@classmethod
@contextlib.contextmanager
def cls_context_manager(cls, arg: int) -> str:
"""My function docstring"""
try:
yield str(arg)
finally:
return 'Done'
@functools.singledispatchmethod
@classmethod_friendly_decorator
@classmethod
def decorated_classmethod(cls, arg: int) -> str:
"""My function docstring"""
return str(arg)
# These are sanity checks
# to test the test itself is working as expected
with WithoutSingleDispatch.cls_context_manager(5) as foo:
without_single_dispatch_foo = foo
with WithSingleDispatch.cls_context_manager(5) as foo:
single_dispatch_foo = foo
self.assertEqual(without_single_dispatch_foo, single_dispatch_foo)
self.assertEqual(single_dispatch_foo, '5')
self.assertEqual(
WithoutSingleDispatch.decorated_classmethod(5),
WithSingleDispatch.decorated_classmethod(5)
)
self.assertEqual(WithSingleDispatch.decorated_classmethod(5), '5')
# Behavioural checks now follow
for method_name in ('cls_context_manager', 'decorated_classmethod'):
with self.subTest(method=method_name):
self.assertEqual(
getattr(WithSingleDispatch, method_name).__name__,
getattr(WithoutSingleDispatch, method_name).__name__
)
self.assertEqual(
getattr(WithSingleDispatch(), method_name).__name__,
getattr(WithoutSingleDispatch(), method_name).__name__
)
for meth in (
WithSingleDispatch.cls_context_manager,
WithSingleDispatch().cls_context_manager,
WithSingleDispatch.decorated_classmethod,
WithSingleDispatch().decorated_classmethod
):
with self.subTest(meth=meth):
self.assertEqual(meth.__doc__, 'My function docstring')
self.assertEqual(meth.__annotations__['arg'], int)
self.assertEqual(
WithSingleDispatch.cls_context_manager.__name__,
'cls_context_manager'
)
self.assertEqual(
WithSingleDispatch().cls_context_manager.__name__,
'cls_context_manager'
)
self.assertEqual(
WithSingleDispatch.decorated_classmethod.__name__,
'decorated_classmethod'
)
self.assertEqual(
WithSingleDispatch().decorated_classmethod.__name__,
'decorated_classmethod'
)
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`; annotate with regular classes or ABCs
# instead (a standalone sketch of that pattern follows this test class).
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
def test_register_genericalias(self):
@functools.singledispatch
def f(arg):
return "default"
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(list[int], lambda arg: "types.GenericAlias")
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.List[int], lambda arg: "typing.GenericAlias")
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.Union[list[int], str], lambda arg: "typing.Union[types.GenericAlias]")
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.Union[typing.List[float], bytes], lambda arg: "typing.Union[typing.GenericAlias]")
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.Any, lambda arg: "typing.Any")
self.assertEqual(f([1]), "default")
self.assertEqual(f([1.0]), "default")
self.assertEqual(f(""), "default")
self.assertEqual(f(b""), "default")
def test_register_genericalias_decorator(self):
@functools.singledispatch
def f(arg):
return "default"
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(list[int])
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.List[int])
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.Union[list[int], str])
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.Union[typing.List[int], str])
with self.assertRaisesRegex(TypeError, "Invalid first argument to "):
f.register(typing.Any)
def test_register_genericalias_annotation(self):
@functools.singledispatch
def f(arg):
return "default"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: list[int]):
return "types.GenericAlias"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: typing.List[float]):
return "typing.GenericAlias"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: typing.Union[list[int], str]):
return "types.UnionType(types.GenericAlias)"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: typing.Union[typing.List[float], bytes]):
return "typing.Union[typing.GenericAlias]"
with self.assertRaisesRegex(TypeError, "Invalid annotation for 'arg'"):
@f.register
def _(arg: typing.Any):
return "typing.Any"
self.assertEqual(f([1]), "default")
self.assertEqual(f([1.0]), "default")
self.assertEqual(f(""), "default")
self.assertEqual(f(b""), "default")
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with support.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
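# A bare-bones usage sketch (hypothetical class, not exercised by the tests
# above) of the compute-once behaviour TestCachedProperty checks, using the
# stdlib functools.cached_property rather than the module under test.
def _cached_property_usage_sketch():
    import functools
    class Circle:
        def __init__(self, radius):
            self.radius = radius
        @functools.cached_property
        def area(self):
            return 3.14159 * self.radius ** 2
    c = Circle(2)
    first = c.area   # computed once and stored in c.__dict__
    second = c.area  # served from c.__dict__, not recomputed
    return first is second  # expected: True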
if __name__ == '__main__':
unittest.main()
test_closing.py
from fixtures import * # noqa: F401,F403
from flaky import flaky
from pyln.client import RpcError
from shutil import copyfile
from pyln.testing.utils import SLOW_MACHINE
from utils import (
only_one, sync_blockheight, wait_for, DEVELOPER, TIMEOUT,
account_balance, first_channel_id, basic_fee, TEST_NETWORK,
EXPERIMENTAL_FEATURES,
)
import os
import queue
import pytest
import re
import subprocess
import threading
import unittest
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
def test_closing(node_factory, bitcoind, chainparams):
l1, l2 = node_factory.line_graph(2)
chan = l1.get_channel_scid(l2)
fee = basic_fee(3750) if not chainparams['elements'] else 4477
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
# Only wait for the channels to activate with DEVELOPER=1,
# otherwise it's going to take too long because of the missing
# --dev-fast-gossip
if DEVELOPER:
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
]
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
# The entry in the channels table should still be there
assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
def test_closing_while_disconnected(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
fut.result(TIMEOUT)
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(101)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_disconnected_notify(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l2.stop()
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
out = subprocess.check_output(['cli/lightning-cli',
'--network={}'.format(TEST_NETWORK),
'--lightning-dir={}'
.format(l1.daemon.lightning_dir),
'close',
l2.info['id'],
'5']).decode('utf-8').splitlines()
assert out[0] == '# peer is offline, will negotiate once they reconnect (5 seconds before unilateral close).'
assert out[1] == '# Timed out, forcing close.'
assert not any([line.startswith('#') for line in out[2:]])
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fundchannel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@pytest.mark.slow_test
def test_closing_torture(node_factory, executor, bitcoind):
# We set up a fully-connected mesh of N nodes, then try
# closing them all at once.
amount = 10**6
num_nodes = 10 # => 45 channels (36 seconds on my laptop)
if node_factory.valgrind:
num_nodes -= 4 # => 15 (135 seconds)
nodes = node_factory.get_nodes(num_nodes)
# Make sure bitcoind has plenty of utxos
bitcoind.generate_block(num_nodes)
# Give them all plenty of UTXOs, make sure they see them
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
addr = nodes[i].rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txs = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port)
txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid'])
# Make sure they're all in, then lock them in.
bitcoind.generate_block(1, wait_for_mempool=txs)
# Wait for them all to be CHANNELD_NORMAL
for n in nodes:
wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers']))
# Start closers: can take a long time under valgrind!
futures = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id']))
futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id']))
# Wait for close to finish
close_txs = set()
for f in futures:
# If one side completes closing, we'll get an error here 'Peer has no active channel'
try:
close_txs.add(f.result(TIMEOUT)['txid'])
except RpcError as err:
assert err.error['message'] == 'Peer has no active channel'
# Should have one close for each open.
assert len(close_txs) == len(txs)
# Get closes confirmed
bitcoind.generate_block(100, wait_for_mempool=list(close_txs))
# And make sure they hangup.
for n in nodes:
wait_for(lambda: n.rpc.listpeers()['peers'] == [])
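# A tiny worked version (hypothetical helper, not used by the test) of the
# channel counts quoted in the comments of test_closing_torture: a fully
# connected mesh of n nodes has n*(n-1)/2 channels.
def _mesh_channel_count_sketch():
    def channels(n):
        return n * (n - 1) // 2
    return channels(10), channels(10 - 4)  # (45, 15), matching the comments above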
@pytest.mark.slow_test
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/11000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 11000, 15000, 7400], [8000, 6000, 1001, 100]]
amounts = [0, 545999, 546000]
num_peers = len(feerates) * len(amounts)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for amount in amounts:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
p.amount = amount
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
# Technically, this happens asynchronously with fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.amount != 0:
l1.pay(p, 100000000)
# Now close all channels (not unilaterally!)
closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'@WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'may_reconnect': True},
{'may_reconnect': True}])
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
l1.rpc.close(l2.info['id'])
l1.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
# Now verify that the closing tx is in the mempool.
bitcoind.generate_block(6, wait_for_mempool=1)
sync_blockheight(bitcoind, [l1, l2])
for n in [l1, l2]:
# Ensure we actually got a mutual close.
n.daemon.wait_for_log(r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_specified_destination(node_factory, bitcoind, chainparams):
l1, l2, l3, l4 = node_factory.get_nodes(4)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
chan12, _ = l1.fundchannel(l2, 10**6)
chan13, _ = l1.fundchannel(l3, 10**6)
chan14, _ = l1.fundchannel(l4, 10**6)
l1.pay(l2, 100000000)
l1.pay(l3, 100000000)
l1.pay(l4, 100000000)
bitcoind.generate_block(5)
addr = chainparams['example_addr']
l1.rpc.close(chan12, None, addr)
l1.rpc.call('close', {'id': chan13, 'destination': addr})
l1.rpc.call('close', [chan14, None, addr])
l1.daemon.wait_for_logs([' to CLOSINGD_SIGEXCHANGE'] * 3)
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == 3)
# Now grab the close transaction
closetxs = {}
for i, n in enumerate([l2, l3, l4]):
billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
closetxs[n] = m.group(1)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
# l1 can't spend the output paid to addr.
for txid in closetxs.values():
assert not l1.daemon.is_in_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
# Check the txid has at least 1 confirmation
for n, txid in closetxs.items():
n.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
for n in [l2, l3, l4]:
# Make sure both nodes have grabbed their close tx funds
closetx = closetxs[n]
outputs = n.rpc.listfunds()['outputs']
assert closetx in set([o['txid'] for o in outputs])
output_num2 = [o for o in outputs if o['txid'] == closetx][0]['output']
output_num1 = 0 if output_num2 == 1 else 1
# Check that the other output address is addr
assert addr == bitcoind.rpc.gettxout(closetx, output_num1)['scriptPubKey']['addresses'][0]
assert 1 == bitcoind.rpc.gettxout(closetx, output_num1)['confirmations']
def closing_negotiation_step(node_factory, bitcoind, chainparams, opts):
def feerate_for(target, minimum=0, maximum=10000000):
"""Binary search to find feerate"""
assert minimum != maximum
mid = (minimum + maximum) // 2
mid_fee = basic_fee(mid)
if mid_fee > target:
return feerate_for(target, minimum, mid)
elif mid_fee < target:
return feerate_for(target, mid, maximum)
else:
return mid
orate = feerate_for(21000) # closing fee negotiation starts at 21000
prate = feerate_for(20000) # closing fee negotiation starts at 20000
opener, peer = node_factory.line_graph(2, opts=[{'feerates': (orate, orate, orate, orate)},
{'feerates': (prate, prate, prate, prate)}])
opener_id = opener.info['id']
peer_id = peer.info['id']
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
if opts['close_initiated_by'] == 'opener':
opener.rpc.close(peer_id=peer_id, fee_negotiation_step=opts['fee_negotiation_step'])
else:
assert opts['close_initiated_by'] == 'peer'
peer.rpc.close(peer_id=opener_id, fee_negotiation_step=opts['fee_negotiation_step'])
# Get the proclaimed closing fee from the two nodes' statuses
status_agreed_regex = re.compile("agreed on a closing fee of ([0-9]+) satoshi")
# [fee_from_opener_status, fee_from_peer_status]
fees_from_status = [None, None]
def get_fee_from_status(node, peer_id, i):
nonlocal fees_from_status
peer = only_one(node.rpc.listpeers(peer_id)['peers'])
channel = only_one(peer['channels'])
status = channel['status'][0]
m = status_agreed_regex.search(status)
if not m:
return False
fees_from_status[i] = int(m.group(1))
return True
wait_for(lambda: get_fee_from_status(opener, peer_id, 0))
wait_for(lambda: get_fee_from_status(peer, opener_id, 1))
assert opts['expected_close_fee'] == fees_from_status[0]
assert opts['expected_close_fee'] == fees_from_status[1]
# Get the closing transaction from the bitcoind mempool and get its fee
mempool = None
mempool_tx_ids = None
def get_mempool_when_size_1():
nonlocal mempool, mempool_tx_ids
mempool = bitcoind.rpc.getrawmempool(True)
mempool_tx_ids = list(mempool.keys())
return len(mempool_tx_ids) == 1
wait_for(get_mempool_when_size_1)
close_tx_id = mempool_tx_ids[0]
fee_mempool = round(mempool[close_tx_id]['fee'] * 10**8)
assert opts['expected_close_fee'] == fee_mempool
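# A simplified sketch (hypothetical helper and fee function; the real test uses
# basic_fee) of the bisection idea behind feerate_for above: halve the interval
# until fn(mid) hits the target exactly, assuming fn is monotone and the target
# is attainable within the starting range.
def _bisect_for_target_sketch():
    def search(fn, target, lo=0, hi=10**7):
        assert lo != hi
        mid = (lo + hi) // 2
        if fn(mid) > target:
            return search(fn, target, lo, mid)
        elif fn(mid) < target:
            return search(fn, target, mid, hi)
        return mid
    return search(lambda x: 3 * x, 21000)  # -> 7000, since 3 * 7000 == 21000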
def test_closing_negotiation_step_30pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 30%"""
opts = {}
opts['fee_negotiation_step'] = '30%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20537 if not chainparams['elements'] else 33870
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20233 if not chainparams['elements'] else 33366
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
def test_closing_negotiation_step_50pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 50%, the default"""
opts = {}
opts['fee_negotiation_step'] = '50%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20334 if not chainparams['elements'] else 33533
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20334 if not chainparams['elements'] else 33533
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
def test_closing_negotiation_step_100pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 100%"""
opts = {}
opts['fee_negotiation_step'] = '100%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20001 if not chainparams['elements'] else 32985
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
# The close fee of 20499 looks strange in this case - one would expect
# a number close to 21000. This is because
# * the range is initially set to [20000 (peer), 21000 (opener)]
# * the opener is always first to propose and uses the default 50% step, so it proposes 20500
# * the range is narrowed to [20001, 20499] and the peer proposes 20499
# (see the worked sketch after this test)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499 if not chainparams['elements'] else 33808
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
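# A worked version (made-up helper name; fees mirror the comment above for the
# non-elements case) of how the peer ends up at 20499 with a 100% step.
def _fee_step_100pct_sketch():
    peer_fee, opener_fee = 20000, 21000             # initial negotiation range
    opener_proposal = (peer_fee + opener_fee) // 2  # opener's default 50% step -> 20500
    lo, hi = peer_fee + 1, opener_proposal - 1      # range narrows to [20001, 20499]
    peer_proposal = hi                              # a 100% step concedes the whole narrowed range
    return (lo, hi), opener_proposal, peer_proposal  # ((20001, 20499), 20500, 20499)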
def test_closing_negotiation_step_1sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 1sat"""
opts = {}
opts['fee_negotiation_step'] = '1'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20989 if not chainparams['elements'] else 34621
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20010 if not chainparams['elements'] else 32995
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
def test_closing_negotiation_step_700sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 700sat"""
opts = {}
opts['fee_negotiation_step'] = '700'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20151 if not chainparams['elements'] else 33459
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499 if not chainparams['elements'] else 33746
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an incoming HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# We suppress each side after the first commit; the HTLC gets added but not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': ['=WIRE_COMMITMENT_SIGNED-nocommit'],
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'disconnect': ['=WIRE_COMMITMENT_SIGNED-nocommit'],
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Now, this will get stuck due to l1's commit being disabled.
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('got commitsig')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
# The first needle would match in the logs, but since we don't have a direct
# output for l2 it doesn't produce an output we own, hence it is commented out:
# r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l2, channel_id) == 0
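# A self-contained sketch (the log line is made up) of the txid extraction the
# penalty tests perform: pull the 64-hex id out of an onchaind resolution line.
def _txid_extraction_sketch():
    import re
    line = ("Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal "
            "OUR_PENALTY_TX (" + "ab" * 32 + ")")
    return re.search(r'\(([0-9a-f]{64})\)', line).group(1)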
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an outgoing HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': ['=WIRE_COMMITMENT_SIGNED*3-nocommit'],
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'disconnect': ['=WIRE_COMMITMENT_SIGNED*3-nocommit'],
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
# Now, this will get stuck due to l1's commit being disabled.
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_fulfill(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
    blockchain advances, l2 broadcasts their htlc fulfill tx
l3 comes back online, sees l2's cheat. takes funds from htlc fulfill tx.
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2, l3, l4 = node_factory.line_graph(4,
opts=[{'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None},
{'plugin': coin_mvt_plugin,
'disable-mpp': None,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'dev-no-reconnect': None,
'may_reconnect': True}],
wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
# push some money so that 1 + 4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
l1.rpc.waitsendpay(inv['payment_hash'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
l2.rpc.waitsendpay(inv['payment_hash'])
# now we send one 'sticky' htlc: l4->l1
amt = 10**8 // 2
sticky_inv = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv['payment_hash'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
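    # (Sketch of the trick used here: copying the sqlite3 file while l2 is stopped
    # captures the channel at its current commitment number; restoring it after
    # further payments makes l2 sign and broadcast an *old*, already-revoked state,
    # which is exactly what lets l3 exercise its penalty handling.)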
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 15000, 0 added, 1 fulfilled, 0 failed, 0 changed')
l2.daemon.wait_for_log('coins payment_hash: {}'.format(sticky_inv['payment_hash']))
# l2 moves on for closed l3
bitcoind.generate_block(1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX'])
l3.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
bitcoind.generate_block(1)
l3.daemon.wait_for_log('Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_timeout(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
^---> l5
l1 pushes money to l5, who doesn't fulfill (freezing htlc across l2-l3)
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
    blockchain advances, l2 broadcasts the timeout htlc_tx + fulfill htlc_tx
both of which have a delay. l2 goes ahead and 'steals back' their
output + the htlc they fulfill
l3 comes back online, sees l2's cheat. takes funds from htlc timeout tx
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2, l3, l4, l5 = node_factory.get_nodes(
5,
opts=[
{
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'dev-no-reconnect': None,
}, {
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
'allow_broken_log': True,
}
]
)
node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)
node_factory.join_nodes([l3, l5], wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
# push some money so that 1 + 4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
# now we send two 'sticky' htlcs, l1->l5 + l4->l1
amt = 10**8 // 2
sticky_inv_1 = l5.rpc.invoice(amt, '2', 'sticky')
route = l1.rpc.getroute(l5.info['id'], amt, 1)['route']
l1.rpc.sendpay(route, sticky_inv_1['payment_hash'])
l5.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
sticky_inv_2 = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv_2['payment_hash'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 2)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
    # start l2 (now rolled back a bit) and force close the channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 15000, 0 added, 1 fulfilled, 0 failed, 0 changed')
l2.daemon.wait_for_log('coins payment_hash: {}'.format(sticky_inv_2['payment_hash']))
# l2 moves on for closed l3
bitcoind.generate_block(1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 16 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    # after 5 blocks, l2 reclaims both its HTLC-success output and its unilateral to-self output
bitcoind.generate_block(5, wait_for_mempool=0)
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US',
'Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_UNILATERAL/DELAYED_OUTPUT_TO_US'])
bitcoind.generate_block(10, wait_for_mempool=2)
l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/THEIR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX',
'Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM',
'Propose handling THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=2) # OUR_PENALTY_TX + OUR_HTLC_TIMEOUT_TO_US
l3.daemon.wait_for_log('Resolved THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "uses dev_sign_last_tx")
def test_penalty_rbf_normal(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed.
'''
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding lightningd
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
options={'watchtime-blocks': to_self_delay})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
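    # (mock_rpc intercepts l2's sendrawtransaction calls at the bitcoind proxy and
    # replies with a bare success, so l2 believes it broadcast its penalty tx while
    # nothing actually reaches the mempool -- a stand-in for censoring miners.)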
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
    def get_rbf_tx(node, depth, name, resolve):
        r = node.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
                                     .format(name, resolve, depth))
        return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
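    # (get_rbf_tx scrapes the raw tx hex out of an onchaind log line that looks
    # roughly like -- the exact wording is an assumption, only the parts matched
    # by the two regexes above are relied on:
    #   Broadcasting RBF OUR_PENALTY_TX (0200...ffff) to resolve THEIR_REVOKED_UNILATERAL/THEIR_HTLC depth=3
    # The parenthesized hex is the full raw transaction, which we rebroadcast below.)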
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 8):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
        # l2 should RBF, twice even: once for the l1 main output,
        # once for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
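    # (Each successive OUR_PENALTY_TX collected above is assumed to spend the same
    # inputs at a higher feerate, which is what makes rebroadcasting them
    # oldest-to-newest below a valid sequence of BIP125 replacements.)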
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the order in which l2 generated RBF transactions
# would be acceptable to Bitcoin.
for tx in rbf_txes:
        # Broadcast via the node's bcli interface, so that code path
        # gets exercised as well.
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# And l2 should consider it in its listfunds.
assert(len(l2.rpc.listfunds()['outputs']) >= 1)
@unittest.skipIf(not DEVELOPER, "uses dev_sign_last_tx")
def test_penalty_rbf_burn(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed and we are willing to burn
it all up to spite the thief.
'''
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding lightningd
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
options={'watchtime-blocks': to_self_delay})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
    def get_rbf_tx(node, depth, name, resolve):
        r = node.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
                                     .format(name, resolve, depth))
        return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 10):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
        # l2 should RBF, twice even: once for the l1 main output,
        # once for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the last two txes can be broadcast.
# These should donate the total amount to miners.
rbf_txes = rbf_txes[-2:]
for tx in rbf_txes:
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# l2 donated it to the miners, so it owns nothing
assert(len(l2.rpc.listfunds()['outputs']) == 0)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where opener immediately drops to chain"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
# Make locktime different, as we once had them reversed!
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'plugin': coin_mvt_plugin},
{'watchtime-blocks': 10,
'plugin': coin_mvt_plugin}],
fundchannel=False)
l1.fundwallet(10**7)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
channel_id = first_channel_id(l1, l2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And lightningd should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'watchtime-blocks': 201, 'cltv-delta': 101,
'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500)},
{'watchtime-blocks': 201, 'cltv-delta': 101}])
rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1x1x1'
}
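    # (sendpay takes an explicit route: each hop gives the amount to forward, the
    # next node id, the CLTV delta and a short_channel_id.  The '1x1x1' scid here
    # is a throwaway value -- the assumption being that it is good enough for a
    # single direct hop, where the peer id alone identifies the channel.)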
l1.rpc.sendpay([routestep], rhash)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1, wait_for_mempool=1)
    # Wait for nodes to notice the failure; this search needle is after the
    # DB commit, so we're sure the tx entries in onchaindtxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Must be dust!
rhash = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash)
# 6 later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(TIMEOUT)
# 2 later, l1 spends HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 89 later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin,
'disconnect': disconnects},
{}])
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# Three more, l2 can spend to-us.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_middleman_their_unilateral_in(node_factory, bitcoind):
""" This is the same as test_onchain_middleman, except that
node l1 drops to chain, not l2, reversing the unilateral
handling logic """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1_disconnects = ['=WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l2_disconnects = ['-WIRE_UPDATE_FULFILL_HTLC']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin,
'disconnect': l1_disconnects},
{'plugin': coin_mvt_plugin,
'disconnect': l2_disconnects},
{}])
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l1 will drop to chain.
l1.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('THEIR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, immediately
l2.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
l1.bitcoin.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l1 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_their_unilateral_out(node_factory, bitcoind):
""" Very similar to the test_onchain_middleman, except there's no
middleman, we simply want to check that our offered htlc
on their unilateral returns to us (and is accounted
for correctly) """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin},
{'disconnect': disconnects,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
route = l1.rpc.getroute(l2.info['id'], 10**8, 1)["route"]
assert len(route) == 1
q = queue.Queue()
def try_pay():
try:
# rhash is fake
rhash = 'B1' * 32
l1.rpc.sendpay(route, rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l2.daemon.wait_for_log('sendrawtx exit 0')
l2.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC')
    # l1 should wait out the delay (10 blocks), then reclaim its HTLC onchain via timeout
l2.bitcoin.generate_block(9)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
l2.daemon.wait_for_log('Ignoring output .*_UNILATERAL/THEIR_HTLC')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# 100 blocks after last spend, l1+l2 should be done.
l2.bitcoin.generate_block(100)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l2, channel_id) == 0
assert account_balance(l1, channel_id) == 0
def test_listfunds_after_their_unilateral(node_factory, bitcoind):
"""We keep spending info around for their unilateral closes.
Make sure we show the address.
"""
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # FIXME: We can get warnings from unilateral closes, since we treat
    # such errors as soft because of LND.
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin,
"allow_warning": True},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
    # listfunds will show 1 change output, plus the channels.
assert len([o for o in l1.rpc.listfunds()['outputs'] if not o['reserved']]) == 1
l1.stop()
l2.rpc.close(l1.info['id'], unilateraltimeout=1)
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(100)
l1.start()
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 2)
assert all(['address' in o for o in l1.rpc.listfunds()['outputs']])
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[
{
'may_reconnect': True,
'allow_warning': True,
}, {
'may_reconnect': True,
'disconnect': disconnects,
}
])
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
# We recognize different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
# 90 later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None,
'plugin': coin_mvt_plugin},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects, options={'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
channel_id = first_channel_id(l1, l2)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000, 100000))
l1.daemon.wait_for_log('Feerate estimate for unilateral_close set to [56789][0-9]{4}')
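    # (With fees this high, the tx that would claim the tiny HTLC would cost more
    # than the HTLC is worth, so onchaind is expected to fall back to
    # IGNORING_TINY_PAYMENT below rather than burning funds on fees.)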
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output .* of .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l2.daemon.wait_for_log('htlc 0: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 11000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l2.daemon.wait_for_log('htlc 1: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l2.daemon.wait_for_log('htlc 2: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
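    # (min/max_possible_feerate bracket every feerate the channel ever used --
    # 5000 and 16000 above -- presumably so onchaind can search that range when it
    # has to reconstruct fee-dependent transactions for this close.)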
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
# Two more for HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
# Test case where we have two possible commits: it will use new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
# For each direction, we create these HTLCs with same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, wait_for_announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
h = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
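    # (Both end nodes publish an invoice for the *same* preimage, so every HTLC in
    # either direction shares one payment_hash -- the situation the multi-HTLC
    # onchain tests below are exercising.)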
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
    # We send the second HTLC from a different node, since nodes refuse to
    # send multiple HTLCs with the same hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h)
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h)
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h)
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h)
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode goes onchain with n+1 channel.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    # At depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode+1 goes onchain with midnode channel.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are at depths 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
# Now, after 100 blocks it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects,
feerates=(7500, 7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('Handed peer, entering loop')
l2.fundchannel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# Now, after 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:.*\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 blocks after l1 sees the tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
# Now, after 100 blocks l2 should be done.
bitcoind.generate_block(5)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Check that all the addresses match what we generated ourselves:
for o in l1.rpc.listfunds()['outputs']:
txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
addr = txout['scriptPubKey']['addresses'][0]
assert(addr == o['address'])
addr = l1.bitcoin.getnewaddress()
l1.rpc.withdraw(addr, "all")
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not node_factory.valgrind:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.lightning_dir, leaks))
l1.rpc.stop()
@flaky
@unittest.skipIf(not DEVELOPER, "needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
# There's a workaround in channeld, that it treats incoming errors
# before both sides are locked in as warnings; this happens in
# this test, so l1 reports the error as a warning!
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
# This will block, as l2 will send an error but l1 will retry.
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
fut.result(TIMEOUT)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
# Works when l2 closes channel, too.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
l2.rpc.close(l1.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
# Figure out what address it will try to use.
keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
# Expect 1 for change address, plus 1 for the funding address of the actual
# funding tx.
addr = l1.rpc.call('dev-listaddrs', [keyidx + 2])['addresses'][-1]
# the above used to be keyidx + 3, but that was when `fundchannel`
# used the `txprepare`-`txdiscard`-`txprepare` trick, which skipped
# one address in the discarded tx.
# Now we use PSBTs, which means we never discard and skip an address.
# Now, if we specify upfront and it's OK, all good.
l1.stop()
# We need to prepend the segwit version byte (OP_0, 0x00) and the 20-byte push opcode (0x14).
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 1000000)
l1.rpc.close(l2.info['id'])
wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
@unittest.skipIf(not DEVELOPER, "needs to set upfront_shutdown_script")
def test_invalid_upfront_shutdown_script(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac00"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.fundchannel(l2, 1000000, False)
@unittest.skipIf(not DEVELOPER, "needs to set upfront_shutdown_script")
@pytest.mark.slow_test
def test_segwit_shutdown_script(node_factory, bitcoind, executor):
"""
Try a range of future segwit versions as shutdown scripts. We create many nodes, so this is quite slow under valgrind.
"""
l1 = node_factory.get_node(allow_warning=True)
# BOLT-4e329271a358ee52bf43ddbd96776943c5d74508 #2:
# 5. if (and only if) `option_shutdown_anysegwit` is negotiated:
# * `OP_1` through `OP_16` inclusive, followed by a single push of 2 to 40 bytes
# (witness program versions 1 through 16)
edge_valid = ['51020000', '5128' + '00' * 0x28,
'60020000', '6028' + '00' * 0x28]
other_valid = ['52020000', '5228' + '00' * 0x28,
'53020000', '5328' + '00' * 0x28,
'54020000', '5428' + '00' * 0x28,
'55020000', '5528' + '00' * 0x28,
'56020000', '5628' + '00' * 0x28,
'57020000', '5728' + '00' * 0x28,
'58020000', '5828' + '00' * 0x28,
'59020000', '5928' + '00' * 0x28,
'5A020000', '5A28' + '00' * 0x28,
'5B020000', '5B28' + '00' * 0x28,
'5C020000', '5C28' + '00' * 0x28,
'5D020000', '5D28' + '00' * 0x28,
'5E020000', '5E28' + '00' * 0x28,
'5F020000', '5F28' + '00' * 0x28]
invalid = ['50020000', # Not OP_1-OP_16
'61020000', # Not OP_1-OP_16
'5102000000', # Extra bytes
'510100', # Too short
'5129' + '00' * 0x29] # Too long
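# Illustrative sanity sketch of the rule quoted above (this is NOT the daemon's
# actual validation, just a restatement of the BOLT shape check: one opcode in
# OP_1..OP_16, i.e. 0x51..0x60, followed by a single push of 2 to 40 bytes that
# covers the rest of the script).
def looks_like_anysegwit_script(script_hex):
    script = bytes.fromhex(script_hex)
    if len(script) < 2:
        return False
    opcode, pushlen = script[0], script[1]
    return 0x51 <= opcode <= 0x60 and 2 <= pushlen <= 40 and len(script) == 2 + pushlen
assert all(looks_like_anysegwit_script(s) for s in edge_valid + other_valid)
assert not any(looks_like_anysegwit_script(s) for s in invalid)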
# Don't stress CI; just test edge cases
if SLOW_MACHINE:
valid = edge_valid
else:
valid = edge_valid + other_valid
if EXPERIMENTAL_FEATURES:
xsuccess = valid
xfail = invalid
else:
xsuccess = []
xfail = valid + invalid
# More efficient to create them all up-front.
nodes = node_factory.get_nodes(len(xfail) + len(xsuccess))
# Give it one UTXO to spend for each node.
addresses = {}
for n in nodes:
addresses[l1.rpc.newaddr()['bech32']] = (10**6 + 100000) / 10**8
bitcoind.rpc.sendmany("", addresses)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == len(addresses))
# FIXME: Since we don't support other non-v0 encodings, we need a protocol
# test for this (we're actually testing our upfront check, not the real
# shutdown one!).
for script in xsuccess:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
for script in xfail:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.rpc.fundchannel(l2.info['id'], 10**6)
|
DipPy.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\GUI.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
###############################################################################
#
# Beginning of code insertion
import sys
import time
import numpy as np
import Phidget22
from Phidget22.Devices.Stepper import *
from Phidget22.PhidgetException import *
from Phidget22.Phidget import *
from Phidget22.Net import *
import asyncio
from PyQt5.QtWidgets import QApplication, QProgressBar
from quamash import QEventLoop, QThreadExecutor
try:
ch = Stepper()
except RuntimeError as e:
print("Runtime Exception %s" % e.details)
print("Press Enter to Exit...\n")
readin = sys.stdin.read(1)
exit(1)
def StepperAttached(e):
try:
attached = e
print("\nAttach Event Detected (Information Below)")
print("===========================================")
print("Library Version: %s" % attached.getLibraryVersion())
print("Serial Number: %d" % attached.getDeviceSerialNumber())
print("Channel: %d" % attached.getChannel())
print("Channel Class: %s" % attached.getChannelClass())
print("Channel Name: %s" % attached.getChannelName())
print("Device ID: %d" % attached.getDeviceID())
print("Device Version: %d" % attached.getDeviceVersion())
print("Device Name: %s" % attached.getDeviceName())
print("Device Class: %d" % attached.getDeviceClass())
print("\n")
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to Exit...\n")
readin = sys.stdin.read(1)
exit(1)
def StepperDetached(e):
detached = e
try:
print("\nDetach event on Port %d Channel %d" % (detached.getHubPort(), detached.getChannel()))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to Exit...\n")
readin = sys.stdin.read(1)
exit(1)
def ErrorEvent(e, eCode, description):
print("Error %i : %s" % (eCode, description))
def PositionChangeHandler(e, position):
print("Position: %f" % position, "Speed: ", ch.getVelocity())
ui.PositionLineEdit.setText(str(np.around(position/60,decimals=3)))
#print( ch.getVelocity() )
try:
ch.setOnAttachHandler(StepperAttached)
ch.setOnDetachHandler(StepperDetached)
ch.setOnErrorHandler(ErrorEvent)
ch.setOnPositionChangeHandler(PositionChangeHandler)
print("Waiting for the Phidget Stepper Object to be attached...")
ch.setDeviceLabel("dip_coater")
ch.openWaitForAttachment(1000)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to Exit...\n")
readin = sys.stdin.read(1)
exit(1)
time.sleep(1)
StepsPerRevolution = 200
MicrosteppingDivision = 16 # Microstepping at 1/16th
LeadscrewPitch = 1.0 # millimeters per rotation
GearingRatio = 76 + (49/64) # :1
SecondsPerMinute = 60 # i.e. per minute
MicrostepsPerMillimeter = StepsPerRevolution * MicrosteppingDivision * GearingRatio / LeadscrewPitch
RescaleFactor = SecondsPerMinute / (MicrostepsPerMillimeter)
ch.setRescaleFactor( RescaleFactor ) # All units are now in millimeters and minutes
print("Rescale Factor: ", ch.getRescaleFactor(), "\n")
# Supply, 20V DC rated at 3.25A
# Recommended motor current: 2.8A (at 12V)
ch.setCurrentLimit( 2.8 ) # Current limit in Amps
# Motor datasheet, 25rpm max
MaxVelocityMicrostepsPerSecond = (25/60) * StepsPerRevolution * MicrosteppingDivision * GearingRatio
MaxVelocity = 50
ch.setVelocityLimit( MaxVelocity )
MaxAcceleration = 100
ch.setAcceleration( MaxAcceleration )
StepResolution = LeadscrewPitch / (GearingRatio * StepsPerRevolution)
print("\n")
print("Step resolution: ")
#print(StepResolution, "mm")
#print(StepResolution*1E3, "um")
print(StepResolution*1E6, "nm\n")
MicrostepResolution = LeadscrewPitch / (GearingRatio * StepsPerRevolution * MicrosteppingDivision)
print("Microstep resolution: ")
#print(MicrostepResolution, "mm")
#print(MicrostepResolution*1E3, "um")
print(MicrostepResolution*1E6, "nm\n")
ch.setControlMode(1) # Continuous movement
# End of code insertion
#
###############################################################################
class Ui_MainWindow(object):
operation_tag = None
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1127, 961)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setAutoFillBackground(True)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_4.setObjectName("verticalLayout_4")
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem1)
self.ManualControlLabel = QtWidgets.QLabel(self.centralwidget)
self.ManualControlLabel.setObjectName("ManualControlLabel")
self.horizontalLayout_5.addWidget(self.ManualControlLabel)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem2)
self.verticalLayout_4.addLayout(self.horizontalLayout_5)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.EngageButton = QtWidgets.QPushButton(self.centralwidget)
self.EngageButton.setObjectName("EngageButton")
self.verticalLayout_3.addWidget(self.EngageButton)
self.DisengageButton = QtWidgets.QPushButton(self.centralwidget)
self.DisengageButton.setObjectName("DisengageButton")
self.verticalLayout_3.addWidget(self.DisengageButton)
self.horizontalLayout_4.addLayout(self.verticalLayout_3)
self.line_3 = QtWidgets.QFrame(self.centralwidget)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.horizontalLayout_4.addWidget(self.line_3)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.VelocityLimitLabel = QtWidgets.QLabel(self.centralwidget)
self.VelocityLimitLabel.setObjectName("VelocityLimitLabel")
self.horizontalLayout_3.addWidget(self.VelocityLimitLabel)
self.ManualVelocitySpinBox = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.ManualVelocitySpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ManualVelocitySpinBox.setDecimals(3)
self.ManualVelocitySpinBox.setMinimum(0.001)
self.ManualVelocitySpinBox.setProperty("value", 10.0)
self.ManualVelocitySpinBox.setObjectName("ManualVelocitySpinBox")
self.horizontalLayout_3.addWidget(self.ManualVelocitySpinBox)
self.VelocityLimitUnits = QtWidgets.QLabel(self.centralwidget)
self.VelocityLimitUnits.setObjectName("VelocityLimitUnits")
self.horizontalLayout_3.addWidget(self.VelocityLimitUnits)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem3)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.AcceleratioLabel = QtWidgets.QLabel(self.centralwidget)
self.AcceleratioLabel.setObjectName("AcceleratioLabel")
self.horizontalLayout_2.addWidget(self.AcceleratioLabel)
self.ManualAccelerationSpinBox = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.ManualAccelerationSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ManualAccelerationSpinBox.setDecimals(3)
self.ManualAccelerationSpinBox.setMinimum(0.001)
self.ManualAccelerationSpinBox.setProperty("value", 5.0)
self.ManualAccelerationSpinBox.setObjectName("ManualAccelerationSpinBox")
self.horizontalLayout_2.addWidget(self.ManualAccelerationSpinBox)
self.AccelerationUnits = QtWidgets.QLabel(self.centralwidget)
self.AccelerationUnits.setObjectName("AccelerationUnits")
self.horizontalLayout_2.addWidget(self.AccelerationUnits)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem4)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_4.addLayout(self.verticalLayout_2)
self.line_6 = QtWidgets.QFrame(self.centralwidget)
self.line_6.setFrameShape(QtWidgets.QFrame.VLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.horizontalLayout_4.addWidget(self.line_6)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.ManualUpButton = QtWidgets.QPushButton(self.centralwidget)
self.ManualUpButton.setObjectName("ManualUpButton")
self.verticalLayout.addWidget(self.ManualUpButton)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.PositionLabel = QtWidgets.QLabel(self.centralwidget)
self.PositionLabel.setObjectName("PositionLabel")
self.horizontalLayout.addWidget(self.PositionLabel)
self.PositionLineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.PositionLineEdit.setAcceptDrops(False)
self.PositionLineEdit.setFrame(False)
self.PositionLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.PositionLineEdit.setObjectName("PositionLineEdit")
self.horizontalLayout.addWidget(self.PositionLineEdit)
self.PositionUnits = QtWidgets.QLabel(self.centralwidget)
self.PositionUnits.setObjectName("PositionUnits")
self.horizontalLayout.addWidget(self.PositionUnits)
self.ZeroButton = QtWidgets.QPushButton(self.centralwidget)
self.ZeroButton.setObjectName("ZeroButton")
self.horizontalLayout.addWidget(self.ZeroButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.ManualDownButton = QtWidgets.QPushButton(self.centralwidget)
self.ManualDownButton.setObjectName("ManualDownButton")
self.verticalLayout.addWidget(self.ManualDownButton)
self.horizontalLayout_4.addLayout(self.verticalLayout)
self.verticalLayout_4.addLayout(self.horizontalLayout_4)
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_4.addWidget(self.line)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem5)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem6)
self.ProgrammedMoveLabel = QtWidgets.QLabel(self.centralwidget)
self.ProgrammedMoveLabel.setObjectName("ProgrammedMoveLabel")
self.horizontalLayout_10.addWidget(self.ProgrammedMoveLabel)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem7)
self.verticalLayout_4.addLayout(self.horizontalLayout_10)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.ProgrammedUpButton = QtWidgets.QPushButton(self.centralwidget)
self.ProgrammedUpButton.setObjectName("ProgrammedUpButton")
self.verticalLayout_5.addWidget(self.ProgrammedUpButton)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.StrokeLengthLabel = QtWidgets.QLabel(self.centralwidget)
self.StrokeLengthLabel.setObjectName("StrokeLengthLabel")
self.horizontalLayout_6.addWidget(self.StrokeLengthLabel)
self.StrokeLengthSpinBox = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.StrokeLengthSpinBox.setInputMethodHints(QtCore.Qt.ImhFormattedNumbersOnly)
self.StrokeLengthSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.StrokeLengthSpinBox.setDecimals(3)
self.StrokeLengthSpinBox.setMaximum(300.0)
self.StrokeLengthSpinBox.setProperty("value", 80.0)
self.StrokeLengthSpinBox.setObjectName("StrokeLengthSpinBox")
self.horizontalLayout_6.addWidget(self.StrokeLengthSpinBox)
self.StrokeLengthUnits = QtWidgets.QLabel(self.centralwidget)
self.StrokeLengthUnits.setObjectName("StrokeLengthUnits")
self.horizontalLayout_6.addWidget(self.StrokeLengthUnits)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem8)
self.verticalLayout_5.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.RampDistanceLabel = QtWidgets.QLabel(self.centralwidget)
self.RampDistanceLabel.setObjectName("RampDistanceLabel")
self.horizontalLayout_7.addWidget(self.RampDistanceLabel)
self.RampDistanceSpinBox = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.RampDistanceSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.RampDistanceSpinBox.setDecimals(4)
self.RampDistanceSpinBox.setMinimum(0.0001)
self.RampDistanceSpinBox.setMaximum(300.0)
self.RampDistanceSpinBox.setProperty("value", 1.0)
self.RampDistanceSpinBox.setObjectName("RampDistanceSpinBox")
self.horizontalLayout_7.addWidget(self.RampDistanceSpinBox)
self.RampDistanceUnits = QtWidgets.QLabel(self.centralwidget)
self.RampDistanceUnits.setObjectName("RampDistanceUnits")
self.horizontalLayout_7.addWidget(self.RampDistanceUnits)
spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem9)
self.verticalLayout_5.addLayout(self.horizontalLayout_7)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.FinalSpeedLabel = QtWidgets.QLabel(self.centralwidget)
self.FinalSpeedLabel.setObjectName("FinalSpeedLabel")
self.horizontalLayout_8.addWidget(self.FinalSpeedLabel)
self.FinalSpeedSpinBox = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.FinalSpeedSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.FinalSpeedSpinBox.setDecimals(3)
self.FinalSpeedSpinBox.setMinimum(0.001)
self.FinalSpeedSpinBox.setProperty("value", 10.0)
self.FinalSpeedSpinBox.setObjectName("FinalSpeedSpinBox")
self.horizontalLayout_8.addWidget(self.FinalSpeedSpinBox)
self.FinalSpeedUnits = QtWidgets.QLabel(self.centralwidget)
self.FinalSpeedUnits.setObjectName("FinalSpeedUnits")
self.horizontalLayout_8.addWidget(self.FinalSpeedUnits)
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem10)
self.verticalLayout_5.addLayout(self.horizontalLayout_8)
self.ProgrammedDownButton = QtWidgets.QPushButton(self.centralwidget)
self.ProgrammedDownButton.setObjectName("ProgrammedDownButton")
self.verticalLayout_5.addWidget(self.ProgrammedDownButton)
self.horizontalLayout_12.addLayout(self.verticalLayout_5)
self.line_4 = QtWidgets.QFrame(self.centralwidget)
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.horizontalLayout_12.addWidget(self.line_4)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setObjectName("verticalLayout_6")
spacerItem11 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem11)
self.CalculateButton = QtWidgets.QPushButton(self.centralwidget)
self.CalculateButton.setObjectName("CalculateButton")
self.verticalLayout_6.addWidget(self.CalculateButton)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.StrokeTimeLabel = QtWidgets.QLabel(self.centralwidget)
self.StrokeTimeLabel.setObjectName("StrokeTimeLabel")
self.horizontalLayout_9.addWidget(self.StrokeTimeLabel)
self.StrokeTimeLineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.StrokeTimeLineEdit.setFrame(False)
self.StrokeTimeLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.StrokeTimeLineEdit.setObjectName("StrokeTimeLineEdit")
self.horizontalLayout_9.addWidget(self.StrokeTimeLineEdit)
self.StrokeTimeUnits = QtWidgets.QLabel(self.centralwidget)
self.StrokeTimeUnits.setObjectName("StrokeTimeUnits")
self.horizontalLayout_9.addWidget(self.StrokeTimeUnits)
self.verticalLayout_6.addLayout(self.horizontalLayout_9)
spacerItem12 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem12)
self.horizontalLayout_12.addLayout(self.verticalLayout_6)
self.verticalLayout_4.addLayout(self.horizontalLayout_12)
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout_4.addWidget(self.line_2)
spacerItem13 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem13)
self.verticalLayout_7.addLayout(self.verticalLayout_4)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.EngageButton, self.DisengageButton)
MainWindow.setTabOrder(self.DisengageButton, self.ManualVelocitySpinBox)
MainWindow.setTabOrder(self.ManualVelocitySpinBox, self.ManualAccelerationSpinBox)
MainWindow.setTabOrder(self.ManualAccelerationSpinBox, self.ManualUpButton)
MainWindow.setTabOrder(self.ManualUpButton, self.PositionLineEdit)
MainWindow.setTabOrder(self.PositionLineEdit, self.ZeroButton)
MainWindow.setTabOrder(self.ZeroButton, self.ManualDownButton)
MainWindow.setTabOrder(self.ManualDownButton, self.ProgrammedUpButton)
MainWindow.setTabOrder(self.ProgrammedUpButton, self.StrokeLengthSpinBox)
MainWindow.setTabOrder(self.StrokeLengthSpinBox, self.RampDistanceSpinBox)
MainWindow.setTabOrder(self.RampDistanceSpinBox, self.FinalSpeedSpinBox)
MainWindow.setTabOrder(self.FinalSpeedSpinBox, self.ProgrammedDownButton)
MainWindow.setTabOrder(self.ProgrammedDownButton, self.CalculateButton)
MainWindow.setTabOrder(self.CalculateButton, self.StrokeTimeLineEdit)
#######################################################################
#
# Beginning of code insertion
self.ManualAccelerationSpinBox.setMinimum(0.001)
self.ManualAccelerationSpinBox.setProperty( "value", MaxAcceleration/2 )
self.ManualAccelerationSpinBox.setMaximum( MaxAcceleration )
self.ManualVelocitySpinBox.setMinimum(0.001)
self.ManualVelocitySpinBox.setProperty( "value", MaxVelocity/2 )
self.ManualVelocitySpinBox.setMaximum( MaxVelocity )
self.FinalSpeedSpinBox.setProperty("value", MaxVelocity/2 )
self.FinalSpeedSpinBox.setMaximum( MaxVelocity )
self.EngageButton.clicked.connect(self.clickedEngageButton)
self.DisengageButton.clicked.connect(self.clickedDisengageButton)
self.ManualUpButton.pressed.connect(self.pressedManualUpButton)
self.ManualUpButton.released.connect(self.releasedManualUpButton)
self.ManualDownButton.pressed.connect(self.pressedManualDownButton)
self.ManualDownButton.released.connect(self.releasedManualDownButton)
self.ZeroButton.clicked.connect(self.clickedZeroButton)
self.CalculateButton.clicked.connect(self.clickedCalculateButton)
self.ProgrammedUpButton.clicked.connect(self.clickedProgrammedUp)
self.ProgrammedDownButton.clicked.connect(self.clickedProgrammedDown)
def clickedEngageButton(self):
ch.setTargetPosition( ch.getPosition() )
ch.setEngaged(1)
def clickedDisengageButton(self):
ch.setEngaged(0)
ch.setTargetPosition( ch.getPosition() )
def pressedManualUpButton(self):
ch.setVelocityLimit(0)
ch.setAcceleration ( self.ManualAccelerationSpinBox.value() )
ch.setEngaged(1)
ch.setVelocityLimit( self.ManualVelocitySpinBox.value() )
def releasedManualUpButton(self):
ch.setVelocityLimit(0)
ch.setTargetPosition( ch.getPosition() )
def pressedManualDownButton(self):
ch.setEngaged(1)
ch.setVelocityLimit(0)
ch.setAcceleration ( self.ManualAccelerationSpinBox.value() )
ch.setVelocityLimit( -self.ManualVelocitySpinBox.value() )
def releasedManualDownButton(self):
ch.setVelocityLimit(0)
ch.setTargetPosition( ch.getPosition() )
def clickedZeroButton(self):
ch.addPositionOffset( -ch.getPosition() )
self.PositionLineEdit.setText("0.000")
def clickedCalculateButton(self):
if self.RampDistanceSpinBox.value() > self.StrokeLengthSpinBox.value():
self.RampDistanceSpinBox.setProperty("value", self.StrokeLengthSpinBox.value() )
RampTime = 2 * self.RampDistanceSpinBox.value() / self.FinalSpeedSpinBox.value()
Acceleration = self.FinalSpeedSpinBox.value() / RampTime
ConstantVelocityDistance = self.StrokeLengthSpinBox.value() - self.RampDistanceSpinBox.value()
ConstantVelocityTime = ConstantVelocityDistance / self.FinalSpeedSpinBox.value()
TotalTime = ConstantVelocityTime + RampTime
self.StrokeTimeLineEdit.setText(str(np.around(TotalTime, decimals=2)))
return [TotalTime*60, Acceleration/60]
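# Worked example of the profile computed above (illustrative numbers): a
# stroke of 80 mm with a 1 mm ramp at a final speed of 10 mm/min gives
# RampTime = 2*1/10 = 0.2 min, Acceleration = 10/0.2 = 50 mm/min^2, a
# constant-velocity leg of 79 mm -> 7.9 min, so TotalTime = 8.1 min
# (the return value converts these to seconds-based units: [8.1*60, 50/60]).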
async def scheduled_disengage(self,time,tag=None):
await asyncio.sleep(time)
if tag is not None:
if tag == self.operation_tag:
self.clickedDisengageButton()
else:
self.clickedDisengageButton()
def clickedProgrammedUp(self):
self.clickedCalculateButton()
self.clickedEngageButton()
[Time,Acceleration] = self.clickedCalculateButton()
ch.setAcceleration(Acceleration)
ch.setVelocityLimit(self.FinalSpeedSpinBox.value())
print("Waiting for ", Time, " seconds")
event_loop = asyncio.get_event_loop()
# event_loop.run_in_executor(None,lambda :ProgrammedDisengage(self,Time))
tag = time.clock()
self.operation_tag=tag
event_loop.create_task(self.scheduled_disengage(Time, tag))
# await ProgrammedDisengage(self,Time)
# BackgroundThread = QtCore.QtThread(target=ProgrammedDisengage(self,Time), args=(4,))
# BackgroundThread.start()
def clickedProgrammedDown(self):
self.clickedCalculateButton()
self.clickedEngageButton()
[Time,Acceleration] = self.clickedCalculateButton()
ch.setAcceleration(Acceleration)
ch.setVelocityLimit(-self.FinalSpeedSpinBox.value())
print("Waiting for ", Time, " seconds")
event_loop = asyncio.get_event_loop()
# event_loop.run_in_executor(None,lambda :ProgrammedDisengage(self,Time))
tag = time.clock()
self.operation_tag=tag
event_loop.create_task(self.scheduled_disengage(Time, tag))
# await ProgrammedDisengage(self,Time)
# BackgroundThread = QtCore.QtThread(target=ProgrammedDisengage(self,Time), args=(4,))
# BackgroundThread.start()
# End code insertion
#
###########################################################################
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "DipPy (Alaric Taylor, 2017)"))
self.ManualControlLabel.setText(_translate("MainWindow", "Manual control"))
self.EngageButton.setText(_translate("MainWindow", "Engage"))
self.DisengageButton.setText(_translate("MainWindow", "Disengage"))
self.VelocityLimitLabel.setText(_translate("MainWindow", "Velocity limit"))
self.VelocityLimitUnits.setText(_translate("MainWindow", "mm/min"))
self.AcceleratioLabel.setText(_translate("MainWindow", "Acceleration"))
self.AccelerationUnits.setText(_translate("MainWindow", "mm/min^2"))
self.ManualUpButton.setText(_translate("MainWindow", "Up"))
self.PositionLabel.setText(_translate("MainWindow", "Position"))
self.PositionUnits.setText(_translate("MainWindow", "mm"))
self.ZeroButton.setText(_translate("MainWindow", "Zero"))
self.ManualDownButton.setText(_translate("MainWindow", "Down"))
self.ProgrammedMoveLabel.setText(_translate("MainWindow", "Programmed move"))
self.ProgrammedUpButton.setText(_translate("MainWindow", "Up"))
self.StrokeLengthLabel.setText(_translate("MainWindow", "Total stroke length"))
self.StrokeLengthUnits.setText(_translate("MainWindow", "mm"))
self.RampDistanceLabel.setText(_translate("MainWindow", "Ramp distance"))
self.RampDistanceUnits.setText(_translate("MainWindow", "mm"))
self.FinalSpeedLabel.setText(_translate("MainWindow", "Final speed"))
self.FinalSpeedUnits.setText(_translate("MainWindow", "mm/min"))
self.ProgrammedDownButton.setText(_translate("MainWindow", "Down"))
self.CalculateButton.setText(_translate("MainWindow", "Calculate"))
self.StrokeTimeLabel.setText(_translate("MainWindow", "Stroke time"))
self.StrokeTimeUnits.setText(_translate("MainWindow", "mins"))
###############################################################################
#
# Begin inserted code
# Function with the timer
def ProgrammedDisengage(self,seconds):
time.sleep(seconds)
self.clickedDisengageButton()
# End inserted code
#
###############################################################################
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
loop = QEventLoop(app)
asyncio.set_event_loop(loop) # NEW must set the event loop
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
#sys.exit(app.exec_())
###########################################################################
#
# Begin inserted code
with loop:
loop.run_forever()
# app.exec_()
try:
ch.close()
print("Closed Stepper device")
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to Exit...\n")
readin = sys.stdin.read(1)
exit(1)
exit(0)
# End inserted code
#
###########################################################################
|
botmonitor.py
|
#########################################################
from os import environ
from utils.misc import geturljson
from time import sleep
import heroku3
from random import randint
from threading import Thread
#########################################################
BOT_TOKENS = environ["BOT_TOKENS"]
tokens = {}
for tokenstr in BOT_TOKENS.split(";"):
tokenparts = tokenstr.split(":")
tokens[tokenparts[1]] = tokenparts[0]
print("tokens", tokens)
#########################################################
class BotMonitor:
def __init__(self, id, aliases):
self.id = id
self.aliases = aliases
self.conn = None
self.app = None
self.online = False
self.alias = None
def getonline(self):
resjson = geturljson("https://lichess.org/api/users/status?ids={}".format(self.id))[0]
self.online = resjson.get("online", False)
return self.online
def getconn(self, token):
self.conn = heroku3.from_key(token)
def getapp(self, alias):
global tokens
self.alias = alias
token = tokens[alias]
self.getconn(token)
self.app = self.conn.apps()[alias]
def scale(self, kind, value):
print("scaling bot {} {} {} {}".format(self.id, self.alias, kind, value))
self.app.process_formation()[kind].scale(value)
#########################################################
bots = [
BotMonitor("lichapibot", ["obscure-cove-74967", "lgbotapi", "lgbotapi2"]),
BotMonitor("atomicchessbot", ["dry-falls-24950", "lgbotatom", "lgbotatom2"]),
BotMonitor("randommoverbot", ["vast-harbor-23643", "lgbotrand", "lgbotrand2"]),
BotMonitor("capturebot", ["calm-tundra-35866", "lgbotcapt", "lgbotcapt2"])
]
#########################################################
def botmonitorthread(bot):
global tokens
if environ.get("NOBOTMON", False):
print("no bot monitor {}".format(bot.id))
return
wait = randint(5, 60)
#print("waiting {} sec(s) to monitor {}".format(wait, bot.id))
sleep(wait)
#print("start monitoring {}".format(bot.id))
while True:
#print("checking bot {}".format(bot.id))
try:
if bot.getonline():
#print("bot {} is online".format(bot.id))
pass
else:
print("bot {} is offline".format(bot.id))
first = True
success = False
for alias in bot.aliases:
print("trying alias {}".format(alias))
bot.getapp(alias)
if first:
kind = "worker"
first = False
else:
kind = "web"
print("stopping {}".format(alias))
sleep(1)
bot.scale(kind, 0)
if not success:
print("starting {}".format(alias))
sleep(1)
bot.scale(kind, 1)
print("waiting for {}".format(alias))
sleep(90)
if bot.getonline():
print("success {} is online".format(alias))
success = True
else:
print("failed to get {} online".format(alias))
print("stopping {}".format(alias))
bot.scale(kind, 0)
except Exception as e:
    print("there was a problem checking bot {}: {}".format(bot.id, e))
sleep(180)
for bot in bots:
Thread(target = botmonitorthread, args = (bot,)).start()
|
dsr_service_drl_simple.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py example simple] motion basic test for doosan robot
# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com)
import rospy
import os
import threading, time
import sys
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import path : DSR_ROBOT.py
# for single robot
ROBOT_SYSTEM_VIRTUAL = 1
ROBOT_SYSTEM_REAL = 0
ROBOT_ID = "dsr01"
ROBOT_MODEL = "m1013"
import DR_init
DR_init.__dsr__id = ROBOT_ID
DR_init.__dsr__model = ROBOT_MODEL
from DSR_ROBOT import *
def shutdown():
print "shutdown time!"
print "shutdown time!"
print "shutdown time!"
pub_stop.publish(stop_mode=STOP_TYPE_QUICK)
return 0
def msgRobotState_cb(msg):
msgRobotState_cb.count += 1
if (0==(msgRobotState_cb.count % 100)):
rospy.loginfo("________ ROBOT STATUS ________")
print(" robot_state : %d" % (msg.robot_state))
print(" robot_state_str : %s" % (msg.robot_state_str))
print(" actual_mode : %d" % (msg.actual_mode))
print(" actual_space : %d" % (msg.actual_space))
print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
print(" current_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velj[0],msg.current_velj[1],msg.current_velj[2],msg.current_velj[3],msg.current_velj[4],msg.current_velj[5]))
print(" joint_abs : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_abs[0],msg.joint_abs[1],msg.joint_abs[2],msg.joint_abs[3],msg.joint_abs[4],msg.joint_abs[5]))
print(" joint_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_err[0],msg.joint_err[1],msg.joint_err[2],msg.joint_err[3],msg.joint_err[4],msg.joint_err[5]))
print(" target_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_posj[0],msg.target_posj[1],msg.target_posj[2],msg.target_posj[3],msg.target_posj[4],msg.target_posj[5]))
print(" target_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_velj[0],msg.target_velj[1],msg.target_velj[2],msg.target_velj[3],msg.target_velj[4],msg.target_velj[5]))
print(" current_posx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posx[0],msg.current_posx[1],msg.current_posx[2],msg.current_posx[3],msg.current_posx[4],msg.current_posx[5]))
print(" current_velx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velx[0],msg.current_velx[1],msg.current_velx[2],msg.current_velx[3],msg.current_velx[4],msg.current_velx[5]))
print(" task_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.task_err[0],msg.task_err[1],msg.task_err[2],msg.task_err[3],msg.task_err[4],msg.task_err[5]))
print(" solution_space : %d" % (msg.solution_space))
sys.stdout.write(" rotation_matrix : ")
for i in range(0 , 3):
sys.stdout.write( "dim : [%d]"% i)
sys.stdout.write(" [ ")
for j in range(0 , 3):
sys.stdout.write("%d " % msg.rotation_matrix[i].data[j])
sys.stdout.write("] ")
print ##end line
print(" dynamic_tor : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.dynamic_tor[0],msg.dynamic_tor[1],msg.dynamic_tor[2],msg.dynamic_tor[3],msg.dynamic_tor[4],msg.dynamic_tor[5]))
print(" actual_jts : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_jts[0],msg.actual_jts[1],msg.actual_jts[2],msg.actual_jts[3],msg.actual_jts[4],msg.actual_jts[5]))
print(" actual_ejt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ejt[0],msg.actual_ejt[1],msg.actual_ejt[2],msg.actual_ejt[3],msg.actual_ejt[4],msg.actual_ejt[5]))
print(" actual_ett : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ett[0],msg.actual_ett[1],msg.actual_ett[2],msg.actual_ett[3],msg.actual_ett[4],msg.actual_ett[5]))
print(" sync_time : %7.3f" % (msg.sync_time))
print(" actual_bk : %d %d %d %d %d %d" % (msg.actual_bk[0],msg.actual_bk[1],msg.actual_bk[2],msg.actual_bk[3],msg.actual_bk[4],msg.actual_bk[5]))
print(" actual_bt : %d %d %d %d %d " % (msg.actual_bt[0],msg.actual_bt[1],msg.actual_bt[2],msg.actual_bt[3],msg.actual_bt[4]))
print(" actual_mc : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mc[0],msg.actual_mc[1],msg.actual_mc[2],msg.actual_mc[3],msg.actual_mc[4],msg.actual_mc[5]))
print(" actual_mt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mt[0],msg.actual_mt[1],msg.actual_mt[2],msg.actual_mt[3],msg.actual_mt[4],msg.actual_mt[5]))
#print digital i/o
sys.stdout.write(" ctrlbox_digital_input : ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_input[i])
print ##end line
sys.stdout.write(" ctrlbox_digital_output: ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_output[i])
print
sys.stdout.write(" flange_digital_input : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_input[i])
print
sys.stdout.write(" flange_digital_output : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_output[i])
print
#print modbus i/o
sys.stdout.write(" modbus_state : " )
if len(msg.modbus_state) > 0:
for i in range(0 , len(msg.modbus_state)):
sys.stdout.write("[" + msg.modbus_state[i].modbus_symbol)
sys.stdout.write(", %d] " % msg.modbus_state[i].modbus_value)
print
print(" access_control : %d" % (msg.access_control))
print(" homming_completed : %d" % (msg.homming_completed))
print(" tp_initialized : %d" % (msg.tp_initialized))
print(" mastering_need : %d" % (msg.mastering_need))
print(" drl_stopped : %d" % (msg.drl_stopped))
print(" disconnected : %d" % (msg.disconnected))
msgRobotState_cb.count = 0
def thread_subscriber():
rospy.Subscriber('/'+ROBOT_ID +ROBOT_MODEL+'/state', RobotState, msgRobotState_cb)
rospy.spin()
#rospy.spinner(2)
if __name__ == "__main__":
rospy.init_node('dsr_service_drl_simple_py')
rospy.on_shutdown(shutdown)
t1 = threading.Thread(target=thread_subscriber)
t1.daemon = True
t1.start()
pub_stop = rospy.Publisher('/'+ROBOT_ID +ROBOT_MODEL+'/stop', RobotStop, queue_size=10)
set_robot_mode(ROBOT_MODE_AUTONOMOUS)
drlCodeMove = "set_velj(50)\nset_accj(50)\nmovej([0,0,90,0,90,0])\n"
drlCodeReset = "movej([0,0,0,0,0,0])\n"
drl_script_run(ROBOT_SYSTEM_REAL, drlCodeMove + drlCodeReset)
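# Illustrative reading of the DRL snippet above: set_velj/set_accj set the
# global joint velocity and acceleration to 50, movej([0,0,90,0,90,0]) moves
# the arm to that joint pose (in degrees), and the reset script returns it to
# the all-zero joint position.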
while not rospy.is_shutdown():
pass
print 'good bye!'
|
tello.py
|
from collections import namedtuple
import socket
import threading
from threading import Thread
import queue
from datetime import datetime
import time
import traceback
Address = namedtuple('Address', 'ip, port')
class AtomicInt(object):
def __init__(self, initial=0):
self.value = initial
self._lock = threading.Lock()
def increment(self, num=1):
with self._lock:
self.value += num
return self.value
class LogItem(object):
"""
Log item.
"""
def __init__(self, command, id):
"""
Ctor.
:param command: Command.
:param id: ID.
"""
self.command = command
self.response = None
self.id = id
self.start_time = datetime.now()
self.end_time = None
self.duration = None
self.drone_ip = None
def add_response(self, response, ip):
"""
Adds a response.
:param response: Response.
:param ip: IP address.
:return: None.
"""
if self.response is None:
self.response = response
self.end_time = datetime.now()
self.duration = self._get_duration()
self.drone_ip = ip
def _get_duration(self):
"""
Gets the duration.
:return: Duration (seconds).
"""
diff = self.end_time - self.start_time
return diff.total_seconds()
def print_stats(self):
"""
Prints statistics.
:return: None.
"""
print(self.get_stats())
def got_response(self):
"""
Checks if response was received.
:return: A boolean indicating if response was received.
"""
return self.response is not None
def get_stats(self):
"""
Gets the statistics.
:return: Statistics.
"""
return {
'id': self.id,
'command': self.command,
'response': self.response,
'start_time': self.start_time,
'end_time': self.end_time,
'duration': self.duration
}
def get_stats_delimited(self):
stats = self.get_stats()
keys = ['id', 'command', 'response', 'start_time', 'end_time', 'duration']
vals = [f'{k}={stats[k]}' for k in keys]
vals = ', '.join(vals)
return vals
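# Example of the delimited form produced above (illustrative values):
#   id=1, command=battery?, response=87, start_time=..., end_time=..., duration=0.42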
def __repr__(self):
return self.get_stats_delimited()
class Swarm(object):
def __init__(self, local_address, drones, commands):
self.n_drones_ready = AtomicInt()
self.local_address = local_address
self.drones = drones
self.commands = commands
def _get_local_address(self):
return (self.local_address.ip, self.local_address.port)
def init(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind(self._get_local_address())
for drone in self.drones:
drone.init(self.socket, self)
n_drones = len(self.drones)
while True:
if self.n_drones_ready.value != n_drones:
time.sleep(0.5)
else:
print(f'SWARM | {n_drones} initialized')
break
def deinit(self):
try:
self.socket.close()
print(f'SWARM DEINIT | socket_close | success')
except Exception as e:
print(f'SWARM DEINIT | socket_close | {e}')
for drone in self.drones:
drone.deinit()
def start(self):
for command in self.commands:
if '>' in command:
self._handle_command(command)
elif 'sync' in command:
self._handle_sync(command)
def _handle_command(self, command):
for drone in self.drones:
drone.add_command(command)
def _handle_sync(self, command):
def get_time_out(command):
tokens = [t.strip() for t in command.partition('sync')]
tokens = [t for t in tokens if len(t) > 0]
return float(tokens[1])
TIME_OUT = get_time_out(command)
self._sync(TIME_OUT)
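# e.g. the command "sync 8" parses to TIME_OUT = 8.0 and then blocks until all
# drone queues drain and responses arrive, or 8 seconds elapse.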
def wait(self):
self._sync()
def _sync(self, TIME_OUT=None):
def get_queues():
return [d.queue for d in self.drones]
def all_queues_empty():
queues = [q for q in get_queues() if not q.empty()]
return False if len(queues) > 0 else True
def get_last_logs():
return [d.logs[-1] for d in self.drones]
def all_responses_received():
logs = [log for log in get_last_logs() if not log.got_response()]
return False if len(logs) > 0 else True
start = time.time()
while not all_queues_empty():
if TIME_OUT is not None:
now = time.time()
diff = now - start
if diff > TIME_OUT:
print(f'SYNC | FAILED | queues_not_empty | waited {diff:.1f} | exceeded {TIME_OUT}')
break
else:
time.sleep(0.5)
while not all_responses_received():
if TIME_OUT is not None:
now = time.time()
diff = now - start
if diff > TIME_OUT:
print(f'SYNC | FAILED | responses_not_received | waited {diff:.1f} | exceeded {TIME_OUT}')
break
else:
time.sleep(0.5)
def finished_initializing(self, drone):
self.n_drones_ready.increment()
class Drone(object):
def __init__(self, tid, drone_address):
self.tid = tid
self.drone_address = drone_address
self.queue = queue.Queue()
self.logs = []
self.active = False
self.n_commands = 0
self.n_responses = 0
def _get_drone_address(self):
return (self.drone_address.ip, self.drone_address.port)
def init(self, socket, listener=None):
self.socket = socket
self.stop_thread = False
self.receive_thread = Thread(target=self._receive_thread)
self.receive_thread.daemon = True
self.receive_thread.start()
self.send_thread = Thread(target=self._send_thread)
self.send_thread.daemon = True
self.send_thread.start()
if listener is not None:
listener.finished_initializing(self)
def deinit(self):
self.stop_thread = True
try:
self.receive_thread.join()
print(f'DEINIT | {self.__repr__()} | receive_thread | join success')
except Exception as e:
print(f'DEINIT | {self.__repr__()} | receive_thread | {e}')
try:
self.send_thread.join()
print(f'DEINIT | {self.__repr__()} | send_thread | join success')
except Exception as e:
print(f'DEINIT | {self.__repr__()} | send_thread | {e}')
def add_command(self, command):
"""
Queues up a command to send.
:param command: Command.
"""
tokens = [t.strip() for t in command.partition('>')]
tid, action = tokens[0], tokens[2]
if tid == '*' or tid == str(self.tid):
print(f'ACTION | {self.__repr__()} | queued | {action}')
self.queue.put(action)
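# Examples of the routing grammar handled above (illustrative):
#   "* > takeoff"   is queued by every drone (wildcard tid)
#   "1 > flip f"    is queued only by the drone constructed with tid=1
# The text before ">" selects the target; the text after it is sent verbatim.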
def __repr__(self):
return f'TELLO@{self.drone_address.ip}:{self.drone_address.port}'
def _wait_for_response(self):
COMMAND_TIME_OUT = 10.0
start = time.time()
while not self.logs[-1].got_response():
now = time.time()
diff = now - start
if diff > COMMAND_TIME_OUT:
print(f'WAIT RESPONSE | {self.__repr__()} | timeout | {diff} | {self.logs[-1].command} | {self.logs[-1].got_response()}')
break
else:
# print(f'WAIT RESPONSE | {self.__repr__()} | waiting | {diff} | {self.logs[-1].command} | {self.logs[-1].got_response()}')
pass
# print(f'WAIT RESPONSE | {self.__repr__()} | finished_waiting | {diff} | {self.logs[-1].command} | {self.logs[-1].got_response()}')
def _send_thread(self):
"""
Sends commands to the drone.
:return: None.
"""
while True:
if self.stop_thread:
print('THREAD | SEND | stopping')
break
if self.queue.empty():
continue
if self.n_responses < self.n_commands:
continue
try:
command = self.queue.get()
self.socket.sendto(command.encode('utf-8'), self._get_drone_address())
print(f'COMMAND | {self.__repr__()} | {command}')
self.n_commands = self.n_commands + 1
log_item = LogItem(command, self.n_commands)
self.logs.append(log_item)
self._wait_for_response()
except socket.error as e:
print(f'THREAD | SEND | socket_error | {e}')
finally:
# print(f'CR | {self.n_commands} | {self.n_responses}')
pass
def _receive_thread(self):
"""
Listens to responses from the drone.
:return: None.
"""
while True:
if self.stop_thread:
print('THREAD | RECEIVE | stopping')
break
try:
response, ip = self.socket.recvfrom(1024)
response = response.decode('utf-8').strip()
ip = ''.join(str(ip[0]))
self.n_responses = self.n_responses + 1
if response.upper() == 'OK' and not self.active:
# print(f'RESPONSE | {self.__repr__()} is active | {self.logs[-1].command} | {response} | {ip}')
print(f'RESPONSE | {self.__repr__()} is active | {response} | {ip}')
self.active = True
else:
# print(f'RESPONSE | {self.__repr__()} | {self.logs[-1].command} | {response} | {ip}')
print(f'RESPONSE | {self.__repr__()} | {response} | {ip}')
self.logs[-1].add_response(response, ip)
except socket.error as e:
# print(f'THREAD | RECEIVE | socket_error | {e}')
pass
finally:
# print(f'CR | {self.n_commands} | {self.n_responses}')
pass
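# Illustrative sketch (not part of the original script): how the "<tid> > <action>"
# addressing used by Drone.add_command() can be parsed on its own. The helper name
# parse_command is hypothetical and only exists to make the scheme concrete.
def parse_command(command):
    """Split a swarm command into (target, action).

    '* > takeoff'  -> ('*', 'takeoff')   # broadcast to every drone
    '1 > flip f'   -> ('1', 'flip f')    # only the drone with tid == 1
    """
    tokens = [t.strip() for t in command.partition('>')]
    return tokens[0], tokens[2]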
if __name__ == '__main__':
drones = [
Drone(0, Address('192.168.3.101', 8889)),
Drone(1, Address('192.168.3.103', 8889)),
        Drone(2, Address('192.168.3.104', 8889))
]
# drones = [
# Drone(0, Address('192.168.3.104', 8889))
# ]
commands = [
'* > command',
'* > battery?',
'* > takeoff',
'sync 8',
'* > up 50',
'sync 8',
'* > cw 90',
'sync 8',
'* > ccw 90',
'sync 8',
'* > flip f',
'sync 8',
'* > flip b',
'sync 8',
'* > flip l',
'sync 8',
'* > flip r',
'sync 8',
'* > flip f',
'sync 8',
'* > flip b',
'sync 8',
'* > land',
'* > battery?'
]
# commands = [
# '* > command',
# '* > battery?'
# ]
swarm = Swarm(Address('', 8889), drones, commands)
try:
swarm.init()
swarm.start()
swarm.wait()
except KeyboardInterrupt as ki:
print('KEYBOARD INTERRUPT')
except Exception as e:
traceback.print_exc()
finally:
swarm.deinit()
|
executors.py
|
# -*- coding: utf-8 -*-
""" Single and multi-threaded executors."""
import os
import tempfile
import threading
from abc import ABCMeta, abstractmethod
import datetime
from typing import (Any, Dict, List, Optional, # pylint: disable=unused-import
Set, Text, Tuple)
from schema_salad.validate import ValidationException
import six
from six import string_types
import psutil
from .builder import Builder # pylint: disable=unused-import
from .errors import WorkflowException
from .loghandler import _logger
from .job import JobBase # pylint: disable=unused-import
from .mutation import MutationManager
from .provenance import CreateProvProfile
from .process import (Process, # pylint: disable=unused-import
cleanIntermediate, relocateOutputs)
from .utils import DEFAULT_TMP_PREFIX
from .context import RuntimeContext, getdefault # pylint: disable=unused-import
from .workflow import Workflow, WorkflowJob, WorkflowJobStep
class JobExecutor(six.with_metaclass(ABCMeta, object)):
""" Abstract base job executor. """
def __init__(self):
# type: (...) -> None
self.final_output = [] # type: List
self.final_status = [] # type: List
self.output_dirs = set() # type: Set
def __call__(self, *args, **kwargs):
return self.execute(*args, **kwargs)
def output_callback(self, out, process_status):
""" Collect the final status and outputs. """
self.final_status.append(process_status)
self.final_output.append(out)
@abstractmethod
def run_jobs(self,
process, # type: Process
job_order_object, # type: Dict[Text, Any]
logger,
runtime_context # type: RuntimeContext
): # type: (...) -> None
""" Execute the jobs for the given Process. """
pass
def execute(self,
process, # type: Process
job_order_object, # type: Dict[Text, Any]
runtime_context, # type: RuntimeContext
logger=_logger,
): # type: (...) -> Tuple[Optional[Dict[Text, Any]], Text]
""" Execute the process. """
if not runtime_context.basedir:
raise WorkflowException("Must provide 'basedir' in runtimeContext")
finaloutdir = None # Type: Optional[Text]
original_outdir = runtime_context.outdir
if isinstance(original_outdir, string_types):
finaloutdir = os.path.abspath(original_outdir)
runtime_context = runtime_context.copy()
runtime_context.outdir = tempfile.mkdtemp(
prefix=getdefault(runtime_context.tmp_outdir_prefix, DEFAULT_TMP_PREFIX))
self.output_dirs.add(runtime_context.outdir)
runtime_context.mutation_manager = MutationManager()
runtime_context.toplevel = True
runtime_context.workflow_eval_lock = threading.Condition(threading.RLock())
job_reqs = None
if "cwl:requirements" in job_order_object:
job_reqs = job_order_object["cwl:requirements"]
elif ("cwl:defaults" in process.metadata
and "cwl:requirements" in process.metadata["cwl:defaults"]):
job_reqs = process.metadata["cwl:defaults"]["cwl:requirements"]
if job_reqs:
for req in job_reqs:
process.requirements.append(req)
self.run_jobs(process, job_order_object, logger, runtime_context)
if self.final_output and self.final_output[0] and finaloutdir:
self.final_output[0] = relocateOutputs(
self.final_output[0], finaloutdir, self.output_dirs,
runtime_context.move_outputs, runtime_context.make_fs_access(""),
getdefault(runtime_context.compute_checksum, True))
if runtime_context.rm_tmpdir:
cleanIntermediate(self.output_dirs)
if self.final_output and self.final_status:
if runtime_context.research_obj is not None and \
isinstance(process, (JobBase, Process, WorkflowJobStep,
WorkflowJob)) and process.parent_wf:
process_run_id = None
name = "primary"
process.parent_wf.generate_output_prov(self.final_output[0],
process_run_id, name)
process.parent_wf.document.wasEndedBy(
process.parent_wf.workflow_run_uri, None, process.parent_wf.engine_uuid,
datetime.datetime.now())
process.parent_wf.finalize_prov_profile(name)
return (self.final_output[0], self.final_status[0])
return (None, "permanentFail")
class SingleJobExecutor(JobExecutor):
""" Default single-threaded CWL reference executor. """
def run_jobs(self,
process, # type: Process
job_order_object, # type: Dict[Text, Any]
logger,
runtime_context # type: RuntimeContext
): # type: (...) -> None
process_run_id = None # type: Optional[str]
reference_locations = {} # type: Dict[Text,Text]
# define provenance profile for single commandline tool
if not isinstance(process, Workflow) \
and runtime_context.research_obj is not None:
orcid = runtime_context.orcid
full_name = runtime_context.cwl_full_name
process.provenance_object = CreateProvProfile(
runtime_context.research_obj, orcid, full_name)
process.parent_wf = process.provenance_object
jobiter = process.job(job_order_object, self.output_callback,
runtime_context)
try:
for job in jobiter:
if job:
if runtime_context.builder is not None:
job.builder = runtime_context.builder
if job.outdir:
self.output_dirs.add(job.outdir)
if runtime_context.research_obj is not None:
if not isinstance(process, Workflow):
runtime_context.prov_obj = process.provenance_object
else:
runtime_context.prov_obj = job.prov_obj
assert runtime_context.prov_obj
process_run_id, reference_locations = \
runtime_context.prov_obj.evaluate(
process, job, job_order_object,
runtime_context.make_fs_access,
runtime_context)
runtime_context = runtime_context.copy()
runtime_context.process_run_id = process_run_id
runtime_context.reference_locations = \
reference_locations
job.run(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
break
except (ValidationException, WorkflowException):
raise
except Exception as e:
logger.exception("Got workflow error")
raise WorkflowException(Text(e))
class MultithreadedJobExecutor(JobExecutor):
"""
Experimental multi-threaded CWL executor.
    Performs simple resource accounting: it will not start a job unless
    enough cores / RAM are available, but it makes no attempt to
    optimize usage.
"""
def __init__(self): # type: () -> None
super(MultithreadedJobExecutor, self).__init__()
self.threads = set() # type: Set[threading.Thread]
self.exceptions = [] # type: List[WorkflowException]
self.pending_jobs = [] # type: List[JobBase]
self.pending_jobs_lock = threading.Lock()
self.max_ram = psutil.virtual_memory().available / 2**20
self.max_cores = psutil.cpu_count()
self.allocated_ram = 0
self.allocated_cores = 0
def select_resources(self, request, runtime_context): # pylint: disable=unused-argument
# type: (Dict[str, int], RuntimeContext) -> Dict[str, int]
""" Naïve check for available cpu cores and memory. """
result = {} # type: Dict[str, int]
maxrsc = {
"cores": self.max_cores,
"ram": self.max_ram
}
for rsc in ("cores", "ram"):
if request[rsc+"Min"] > maxrsc[rsc]:
raise WorkflowException(
"Requested at least %d %s but only %d available" %
(request[rsc+"Min"], rsc, maxrsc[rsc]))
if request[rsc+"Max"] < maxrsc[rsc]:
result[rsc] = request[rsc+"Max"]
else:
result[rsc] = maxrsc[rsc]
return result
def run_job(self,
job, # type: JobBase
runtime_context # type: RuntimeContext
): # type: (...) -> None
""" Execute a single Job in a seperate thread. """
if job is not None:
with self.pending_jobs_lock:
self.pending_jobs.append(job)
while self.pending_jobs:
with self.pending_jobs_lock:
job = self.pending_jobs[0]
if isinstance(job, JobBase):
if ((self.allocated_ram + job.builder.resources["ram"])
> self.max_ram or
(self.allocated_cores + job.builder.resources["cores"])
> self.max_cores):
return
self.pending_jobs.remove(job)
def runner(my_job, my_runtime_context):
""" Job running thread. """
try:
my_job.run(my_runtime_context)
except WorkflowException as err:
_logger.exception("Got workflow error")
self.exceptions.append(err)
except Exception as err: # pylint: disable=broad-except
_logger.exception("Got workflow error")
self.exceptions.append(WorkflowException(Text(err)))
finally:
with my_runtime_context.workflow_eval_lock:
self.threads.remove(threading.current_thread())
if isinstance(my_job, JobBase):
self.allocated_ram -= my_job.builder.resources["ram"]
self.allocated_cores -= my_job.builder.resources["cores"]
my_runtime_context.workflow_eval_lock.notifyAll()
thread = threading.Thread(
target=runner, args=(job, runtime_context))
thread.daemon = True
self.threads.add(thread)
if isinstance(job, JobBase):
self.allocated_ram += job.builder.resources["ram"]
self.allocated_cores += job.builder.resources["cores"]
thread.start()
def wait_for_next_completion(self, runtimeContext): # type: (RuntimeContext) -> None
""" Wait for jobs to finish. """
if runtimeContext.workflow_eval_lock is not None:
runtimeContext.workflow_eval_lock.wait()
if self.exceptions:
raise self.exceptions[0]
def run_jobs(self,
process, # type: Process
job_order_object, # type: Dict[Text, Any]
logger,
runtime_context # type: RuntimeContext
): # type: (...) -> None
jobiter = process.job(job_order_object, self.output_callback,
runtime_context)
if runtime_context.workflow_eval_lock is None:
raise WorkflowException(
"runtimeContext.workflow_eval_lock must not be None")
runtime_context.workflow_eval_lock.acquire()
for job in jobiter:
if job is not None:
if runtime_context.builder is not None:
job.builder = runtime_context.builder
if job.outdir:
self.output_dirs.add(job.outdir)
self.run_job(job, runtime_context)
if job is None:
if self.threads:
self.wait_for_next_completion(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
break
while self.threads:
self.wait_for_next_completion(runtime_context)
runtime_context.workflow_eval_lock.release()
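# Illustrative sketch (not part of cwltool): the resource-selection rule used by
# MultithreadedJobExecutor.select_resources(), rewritten as a standalone function
# over plain dicts. The function name and the example numbers are assumptions made
# for illustration; RuntimeError stands in for cwltool's WorkflowException.
def naive_select_resources(request, max_cores, max_ram):
    """Pick cores/ram for a job: fail if the minimum cannot be met, otherwise
    grant the requested maximum capped at what the machine actually has."""
    maxrsc = {"cores": max_cores, "ram": max_ram}
    result = {}
    for rsc in ("cores", "ram"):
        if request[rsc + "Min"] > maxrsc[rsc]:
            raise RuntimeError(
                "Requested at least %d %s but only %d available"
                % (request[rsc + "Min"], rsc, maxrsc[rsc]))
        result[rsc] = min(request[rsc + "Max"], maxrsc[rsc])
    return result

# Example: a job asking for 1-4 cores and 1024-8192 MiB of RAM on a 2-core,
# 4096 MiB machine would be granted {"cores": 2, "ram": 4096}.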
|
generic_websocket.py
|
"""
Module used as an interface to describe a generic websocket client
"""
import asyncio
import websockets
import socket
import json
import time
from threading import Thread, Lock
from pyee import AsyncIOEventEmitter
from ..utils.custom_logger import CustomLogger
# websocket exceptions
from websockets.exceptions import ConnectionClosed, InvalidStatusCode
class AuthError(Exception):
"""
Thrown whenever there is a problem with the authentication packet
"""
pass
def is_json(myjson):
try:
json_object = json.loads(myjson)
except ValueError as e:
return False
return True
class Socket():
def __init__(self, sId):
self.ws = None
self.isConnected = False
self.isAuthenticated = False
self.id = sId
self.lock = Lock()
def set_connected(self):
self.isConnected = True
def set_disconnected(self):
self.isConnected = False
def set_authenticated(self):
self.isAuthenticated = True
def set_unauthenticated(self):
self.isAuthenticated = False
def set_websocket(self, ws):
self.ws = ws
async def send(self, data):
with self.lock:
await self.ws.send(data)
def _start_event_worker():
return AsyncIOEventEmitter()
class GenericWebsocket:
"""
Websocket object used to contain the base functionality of a websocket.
    Includes an event emitter and a standard websocket client.
"""
logger = CustomLogger('BfxWebsocket', logLevel="DEBUG")
def __init__(self, host, logLevel='INFO', max_retries=5, create_event_emitter=None):
self.host = host
self.logger.set_level(logLevel)
        # override 'error' event to stop it raising an exception
# self.events.on('error', self.on_error)
self.ws = None
self.max_retries = max_retries
self.attempt_retry = True
self.sockets = {}
        # create the event emitter (or use the factory supplied by the caller)
create_ee = create_event_emitter or _start_event_worker
self.events = create_ee()
def run(self):
"""
        Start the websocket connection. This function spawns the initial socket
thread and connection.
"""
self._start_new_socket()
def get_task_executable(self):
"""
        Get the asyncio task that runs the socket indefinitely
"""
return self._run_socket()
def _start_new_socket(self, socketId=None):
if not socketId:
socketId = len(self.sockets)
def start_loop(loop):
asyncio.set_event_loop(loop)
loop.run_until_complete(self._run_socket())
worker_loop = asyncio.new_event_loop()
worker = Thread(target=start_loop, args=(worker_loop,))
worker.start()
return socketId
def _wait_for_socket(self, socket_id):
"""
Block until the given socket connection is open
"""
while True:
socket = self.sockets.get(socket_id, False)
if socket:
if socket.isConnected and socket.ws:
return
time.sleep(0.01)
def get_socket(self, socketId):
return self.sockets[socketId]
def get_authenticated_socket(self):
for socketId in self.sockets:
if self.sockets[socketId].isAuthenticated:
return self.sockets[socketId]
return None
async def _run_socket(self):
retries = 0
sId = len(self.sockets)
s = Socket(sId)
self.sockets[sId] = s
loop = asyncio.get_event_loop()
while self.max_retries == 0 or (retries < self.max_retries and self.attempt_retry):
try:
async with websockets.connect(self.host) as websocket:
self.sockets[sId].set_websocket(websocket)
self.sockets[sId].set_connected()
self.logger.info("Websocket connected to {}".format(self.host))
retries = 0
while True:
# optimization - wait 0 seconds to force the async queue
# to be cleared before continuing
await asyncio.sleep(0)
message = await websocket.recv()
await self.on_message(sId, message)
except (ConnectionClosed, socket.error, InvalidStatusCode) as e:
self.sockets[sId].set_disconnected()
if self.sockets[sId].isAuthenticated:
self.sockets[sId].set_unauthenticated()
self._emit('disconnected')
if (not self.attempt_retry):
return
self.logger.error(str(e))
retries += 1
                # wait 5 seconds before retrying
self.logger.info("Waiting 5 seconds before retrying...")
await asyncio.sleep(5)
self.logger.info("Reconnect attempt {}/{}".format(retries, self.max_retries))
self.logger.info("Unable to connect to websocket.")
self._emit('stopped')
async def stop(self):
"""
Stop all websocket connections
"""
self.attempt_retry = False
for key, socket in self.sockets.items():
await socket.ws.close()
self._emit('done')
def remove_all_listeners(self, event):
"""
Remove all listeners from event emitter
"""
self.events.remove_all_listeners(event)
def on(self, event, func=None):
"""
Add a new event to the event emitter
"""
if not func:
return self.events.on(event)
self.events.on(event, func)
def once(self, event, func=None):
"""
Add a new event to only fire once to the event
emitter
"""
if not func:
return self.events.once(event)
self.events.once(event, func)
def _emit(self, event, *args, **kwargs):
self.events.emit(event, *args, **kwargs)
async def on_error(self, error):
"""
On websocket error print and fire event
"""
self.logger.error(error)
async def on_close(self):
"""
This is used by the HF data server.
"""
        await self.stop()
async def on_open(self):
"""
On websocket open
"""
pass
    async def on_message(self, socket_id, message):
"""
On websocket message
"""
pass
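# Illustrative sketch (not part of this module): a minimal concrete client built on
# GenericWebsocket. The subclass name, the host URL and the 'payload' event name are
# assumptions for illustration only.
class EchoWebsocket(GenericWebsocket):
    async def on_message(self, socket_id, message):
        # Parse JSON payloads and re-emit them on the internal event emitter
        # so callers can subscribe with .on('payload', handler).
        if is_json(message):
            self._emit('payload', json.loads(message))

# Typical wiring (not executed here):
#   ws = EchoWebsocket('wss://example.invalid/stream')
#   ws.on('payload', lambda data: print(data))
#   ws.run()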
|
stats.py
|
from client import client
from datetime import datetime
import discord
import os
import socket
import threading
import time
try:
import psutil
except ModuleNotFoundError:
has_psutil = False
else:
has_psutil = True
cmd_name = "stats"
client.basic_help(title=cmd_name, desc=f"shows various running statistics of {client.bot_name}")
detailed_help = {
"Usage": f"{client.default_prefix}{cmd_name}",
"Description": f"This command shows different available statistics of {client.bot_name}, including servers, uptime, and commands run.",
"Related": f"`{client.default_prefix} info` - shows information about {client.bot_name}",
}
client.long_help(cmd=cmd_name, mapping=detailed_help)
@client.ready
async def readier():
def psutil_update_thread_loop(client):
while client.active:
# self_process.cpu_percent() # not sure how to optimize this loop in another thread so we're going to
# comment it out and deal with it for now
psutil.cpu_percent(percpu=True)
time.sleep(5)
global psutil_update_thread
    psutil_update_thread = threading.Thread(target=psutil_update_thread_loop, name="PSUtil_Background_Loop", args=[client])
    if has_psutil:
        # start the background sampler so later cpu_percent() calls report usage
        # since the previous poll rather than a meaningless first reading
        psutil_update_thread.start()
    return
@client.command(trigger=cmd_name, aliases=["statistics", "s"])
async def statistics(command: str, message: discord.Message):
if "--hostname" in command:
include_hostname = True
else:
include_hostname = False
if "--uptime" in command:
up = time.perf_counter() - client.first_execution
await message.channel.send(f"Uptime:\n`{up:.3f}` seconds\n`{up/86400:.4f}` days")
return
async with message.channel.typing():
if has_psutil:
try:
temp = psutil.sensors_temperatures()['cpu-thermal'][0].current
except (AttributeError, KeyError):
temp = None
            proc = psutil.Process()
            cpu_self = proc.cpu_percent(interval=1)
            self_m_used = proc.memory_info().rss
m_raw = psutil.virtual_memory()
m_total = m_raw.total
m_available = m_raw.available
m_used = m_total - m_available
cpu = psutil.cpu_percent(percpu=True)
index = 0
cpu_text = ""
for v in cpu:
cpu_text += f"**CPU {index}:** {v}%\n"
index += 1
embed = discord.Embed(title=f"{client.bot_name} stats", description=discord.Embed.Empty, color=0x404040)
up = time.perf_counter() - client.first_execution
embed = embed.add_field(name="Uptime", value=f"{up:.3f} seconds\n{up/86400:.4f} days")
embed = embed.add_field(name="Servers", value=len(client.guilds))
embed = embed.add_field(name="Total commands run in all servers since last reboot", value=client.command_count, inline=False)
mps = client.message_count / up
msg_freq = up / client.message_count
embed = embed.add_field(name="Total messages sent in all servers since last reboot", value=f"{client.message_count} ({mps:.4f}/sec) ({msg_freq:.4f} sec/message)", inline=False)
n_connected = len(client.voice_clients)
n_playing = len([x for x in client.voice_clients if x.is_playing()])
embed = embed.add_field(name="Connected voice chats", value=f"{n_connected} ({n_playing} playing)")
embed = embed.add_field(name="Bot Process ID", value=os.getpid())
if include_hostname: embed = embed.add_field(name="Host Machine Name", value=socket.gethostname())
if has_psutil:
embed = embed.add_field(name="Host CPU temperature", value=f"{int(temp) if temp is not None else 'Unknown'}")
embed = embed.add_field(name="Process Memory Usage", value=f"{self_m_used/(1024*1024):.3f} MiB")
embed = embed.add_field(name="Process CPU Usage (relative to one core)", value=f"{cpu_self:.1f}%")
embed = embed.add_field(name="System RAM Usage", value=f"{m_used/(1024*1024):.1f}/{m_total/(1024*1024):.1f} MiB ({(m_used/m_total)*100:.2f}%)")
embed = embed.add_field(name="System CPU Usage", value=cpu_text, inline=False)
embed = embed.set_footer(text=datetime.utcnow().__str__())
await message.channel.send(embed=embed)
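# Illustrative sketch (not part of this command module): why the background loop above
# keeps calling psutil.cpu_percent(). With interval=None the call is non-blocking and
# reports usage since the *previous* call, so the very first reading is meaningless;
# polling in the background keeps the next reading fresh. Function name is hypothetical.
def sample_cpu_nonblocking():
    if not has_psutil:
        return None
    psutil.cpu_percent(percpu=True)         # prime: establishes the measurement baseline
    time.sleep(1)                           # let some CPU time elapse
    return psutil.cpu_percent(percpu=True)  # per-core usage over that second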
|
dnsserver.py
|
import configparser
import socket
import re
import binascii
from datetime import datetime
import threading
import time
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import sqlite3
#///////////////////// thread functions /////////////////////#
def load_blacklist_from_local_file(interval):
interval = int(interval)
global blacklist_file
global blacklist
try:
while True:
time.sleep(interval)
with open(blacklist_file) as f:
content = f.readlines()
blacklist_lines = [x.strip() for x in content]
write("<thread>implementing blacklist file to memory, entry count: " + str(len(blacklist_lines)))
blacklist = list(dict.fromkeys(blacklist_lines))
blacklist_lines.clear()
except Exception as err:
write("Error occurred: " + str(err))
def load_blacklist_from_web(interval):
interval = int(interval)
global blacklist_file
global blacklist
while True:
time.sleep(interval)
try:
append_counter = 0
write("<thread>requesting malicious domain names")
x = requests.get("https://www.usom.gov.tr/url-list.txt", verify=False, timeout=40)
if x.status_code == 200:
write("<thread>malicious domain names fetched, applying regex")
regex_results = re.findall('.*[a-z]', x.text)
malic_dns_list = list(dict.fromkeys(regex_results))
write("<thread>regex applied")
write("<thread>appending malicious domain names into file, just not exist ones.")
with open(blacklist_file) as x:
content = x.readlines()
local_blacklist_file_list = [x.strip() for x in content]
with open(blacklist_file, 'a') as f:
for dns_line in malic_dns_list:
if dns_line not in local_blacklist_file_list and '/' not in dns_line:
f.write((dns_line.replace("\x20", "").replace("\n", "")).strip() + "\n")
append_counter += 1
write("<thread>append process complete with " + str(append_counter) + " new records, clearing RAM")
write("<thread>RAM cleared, update process complete")
append_counter = 0
malic_dns_list.clear()
content.clear()
local_blacklist_file_list.clear()
else:
write("can not fetch the domain list from web.")
except Exception as err:
write("Error occurred: " + str(err))
def start_threads(interval_local, interval_web):
t1 = threading.Thread(target=load_blacklist_from_local_file, args=(interval_local,), daemon=True)
t2 = threading.Thread(target=load_blacklist_from_web, args=(interval_web,), daemon=True)
t1.start()
t2.start()
#///////////////////// utility function /////////////////////#
def write(text):
global log_file
try:
text = str(datetime.now().strftime("%d/%m/%Y %H:%M:%S") + " - " + text)
print(text)
with open(log_file, 'a') as f:
f.write(text + "\n")
except Exception as err:
print("Error occurred:", str(err))
def log_dns_query(is_secure, dns):
global db
global db_cursor
db_cursor.execute("INSERT INTO dns_queries (date, is_secure, dns) VALUES (?, ?, ?)", (str(datetime.now().strftime("%d/%m/%Y %H:%M:%S")), str(is_secure), dns))
db.commit()
def build_alert(addr, dns):
global db
global db_cursor
db_cursor.execute("INSERT INTO alerts (date, addr, alert_data) VALUES (?, ?, ?)", (str(datetime.now().strftime("%d/%m/%Y %H:%M:%S")), addr, dns))
db.commit()
#///////////////////// main functions /////////////////////#
def load_blacklist():
global blacklist_file
global blacklist
try:
with open(blacklist_file) as f:
content = f.readlines()
blacklist_lines = [x.strip() for x in content]
write("entry in blacklist file: " + str(len(blacklist_lines)))
write("implementing blacklist file to memory")
blacklist = list(dict.fromkeys(blacklist_lines))
blacklist_lines.clear()
except Exception as err:
print("Error occurred:", str(err))
exit(0)
def start_socket(dns_service_address, dns_service_port):
global glb_sock
try:
write("Starting DNS Server")
glb_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
glb_sock.bind((dns_service_address, int(dns_service_port)))
write("DNS Server started at: " + str(dns_service_address + ":" + str(dns_service_port)))
except Exception as err:
write("Error occurred: " + str(err))
def listen_socket():
global glb_sock
try:
write("Listening socket....\n")
while True:
# get dns request data from client
went_data, addr = glb_sock.recvfrom(512)
# write("went data from client: " + str(went_data))
# parse domain name
domain_for_regex = bytes.fromhex(binascii.hexlify(bytearray(went_data)).decode("utf-8")).decode('utf-8', errors='ignore')
            domain_name_list = re.compile(r'\w{2,}').findall(domain_for_regex)
if len(domain_name_list) > 4:
del domain_name_list[0]
            # keep only all-lowercase tokens; build a new list instead of removing
            # items while iterating, which would skip elements
            domain_name_list = [item for item in domain_name_list if item.islower()]
dns = ""
for item in domain_name_list:
dns += item + "."
dns = dns[:-1]
write("client" + str(addr) + " requested: " + dns)
# control domain name
write("running security check")
isSecure = control_domain_name(dns)
# if dns not ok, do not answer, dns requester client will timeout
if not isSecure:
write("requested dns is not secure, alerting, no response will be given\n")
log_dns_query(False, dns)
build_alert(str(addr), dns)
else:
# if dns ok, send data to dns resolve service and give the response back to requester
write("requested dns is secure, requesting from resolver")
forward_addr = (dns_resolve_address, int(dns_resolve_port)) # dns and port
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
write("requesting to resolver server: " + dns)
client.sendto(bytes(went_data), forward_addr)
responsed_data, _ = client.recvfrom(1024)
write("get response from resolver server, sending to client back")
# write("redirected response data from server: " + str(responsed_data))
glb_sock.sendto(responsed_data, addr)
write("response redirected\n")
log_dns_query(True, dns)
except Exception as err:
write("Error occurred:" + str(err))
def control_domain_name(dns):
global blacklist
if dns in blacklist:
return False
return True
print("(local)-------------------------------------")
print("(local)script started, loading configurations")
config_parser = configparser.ConfigParser()
config_parser.read('config.txt')
dns_service_address = config_parser['general_configuration']['dns_service_address']
dns_service_port = config_parser['general_configuration']['dns_service_port']
dns_resolve_address = config_parser['general_configuration']['dns_resolve_address']
dns_resolve_port = config_parser['general_configuration']['dns_resolve_port']
blacklist_interval_web = config_parser['general_configuration']['blacklist_interval_web']
blacklist_interval_local = config_parser['general_configuration']['blacklist_interval_local']
log_file = config_parser['general_configuration']['log_file']
write("-------------------------------------")
write("-------------------------------------")
write("script started, loading configurations")
write("DNS service IP Address is: " + dns_service_address)
write("DNS service Port is: " + dns_service_port)
write("DNS resolve IP Address is: " + dns_resolve_address)
write("DNS resolve Port is: " + dns_resolve_port)
write("Blacklist database remote update interval: " + blacklist_interval_web)
write("Blacklist memory update interval: " + blacklist_interval_local)
blacklist_file = config_parser['general_configuration']['dns_blacklist']
blacklist = []
glb_sock = None
write("configurations loaded.")
write("starting threads for blacklist operations")
start_threads(blacklist_interval_local, blacklist_interval_web)
write("loading blacklist from local file before start")
load_blacklist()
write("executing database operations")
db = sqlite3.connect(config_parser['general_configuration']['database_file'])
write("building database cursor")
db_cursor = db.cursor()
start_socket(dns_service_address, dns_service_port)
listen_socket()
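# Illustrative sketch (not wired into the server above): the question name in a DNS
# query can be read directly from the wire format instead of the regex heuristic used
# in listen_socket(). After the 12-byte header the QNAME is a sequence of
# length-prefixed labels terminated by a zero byte (RFC 1035). The name parse_qname
# is hypothetical.
def parse_qname(query_bytes):
    labels = []
    pos = 12  # skip the fixed DNS header
    while pos < len(query_bytes):
        length = query_bytes[pos]
        if length == 0:
            break
        labels.append(query_bytes[pos + 1:pos + 1 + length].decode("ascii", errors="ignore"))
        pos += 1 + length
    return ".".join(labels)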
|
alexa_manager.py
|
import hassapi as hass
import re
import sys
import time
from queue import Queue
from threading import Thread
"""
The Alexa_Manager class handles sending text-to-speech messages to Alexa media players.
The following features are implemented:
- Speak text on the chosen media_player
- Full queue support to manage multiple asynchronous TTS commands
- Waits for the TTS to finish so that a callback method can be supplied
"""
NOTIFY = "notify/"
SUB_VOICE = [
# ("[.]{2,}", "."),
("[\?\.\!,]+(?=[\?\.\!,])", ""), # Exclude duplicate
("(\s+\.|\s+\.\s+|[\.])(?! )(?![^{]*})(?![^\d.]*\d)", ". "),
("&", " and "), # escape
# ("(?<!\d),(?!\d)", ", "),
("[\n\*]", " "),
(" +", " "),
]
SUB_TEXT = [(" +", " "), ("\s\s+", "\n")]
VOICE_NAMES = (
"Carla",
"Giorgio",
"Bianca",
"Ivy",
"Joanna",
"Joey",
"Justin",
"Kendra",
"Kimberly",
"Matthew",
"Salli",
"Nicole",
"Russell",
"Amy",
"Brian",
"Emma",
"Aditi",
"Raveena",
"Chantal",
"Celine",
"Lea",
"Mathieu",
"Hans",
"Marlene",
"Vicki",
"Aditi",
"Mizuki",
"Takumi",
"Vitoria",
"Camila",
"Ricardo",
"Penelope",
"Lupe",
"Miguel",
"Conchita",
"Enrique",
"Lucia",
"Mia",
)
SUPPORTED_LANGUAGES = [
"it-IT",
"en-US",
"en-CA",
"en-AU",
"en-GB",
"en-IN",
"fr-CA",
"fr-FR",
"de-DE",
"hi-IN",
"ja-JP",
"pt-BR",
"es-US",
"es-ES",
"es-MX",
]
SPEECHCON = [
"a ah",
"abracadabra",
"accidenti",
"accipicchia",
"addio",
"ah",
"ahi",
"ahia",
"alleluia",
"aloha",
"alè",
"anzi",
"apriti sesamo",
"argh",
"arrivederci",
"attenzione",
"auguri",
"badabim badabum",
"badabum",
"bah",
"bam",
"bang",
"bang bang",
"banzai",
"basta",
"batti cinque",
"bau",
"bazinga",
"beh",
"ben fatto",
"bene",
"bene bene",
"bim bum bam",
"bing",
"bingo",
"bip bip",
"bis",
"bla",
"bla bla bla",
"bleah",
"boh",
"boing",
"bravo",
"brrr",
"bum",
"buon appetito",
"buon viaggio",
"buona fortuna",
"buonanotte",
"buonasera",
"buongiorno",
"buu",
"capito",
"caspita",
"cavoli",
"cavolo",
"cawabanga",
"certo",
"chissà",
"ci stai",
"ciao",
"cioè",
"ciuf ciuf",
"clic clac",
"come desideri",
"come no",
"come non detto",
"come va",
"come vuoi",
"contaci",
"coraggio",
"così così",
"cucù",
"d'accordo",
"d'oh",
"dai",
"davvero",
"din don",
"ding",
"ding ding ding",
"dormi bene",
"eh",
"eh beh",
"eh già",
"ehm",
"etciù",
"eureka",
"evviva",
"fiuu",
"geronimo",
"giusto",
"già",
"grande giove",
"grazie",
"ha",
"hey",
"hip hip hurrà",
"hmm",
"hurrà",
"hué hué",
"in guardia",
"incantata",
"kabùm",
"ma dai",
"magari",
"mah",
"mamma mia",
"mi piace",
"miao",
"mistero",
"muu",
"nano nano",
"non mi dire",
"oh",
"oh no",
"oh oh",
"oh sì",
"oh yes",
"oi",
"oink",
"ok",
"okei",
"oooh",
"or ora",
"ping",
"più o meno",
"plop",
"pop",
"poti poti",
"puf",
"pum pum",
"puntini puntini",
"puoi scommetterci",
"qua qua",
"ricevuto",
"roger",
"salute",
"scacco matto",
"scherzavo",
"sogni d'oro",
"splash",
"spoiler alert",
"su ",
"su su",
"swish",
"ta dà",
"taac",
"tic tac",
"tic tic tic",
"tic-toc",
"toc toc",
"toh",
"touché",
"trallalà",
"tsk tsk",
"tump",
"tweet",
"uffa",
"uh la là",
"uh oh",
"uomo in mare",
"vabbè",
"voilà",
"vroom",
"wow",
"yippii",
"zac",
"zap",
]
MOBILE_PUSH = ["push", "dropin", "dropin_notification"]
class Alexa_Manager(hass.Hass):
def initialize(self) -> None:
# self.set_log_level("DEBUG")
self.alexa_service = self.args.get("alexa_service")
# self.alexa_switch_entity = self.args.get("alexa_switch")
self.alexa_select_media_player = self.args.get("alexa_select_media_player")
self.alexa_type = self.args.get("alexa_type")
self.alexa_method = self.args.get("alexa_method")
self.alexa_sensor_media_player = self.args.get("alexa_sensor_media_player")
self.alexa_voice = self.args.get("alexa_voice")
# self.alexa_language = self.args.get("alexa_language")
self.prosody = self.args.get("prosody")
self.wait_time = self.args.get("wait_time")
self.cehck_alexa_service = self._check_alexa(self.alexa_service)
self.queue = Queue(maxsize=0)
self._when_tts_done_callback_queue = Queue()
t = Thread(target=self.worker)
t.daemon = True
t.start()
def speak(self, alexa):
"""Speak the provided text through the media player."""
if not self.cehck_alexa_service:
self.set_sensor(
"I can't find the Alexa Media component", "https://github.com/custom-components/alexa_media_player"
)
return
self.lg(f"-------------------- ALEXA START DISPATCH --------------------")
self.lg(f"FROM DISPATCH: {type(alexa)} value {alexa}")
# remove keys with None value from a dict # TODO
alexa = {k: v for k, v in alexa.items() if v not in [None, "None", ""]}
self.lg(f"REMOVE [NONE] VALUE: {type(alexa)} value {alexa}")
default_restore_volume = float(self.get_state(self.args.get("default_restore_volume"))) / 100
volume = float(alexa.get("volume", default_restore_volume))
message = str(alexa.get("message", alexa.get("message_tts")))
alexa_player = self.player_get(alexa.get("media_player", self.get_state(self.alexa_sensor_media_player)))
alexa_type = (
str(alexa.get("type", self.get_state(self.alexa_type))).lower().replace("dropin", "dropin_notification")
)
# Push notification
push = bool(self.check_bool(alexa.get("push")))
if push or alexa_type in MOBILE_PUSH and message:
message_push = self.remove_tags(self.replace_regular(message, SUB_TEXT))
self.call_service(
NOTIFY + self.alexa_service,
data={"type": "push"} if push else {"type": alexa_type},
target=alexa_player[0], # only one device
title=str(alexa.get("title", "")),
message=message_push,
)
self.lg(f"PUSH: {push} - TYPE: {alexa_type} - MESSAGE: {message_push}")
# Media Content # TODO Restore volume??
media_content_id = alexa.get("media_content_id")
media_content_type = alexa.get("media_content_type")
if media_content_id:
self.volume_get(alexa_player, default_restore_volume)
self.volume_set(alexa_player, volume)
self.call_service(
"media_player/play_media",
entity_id=alexa_player,
media_content_id=media_content_id,
media_content_type=media_content_type,
# extra = {"timer": 10} ##??
)
self.lg(f"Content id: {media_content_id} - Content type: {media_content_type}")
# Queues the message to be handled async, use when_tts_done_do method to supply callback when tts is done
elif alexa_type not in MOBILE_PUSH and message:
self.queue.put(
{
"text": message,
"volume": volume,
"alexa_type": alexa_type,
"alexa_player": alexa_player, # media_player
"default_restore_volume": default_restore_volume,
"alexa_notifier": str(alexa.get("notifier", self.alexa_service)),
"wait_time": float(alexa.get("wait_time", self.get_state(self.wait_time))),
"language": alexa.get("language"), # self.get_state(self.alexa_language)),
"alexa_method": str(alexa.get("method", self.get_state(self.alexa_method)).lower()),
"alexa_voice": str(alexa.get("voice", self.get_state(self.alexa_voice))).capitalize(),
"alexa_audio": alexa.get("audio", None),
"rate": float(alexa.get("rate", self.get_state(self.prosody["rate"]))),
"pitch": float(alexa.get("pitch", self.get_state(self.prosody["pitch"]))),
"ssml_volume": float(alexa.get("ssml_volume", self.get_state(self.prosody["volume"]))),
"whisper": bool(self.check_bool(alexa.get("whisper", False))),
"ssml_switch": bool(self.check_bool(alexa.get("ssml", self.get_state(self.args["ssml_switch"])))),
}
)
self.lg(f"-------------------- ALEXA END DISPATCH --------------------")
def lg(self, message):
self.log(message, level="DEBUG", ascii_encode=False)
def check_bool(self, value):
return str(value).lower() in ["true", "on", "yes", "1"]
def inbetween(self, minv, value, maxv):
return sorted([minv, value, maxv])[1]
def speak_tag(self, value): # TODO tags
return value if "<speak>" in value or not "</" in value else f"<speak>{value}</speak>"
def effect_tag(self, value):
return f"<amazon:effect name='whispered'>{value}</amazon:effect>"
def prosody_tag(self, value, rate, pitch, volume):
if rate != 100.0 or pitch != 0.0 or volume != 0.0:
rate = f"{self.inbetween(20, rate, 200)}%" # min 20% max 200%
pitch = f"{self.inbetween(-33.3, pitch, 50):+g}%" # min -33.3 max +50
volume = f"{self.inbetween(-50, volume, 4.08):+g}dB" # max +4.08dB
return f"<prosody rate='{rate}' pitch='{pitch}' volume='{volume}'> {value} </prosody>"
return value
def audio_tag(self, value: None):
if value is None:
return ""
return f"<audio src='{value}'/>" if "<audio src=" not in value else value
def lang_tag(self, value, lang):
if lang not in SUPPORTED_LANGUAGES:
self.lg(f"NOT SUPPORTED LANGUAGE: {lang}")
return value
self.lg(f"OK ADDED SSML LANGUAGE: {lang}")
return f"<lang xml:lang='{lang}'>{value}</lang>"
def voice_tag(self, value, name):
if name not in VOICE_NAMES:
self.lg(f"NOT SUPPORTED VOICE: {name}")
return value
self.lg(f"OK ADDED VOICE: {name}")
return f"<voice name='{name}'>{value}</voice>"
def say_as_tag(self, value):
return f"<say-as interpret-as='interjection'>{value}</say-as>"
def find_speechcon(self, value):
substrings = sorted(SPEECHCON, key=len, reverse=True)
regex = re.compile(r"\b" + r"\b|\b".join(map(re.escape, substrings)), re.I)
regex_match = re.findall(regex, value)
self.lg(f"FOUND SPEECHCON: {len(regex_match)} -> {regex_match}")
return regex.sub(lambda m: self.say_as_tag(m.group()), value)
def player_get(self, user_player):
media_player = []
user_player = self.converti(str(user_player.lower()))
for mpu in user_player: # MediaPlayerUser
if "test" in mpu:
media_player.extend(self.player_alexa)
if not self.entity_exists(mpu):
mpu = self.dict_select.get(mpu)
if mpu:
if "group." in mpu:
media_player.extend(self.get_state(mpu, attribute="entity_id"))
elif "sensor." in mpu:
media_player.append(self.get_state(mpu))
elif "media_player." in mpu:
media_player.append(mpu)
else:
self.log(f"Invalid group, sensor or player ENTITY-ID ({mpu})", level="WARNING")
if not media_player:
# media_player.append(self.get_state(self.alexa_sensor_media_player))
media_player = self.player_alexa
self.log(f"No media player {user_player} found. I use the default one. ({media_player})", level="WARNING")
media_player = list(set(media_player))
self.lg(f"GET PLAYER: {media_player}")
return media_player
def volume_get(self, media_player, volume: float):
"""Get and save the volume of each media player."""
self.dict_volumes = {m: self.get_state(m, attribute="volume_level", default=volume) for m in media_player}
self.lg(f"GET VOLUMES: {self.dict_volumes}")
return self.dict_volumes
    def volume_set(self, media_player, volume: float, restore: bool = False):
if self.dict_volumes:
for i, j in self.dict_volumes.items():
if j != volume:
if restore:
self.call_service("media_player/volume_set", entity_id=i, volume_level=j)
time.sleep(1)
self.lg(f"OK RESTORE VOL: {i} {j} [State: {self.get_state(i, attribute='volume_level')}]")
else:
self.call_service("media_player/volume_set", entity_id=media_player, volume_level=volume)
self.lg(f"SET VOLUMES: {media_player} {volume}")
break # time.sleep(2)
def replace_char(self, text: str, substitutions: dict):
"""Function that does multiple string replace ops in a single pass."""
substrings = sorted(substitutions, key=len, reverse=True)
regex = re.compile(r"\b" + r"\b|\b".join(map(re.escape, substrings)), re.I) # r'\b%s\b' % r'\b|\b'
return regex.sub(lambda match: substitutions[str.lower(match.group(0))], text) # added str.lower()
def replace_regular(self, text: str, substitutions: list):
for old, new in substitutions:
regex = re.compile(old)
text = re.sub(regex, new, str(text).strip())
return text
def remove_tags(self, text: str):
"""Remove all tags from a string."""
regex = re.compile("<.*?>")
return re.sub(regex, "", str(text).strip())
def converti(self, stringa) -> list:
regex = re.compile(r"\s*,\s*")
return self.split_device_list(re.sub(regex, ",", stringa))
def has_numbers(self, string):
numbers = re.compile("\d{4,}|\d{3,}\.\d")
return numbers.search(string)
def set_sensor(self, state, error):
attributes = {}
attributes["icon"] = "mdi:amazon-alexa"
attributes["Error"] = error
self.set_state("sensor.centro_notifiche", state=state, attributes=attributes)
def when_tts_done_do(self, callback: callable) -> None:
"""Callback when the queue of tts messages are done."""
self._when_tts_done_callback_queue.put(callback)
def worker(self):
while True:
try:
data = self.queue.get()
self.lg(f"WORKER: {type(data)} value {data}")
alexa_player = data["alexa_player"]
self.volume_get(alexa_player, data["default_restore_volume"])
self.volume_set(alexa_player, data["volume"])
# Replace and clean message
message_clean = self.replace_regular(data["text"], SUB_VOICE)
self.lg(f"INPUT MESSAGE: {data['text']}")
self.lg(f"MESSAGE CLEAN: {message_clean}")
# Speech time calculator
# words = len(message_clean.split())
# chars = message_clean.count("")
words = len(self.remove_tags(message_clean).split())
chars = self.remove_tags(message_clean).count("")
duration = (words * 0.007) * 60
# Extra time
if self.has_numbers(message_clean):
data["wait_time"] += 4
self.lg(f"OK NUMBER! ADDED EXTRA TIME: {data['wait_time']}")
if (chars / words) > 7 and chars > 90 or data["alexa_audio"] is not None:
data["wait_time"] += 7
self.lg(f"OK ADDED EXTRA TIME: {data['wait_time']}")
# Alexa type-method
if "tts" in data["alexa_type"]:
alexa_data = {"type": "tts"}
else:
data["wait_time"] += 1.5
alexa_data = {
"type": data["alexa_type"],
"method": data["alexa_method"],
}
# TAGS SSML
if data["ssml_switch"] and not "<speak>" in message_clean:
voice = "Alexa" if data["alexa_voice"] not in VOICE_NAMES else data["alexa_voice"]
whisper = data["whisper"]
if "Alexa" in voice and not whisper:
message_clean = self.find_speechcon(message_clean)
message_clean = self.lang_tag(message_clean, data["language"])
if "Alexa" not in voice:
message_clean = self.voice_tag(message_clean, voice)
message_clean = self.audio_tag(data["alexa_audio"]) + message_clean
message_clean = self.prosody_tag(message_clean, data["rate"], data["pitch"], data["ssml_volume"])
# -->
rate = self.inbetween(20, data["rate"], 200) # TODO
if rate < 100:
duration += (100 - rate) * (duration / 100)
elif rate > 100:
duration /= 2
# -->
if whisper:
message_clean = self.effect_tag(message_clean)
if "tts" in data["alexa_type"]:
message_clean = self.speak_tag(message_clean)
self.lg(f"OK SSML TAGS: {message_clean}")
# Estimate reading time
duration += data["wait_time"]
self.lg(f"DURATION-WAIT: {duration} - words: {words} - Chars: {chars}")
# Speak >>>
self.call_service(
NOTIFY + data["alexa_notifier"],
data=alexa_data,
target=alexa_player,
message=message_clean.strip(),
)
time.sleep(duration if duration > 0 else 0)
# Restore volume
self.volume_set(alexa_player, data["volume"], restore=True)
except Exception as ex:
self.log("An error occurred in Alexa Manager (worker): {}".format(ex), level="ERROR")
self.log(f"DATA: {data}", level="ERROR")
self.set_sensor("Alexa Manager - Worker Error ", ex)
self.queue.task_done()
if self.queue.qsize() == 0:
try:
while self._when_tts_done_callback_queue.qsize() > 0:
callback_func = self._when_tts_done_callback_queue.get_nowait()
callback_func() # Call the callback
self._when_tts_done_callback_queue.task_done()
                except Exception as callback_err:
                    # also raised if the callback queue is drained concurrently (queue.Empty)
                    self.log("Error in the TTS done callback", level="ERROR")
                    self.set_sensor("Alexa Manager - CallBack Error ", callback_err)
self.lg("---------------------------------------------------------\n")
def _check_alexa(self, service):
""" Get the media players from the alexa_media service in home assistant. """
self.hass_config = self.get_plugin_config()
components = self.hass_config["components"]
if service in components:
exclude = [service, "this_device", "_apps"]
            # Find the alexa_media notify services (alexa_media, alexa_media_xxname, ...)
cehck_alexa = [
s["service"] # .replace("alexa_media_", "media_player.")
for s in self.list_services(namespace="default")
if "notify" in s["domain"] and service in s["service"]
]
self.lg(f"OK, Service: {cehck_alexa}")
            # Convert alexa_media_ services to media_player. entity ids and check that media_player.xxname... exists
service_replace = [
mp.replace("alexa_media_", "media_player.") for mp in cehck_alexa if mp != "alexa_media" # Extra
]
self.lg(f"OK, Entity: {service_replace}")
            # Filter with the exclude list - player_alexa holds the resulting media_player entities
self.player_alexa = [
s for s in service_replace if self.entity_exists(s) and not any(player in s for player in exclude)
]
self.lg(f"OK, found the Alexa Media component. List of media players: {self.player_alexa}")
###---------
# """ GEt Friendly Name from Entity. """
names = [self.friendly_name(name) for name in self.player_alexa]
self.lg(f"FRIENDLY_NAME: {names}")
selectoptions = self.get_state(self.alexa_select_media_player, attribute="options")
self.lg(str(f"INPUT SELECT OPTIONS: {selectoptions} - TYPE: {type(selectoptions)}"))
            # Check whether the friendly name exists in the input select
check_alexa_options = [x for x in self.player_alexa if self.friendly_name(x) in selectoptions]
self.lg(str(f"ENTITY_ID MEDIA_PLAYER IN INPUT SELECTS {check_alexa_options}"))
###---------
            # Convert friendly names to entity ids - return a list and a dict of the entities in the input select
# selectoptions = self.get_state(self.alexa_select_media_player, attribute="options")
all_state = self.get_state()
self.list_select = []
self.dict_select = {}
for entity, state in all_state.items():
domain, name = entity.split(".")
friendly_name = state["attributes"].get("friendly_name")
if domain in ["media_player", "group", "sensor"] and friendly_name in selectoptions:
self.list_select.append(entity)
for select in selectoptions:
if select.lower() == friendly_name.lower(): # .casefold()
self.dict_select[friendly_name.lower()] = entity
self.lg(str(f"LIST ENTITY_ID SELECT OPTIONS: {self.list_select}"))
self.lg(str(f"DICTIONARY NAME-ENTITY_ID: {self.dict_select}"))
return cehck_alexa
# self.log(
# f"I can't find the Alexa Media component\n- https://github.com/custom-components/alexa_media_player",
# level="ERROR",
# )
return
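# Illustrative sketch (not used by the class above): the sorted()-based clamp behind
# Alexa_Manager.inbetween() and the way rate/pitch/volume are turned into an SSML
# <prosody> tag. The names clamp and demo_prosody are hypothetical.
def clamp(minv, value, maxv):
    # sorted([min, value, max])[1] is always the middle element, i.e. value
    # limited to the [min, max] range.
    return sorted([minv, value, maxv])[1]

def demo_prosody(text, rate=150.0, pitch=10.0, volume=0.0):
    rate_attr = f"{clamp(20, rate, 200)}%"            # 20% .. 200%
    pitch_attr = f"{clamp(-33.3, pitch, 50):+g}%"     # -33.3% .. +50%
    volume_attr = f"{clamp(-50, volume, 4.08):+g}dB"  # up to +4.08dB
    return f"<prosody rate='{rate_attr}' pitch='{pitch_attr}' volume='{volume_attr}'> {text} </prosody>"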
|
_conn_proc.py
|
import threading as th
import multiprocessing as mp
from modi.task.conn_task import ConnTask
from modi.task.ser_task import SerTask
from modi.task.can_task import CanTask
from modi.task.spp_task import SppTask
class ConnProc(mp.Process):
def __init__(self, recv_q, send_q, conn_mode, module_uuid, verbose,
init_flag, port=None):
super().__init__()
params = [recv_q, send_q, verbose]
if conn_mode.startswith("b"):
params.append(module_uuid)
if conn_mode.startswith('s'):
params.append(port)
self.__task = self.__init_task(conn_mode)(*params)
self.__delay = 0.05 if isinstance(self.__task, SppTask) else 0.001
self.__init_flag = init_flag
def __init_task(self, conn_mode: str) -> ConnTask:
"""Initialize task with given connection mode
:param conn_mode: Desired connection mode
:type conn_mode: str
:return: Corresponding connection task
:rtype: ConnTask
"""
if conn_mode.startswith("b"):
return SppTask
return CanTask if self.__is_modi_pi() else SerTask
@staticmethod
def __is_modi_pi() -> bool:
"""Returns whether connection is on pi
:return: true is on pi
:rtype: bool
"""
return ConnTask.is_on_pi() and \
not ConnTask.is_network_module_connected()
def run(self) -> None:
"""Run the connection
:return: None
"""
self.__task.open_conn()
recv_thread = th.Thread(
target=self.__task.run_recv_data, args=(self.__delay,)
)
recv_thread.daemon = True
recv_thread.start()
send_thread = th.Thread(
target=self.__task.run_send_data, args=(self.__delay,)
)
send_thread.daemon = True
send_thread.start()
self.__init_flag.set()
recv_thread.join()
send_thread.join()
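# Illustrative sketch (not part of the MODI package): the general pattern used by
# ConnProc - a child process that spawns daemon worker threads and signals readiness
# through an Event - using only the standard library. All names here are hypothetical.
class EchoProc(mp.Process):
    def __init__(self, recv_q, send_q, init_flag):
        super().__init__()
        self.__recv_q = recv_q
        self.__send_q = send_q
        self.__init_flag = init_flag

    def run(self):
        def pump():
            # Forward everything from the send queue back to the recv queue.
            while True:
                self.__recv_q.put(self.__send_q.get())

        worker = th.Thread(target=pump, daemon=True)
        worker.start()
        self.__init_flag.set()  # tell the parent process we are up
        worker.join()

# Typical wiring (not executed at import):
#   recv_q, send_q, flag = mp.Queue(), mp.Queue(), mp.Event()
#   EchoProc(recv_q, send_q, flag).start()
#   flag.wait()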
|
autoreload.py
|
import functools
import itertools
import logging
import os
import pathlib
import signal
import subprocess
import sys
import threading
import time
import traceback
import weakref
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter
from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
autoreload_started = Signal()
file_changed = Signal(providing_args=['file_path', 'kind'])
DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'
logger = logging.getLogger('django.utils.autoreload')
# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None
try:
import termios
except ImportError:
termios = None
try:
import pywatchman
except ImportError:
pywatchman = None
def check_errors(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
raise _exception[1]
def ensure_echo_on():
"""
Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after reload.
"""
if not termios or not sys.stdin.isatty():
return
attr_list = termios.tcgetattr(sys.stdin)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def iter_all_python_module_files():
# This is a hot path during reloading. Create a stable sorted list of
# modules based on the module name and pass it to iter_modules_and_files().
# This ensures cached results are returned in the usual case that modules
# aren't loaded on the fly.
keys = sorted(sys.modules)
modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes))
return iter_modules_and_files(modules, frozenset(_error_files))
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
"""Iterate through all modules needed to be watched."""
sys_file_paths = []
for module in modules:
# During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
# are added to sys.modules, however they are types not modules and so
# cause issues here.
if not isinstance(module, ModuleType):
continue
if module.__name__ == '__main__':
# __main__ (usually manage.py) doesn't always have a __spec__ set.
# Handle this by falling back to using __file__, resolved below.
# See https://docs.python.org/reference/import.html#main-spec
sys_file_paths.append(module.__file__)
continue
if getattr(module, '__spec__', None) is None:
continue
spec = module.__spec__
# Modules could be loaded from places without a concrete location. If
# this is the case, skip them.
if spec.has_location:
origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin
sys_file_paths.append(origin)
results = set()
for filename in itertools.chain(sys_file_paths, extra_files):
if not filename:
continue
path = pathlib.Path(filename)
if not path.exists():
# The module could have been removed, don't fail loudly if this
# is the case.
continue
results.add(path.resolve().absolute())
return frozenset(results)
@functools.lru_cache(maxsize=1)
def common_roots(paths):
"""
Return a tuple of common roots that are shared between the given paths.
File system watchers operate on directories and aren't cheap to create.
Try to find the minimum set of directories to watch that encompass all of
the files that need to be watched.
"""
# Inspired from Werkzeug:
# https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
# Create a sorted list of the path components, longest first.
path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
tree = {}
for chunks in path_parts:
node = tree
# Add each part of the path to the tree.
for chunk in chunks:
node = node.setdefault(chunk, {})
# Clear the last leaf in the tree.
node.clear()
# Turn the tree into a list of Path instances.
def _walk(node, path):
for prefix, child in node.items():
yield from _walk(child, path + (prefix,))
if not node:
yield Path(*path)
return tuple(_walk(tree, ()))
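# Illustrative note (not part of Django): what common_roots() computes. Any directory
# that is nested inside another watched directory is dropped, so the reloader only
# creates watchers for the outermost roots. The helper below is hypothetical and
# exists only to make that behaviour concrete.
def _demo_common_roots():
    watched = frozenset({
        Path('/app'),
        Path('/app/project'),   # nested inside /app, will be dropped
        Path('/srv/data'),
    })
    # Expected result (order aside): (Path('/app'), Path('/srv/data'))
    return common_roots(watched)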
def sys_path_directories():
"""
Yield absolute directories from sys.path, ignoring entries that don't
exist.
"""
for path in sys.path:
path = Path(path)
if not path.exists():
continue
path = path.resolve().absolute()
# If the path is a file (like a zip file), watch the parent directory.
if path.is_file():
yield path.parent
else:
yield path
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
executable is reported to not have the .exe extension which can cause bugs
on reloading.
"""
import django.__main__
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if sys.argv[0] == django.__main__.__file__:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
else:
args += sys.argv
return args
def trigger_reload(filename):
logger.info('%s changed, reloading.', filename)
sys.exit(3)
def restart_with_reloader():
new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
args = get_child_arguments()
while True:
exit_code = subprocess.call(args, env=new_environ, close_fds=False)
if exit_code != 3:
return exit_code
class BaseReloader:
def __init__(self):
self.extra_files = set()
self.directory_globs = defaultdict(set)
self._stop_condition = threading.Event()
def watch_dir(self, path, glob):
path = Path(path)
if not path.is_absolute():
raise ValueError('%s must be absolute.' % path)
logger.debug('Watching dir %s with glob %s.', path, glob)
self.directory_globs[path].add(glob)
def watch_file(self, path):
path = Path(path)
if not path.is_absolute():
raise ValueError('%s must be absolute.' % path)
logger.debug('Watching file %s.', path)
self.extra_files.add(path)
def watched_files(self, include_globs=True):
"""
Yield all files that need to be watched, including module files and
files within globs.
"""
yield from iter_all_python_module_files()
yield from self.extra_files
if include_globs:
for directory, patterns in self.directory_globs.items():
for pattern in patterns:
yield from directory.glob(pattern)
def wait_for_apps_ready(self, app_reg, django_main_thread):
"""
Wait until Django reports that the apps have been loaded. If the given
thread has terminated before the apps are ready, then a SyntaxError or
other non-recoverable error has been raised. In that case, stop waiting
for the apps_ready event and continue processing.
Return True if the thread is alive and the ready event has been
triggered, or False if the thread is terminated while waiting for the
event.
"""
while django_main_thread.is_alive():
if app_reg.ready_event.wait(timeout=0.1):
return True
else:
logger.debug('Main Django thread has terminated before apps are ready.')
return False
def run(self, django_main_thread):
logger.debug('Waiting for apps ready_event.')
self.wait_for_apps_ready(apps, django_main_thread)
from django.urls import get_resolver
# Prevent a race condition where URL modules aren't loaded when the
# reloader starts by accessing the urlconf_module property.
try:
get_resolver().urlconf_module
except Exception:
# Loading the urlconf can result in errors during development.
# If this occurs then swallow the error and continue.
pass
logger.debug('Apps ready_event triggered. Sending autoreload_started signal.')
autoreload_started.send(sender=self)
self.run_loop()
def run_loop(self):
ticker = self.tick()
while not self.should_stop:
try:
next(ticker)
except StopIteration:
break
self.stop()
def tick(self):
"""
This generator is called in a loop from run_loop. It's important that
the method takes care of pausing or otherwise waiting for a period of
time. This split between run_loop() and tick() is to improve the
testability of the reloader implementations by decoupling the work they
do from the loop.
"""
raise NotImplementedError('subclasses must implement tick().')
@classmethod
def check_availability(cls):
raise NotImplementedError('subclasses must implement check_availability().')
def notify_file_changed(self, path):
results = file_changed.send(sender=self, file_path=path)
logger.debug('%s notified as changed. Signal results: %s.', path, results)
if not any(res[1] for res in results):
trigger_reload(path)
# These are primarily used for testing.
@property
def should_stop(self):
return self._stop_condition.is_set()
def stop(self):
self._stop_condition.set()
class StatReloader(BaseReloader):
SLEEP_TIME = 1 # Check for changes once per second.
def tick(self):
mtimes = {}
while True:
for filepath, mtime in self.snapshot_files():
old_time = mtimes.get(filepath)
mtimes[filepath] = mtime
if old_time is None:
logger.debug('File %s first seen with mtime %s', filepath, mtime)
continue
elif mtime > old_time:
logger.debug('File %s previous mtime: %s, current mtime: %s', filepath, old_time, mtime)
self.notify_file_changed(filepath)
time.sleep(self.SLEEP_TIME)
yield
def snapshot_files(self):
# watched_files may produce duplicate paths if globs overlap.
seen_files = set()
for file in self.watched_files():
if file in seen_files:
continue
try:
mtime = file.stat().st_mtime
except OSError:
# This is thrown when the file does not exist.
continue
seen_files.add(file)
yield file, mtime
@classmethod
def check_availability(cls):
return True
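# Illustrative sketch only: because SLEEP_TIME is a class attribute, the
# polling frequency could (hypothetically) be tuned by subclassing; this
# FastStatReloader class is not part of the original code.
class FastStatReloader(StatReloader):
    SLEEP_TIME = 0.25  # poll four times per second instead of once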
class WatchmanUnavailable(RuntimeError):
pass
class WatchmanReloader(BaseReloader):
def __init__(self):
self.roots = defaultdict(set)
self.processed_request = threading.Event()
self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5))
super().__init__()
@cached_property
def client(self):
return pywatchman.client(timeout=self.client_timeout)
def _watch_root(self, root):
# In practice this shouldn't occur, however, it's possible that a
# directory that doesn't exist yet is being watched. If it's outside of
# sys.path then this will end up a new root. How to handle this isn't
# clear: Not adding the root will likely break when subscribing to the
# changes, however, as this is currently an internal API, no files
# will be being watched outside of sys.path. Fixing this by checking
# inside watch_glob() and watch_dir() is expensive, instead this could
# fall back to the StatReloader if this case is detected? For
# now, watching its parent, if possible, is sufficient.
if not root.exists():
if not root.parent.exists():
logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root)
return
root = root.parent
result = self.client.query('watch-project', str(root.absolute()))
if 'warning' in result:
logger.warning('Watchman warning: %s', result['warning'])
logger.debug('Watchman watch-project result: %s', result)
return result['watch'], result.get('relative_path')
@functools.lru_cache()
def _get_clock(self, root):
return self.client.query('clock', root)['clock']
def _subscribe(self, directory, name, expression):
root, rel_path = self._watch_root(directory)
query = {
'expression': expression,
'fields': ['name'],
'since': self._get_clock(root),
'dedup_results': True,
}
if rel_path:
query['relative_root'] = rel_path
logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
self.client.query('subscribe', root, name, query)
def _subscribe_dir(self, directory, filenames):
if not directory.exists():
if not directory.parent.exists():
logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'files-parent-%s' % directory.name
filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
directory = directory.parent
expression = ['name', filenames, 'wholename']
else:
prefix = 'files'
expression = ['name', filenames]
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def _watch_glob(self, directory, patterns):
"""
Watch a directory with a specific glob. If the directory doesn't yet
exist, attempt to watch the parent directory and amend the patterns to
include this. It's important this method isn't called more than once per
directory when updating all subscriptions. Subsequent calls will
overwrite the named subscription, so it must include all possible glob
expressions.
"""
prefix = 'glob'
if not directory.exists():
if not directory.parent.exists():
logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'glob-parent-%s' % directory.name
patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
directory = directory.parent
expression = ['anyof']
for pattern in patterns:
expression.append(['match', pattern, 'wholename'])
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def watched_roots(self, watched_files):
extra_directories = self.directory_globs.keys()
watched_file_dirs = [f.parent for f in watched_files]
sys_paths = list(sys_path_directories())
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
def _update_watches(self):
watched_files = list(self.watched_files(include_globs=False))
found_roots = common_roots(self.watched_roots(watched_files))
logger.debug('Watching %s files', len(watched_files))
logger.debug('Found common roots: %s', found_roots)
# Set up initial roots for performance, shortest roots first.
for root in sorted(found_roots):
self._watch_root(root)
for directory, patterns in self.directory_globs.items():
self._watch_glob(directory, patterns)
# Group sorted watched_files by their parent directory.
sorted_files = sorted(watched_files, key=lambda p: p.parent)
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
# These paths need to be relative to the parent directory.
self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])
def update_watches(self):
try:
self._update_watches()
except Exception as ex:
# If the service is still available, raise the original exception.
if self.check_server_status(ex):
raise
def _check_subscription(self, sub):
subscription = self.client.getSubscription(sub)
if not subscription:
return
logger.debug('Watchman subscription %s has results.', sub)
for result in subscription:
# When using watch-project, it's not simple to get the relative
# directory without storing some specific state. Store the full
# path to the directory in the subscription name, prefixed by its
# type (glob, files).
root_directory = Path(result['subscription'].split(':', 1)[1])
logger.debug('Found root directory %s', root_directory)
for file in result.get('files', []):
self.notify_file_changed(root_directory / file)
def request_processed(self, **kwargs):
logger.debug('Request processed. Setting update_watches event.')
self.processed_request.set()
def tick(self):
request_finished.connect(self.request_processed)
self.update_watches()
while True:
if self.processed_request.is_set():
self.update_watches()
self.processed_request.clear()
try:
self.client.receive()
except pywatchman.SocketTimeout:
pass
except pywatchman.WatchmanError as ex:
logger.debug('Watchman error: %s, checking server status.', ex)
self.check_server_status(ex)
else:
for sub in list(self.client.subs.keys()):
self._check_subscription(sub)
yield
def stop(self):
self.client.close()
super().stop()
def check_server_status(self, inner_ex=None):
"""Return True if the server is available."""
try:
self.client.query('version')
except Exception:
raise WatchmanUnavailable(str(inner_ex)) from inner_ex
return True
@classmethod
def check_availability(cls):
if not pywatchman:
raise WatchmanUnavailable('pywatchman not installed.')
client = pywatchman.client(timeout=0.1)
try:
result = client.capabilityCheck()
except Exception:
# The service is down?
raise WatchmanUnavailable('Cannot connect to the watchman service.')
version = get_version_tuple(result['version'])
# Watchman 4.9 includes multiple improvements to watching project
# directories as well as case insensitive filesystems.
logger.debug('Watchman version %s', version)
if version < (4, 9):
raise WatchmanUnavailable('Watchman 4.9 or later is required.')
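# Usage note (a sketch, not Django documentation): the Watchman client timeout
# used above is read from the DJANGO_WATCHMAN_TIMEOUT environment variable as
# an integer number of seconds in __init__, so a slow filesystem can be
# accommodated by exporting it before the reloader starts, e.g.:
#
#     DJANGO_WATCHMAN_TIMEOUT=10 python manage.py runserver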
def get_reloader():
"""Return the most suitable reloader for this environment."""
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable:
return StatReloader()
return WatchmanReloader()
def start_django(reloader, main_func, *args, **kwargs):
ensure_echo_on()
main_func = check_errors(main_func)
django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs, name='django-main-thread')
django_main_thread.daemon = True
django_main_thread.start()
while not reloader.should_stop:
try:
reloader.run(django_main_thread)
except WatchmanUnavailable as ex:
# It's possible that the watchman service shuts down or otherwise
# becomes unavailable. In that case, use the StatReloader.
reloader = StatReloader()
logger.error('Error connecting to Watchman: %s', ex)
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
def run_with_reloader(main_func, *args, **kwargs):
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true':
reloader = get_reloader()
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
start_django(reloader, main_func, *args, **kwargs)
else:
exit_code = restart_with_reloader()
sys.exit(exit_code)
except KeyboardInterrupt:
pass
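# Usage sketch (illustrative only): run_with_reloader() wraps a long-running
# callable and, via restart_with_reloader(), restarts the child process when a
# watched file changes. serve_forever below is a hypothetical placeholder for
# the caller's own blocking entry point; it is not defined in this module.
#
#     from django.utils.autoreload import run_with_reloader
#
#     def serve_forever():
#         ...  # start the development server and block until shutdown
#
#     run_with_reloader(serve_forever)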
|
cache.py
|
from octopus.core import app
from octopus.modules.cache import models
from octopus.lib import plugin
import os
from datetime import datetime, timedelta
from operator import itemgetter
from multiprocessing import Process
class CacheException(Exception):
pass
def load_file(name):
cf = models.CachedFile.pull(name)
# if there is no file to serve, or the file is stale, trigger the regen
if cf is None or cf.is_stale():
trigger_regen(name)
# whether we triggered the regen or not, return the current result. Next caller will
# get the regenerated file
return cf
def trigger_regen(name):
p = Process(target=generate_file, args=(name,))
p.start()
def generate_file(name, respect_timeout=False):
# check that we have a generator for this cache type
generator = app.config.get("CACHE_GENERATORS", {}).get(name, {}).get("class")
if generator is None:
raise CacheException("No generator configured for {x}".format(x=name))
# figure out the timeout on this file
timeout = app.config.get("CACHE_GENERATORS", {}).get(name, {}).get("timeout")
if timeout is None:
raise CacheException("No timeout specified for {x}".format(x=name))
# get the file record to which this pertains (or make one if it is new)
cf = models.CachedFile.pull(name)
if cf is None:
cf = models.CachedFile()
cf.id = name
else:
# if the file is currently generating (perhaps in another process), just return
if cf.generating:
return
# if the file is not stale, and we are respecting the timeout, just return
if not cf.is_stale() and respect_timeout:
return
# switch the generating flag to true and re-save
cf.generating = True
cf.save() # Note that we don't do a blocking save, because we want to update this record again asap, and this data is throwaway
# create a suitable filename
filename = "{x}_{y}".format(x=name, y=datetime.utcnow().strftime("%Y-%m-%d_%H%M"))
# get the cache directory sorted out
dir = os.path.join(app.config.get("CACHE_DIR"), name)
if not os.path.exists(dir):
os.makedirs(dir)
# finally get the file path ready
filepath = os.path.join(dir, filename)
# now instantiate the class and ask it to generate the file
klazz = plugin.load_class(generator)
inst = klazz()
inst.generate(filepath)
# now calculate the timeout
to = datetime.utcnow() + timedelta(seconds=timeout)
# now update the cached file record
cf.filename = filename
cf.generating = False
cf.timeout = to.strftime("%Y-%m-%dT%H:%M:%SZ")
cf.save()
# finally, clean up the cache directory of any old files
cleanup_cache_dir(dir)
return cf
def cleanup_cache_dir(dir):
# remove all but the two latest files
files = [(c, os.path.getmtime(os.path.join(dir, c)) ) for c in os.listdir(dir)]
sorted_files = sorted(files, key=itemgetter(1), reverse=True)
if len(sorted_files) > 2:
for c, lm in sorted_files[2:]:
os.remove(os.path.join(dir, c))
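# Configuration sketch (illustrative only; the keys are inferred from the
# lookups in generate_file() above, and the names "mycache" and
# "service.caches.MyGenerator" are hypothetical):
#
#     CACHE_DIR = "/var/cache/service"
#     CACHE_GENERATORS = {
#         "mycache": {
#             "class": "service.caches.MyGenerator",  # must provide generate(filepath)
#             "timeout": 3600,                         # seconds before the cached file is stale
#         }
#     }
#
# load_file("mycache") then serves the current CachedFile record (possibly
# stale) and triggers a background regeneration when needed.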
|
user.py
|
from logger import Logger
from protocol import Header,HeaderParser,Protocol
from mail import sendRecoveryMail
import requests, json, hashlib, os, socket, threading, time
class URL(object):
local = 'http://127.0.0.1:5000/api/'
remote = 'http://molly.ovh:5000/api/'
class User(object):
def __init__(self, conn, addr):
super().__init__()
self.address = addr
self.socket = conn
self.connected = True
self.uuid = None
def __repr__(self):
return str(self.address)+' '+str(self.uuid)
def quit(self, message: str):
self.connected = False
self.socket.close()
Logger.log('Client closed from:'+str(self.address)+' '+str(message))
def handle(self):
r = self.socket.recv(3)
if r != b'':
headerType, size = HeaderParser.decode(r)
data = Protocol.decode(self.socket.recv(size))
h,p = None,None
if headerType == Header.LOG:
r = requests.get(URL.local+'users', params={'username':data['login']})
j = r.json()
if r.status_code == 200 and j != {} and data['password'] != 'X':
u2 = bytes.fromhex(j['password'])
u1 = bytes.fromhex(data['password'])
if u1[32:] == u2[32:]:
h,p = Protocol.encode(Header.SES, session = self.uuid)
self.transfer(h,p)
Logger.log('User logged in ('+str(data['login'])+')')
return UserLogged(self,j['id'],j['username'])
elif r.status_code == 200 and j != {} and data['password'] == 'X':
h,p = Protocol.encode(Header.LOG,login=data['login'],password=j['password'])
self.transfer(h,p)
return None
h,p = Protocol.encode(Header.ERR, msg = 'Invalid login data')
Logger.log('User login invalid data '+ str(self.address))
elif headerType == Header.REG:
r = requests.post(URL.local+'users', json={'username':data['login'], 'email': data['email'], 'password': data['password']})
if r.status_code == 201:
h,p = Protocol.encode(Header.ACK, msg = 'Created Account')
Logger.log('User registered ')
elif r.status_code == 409:
h,p = Protocol.encode(Header.ERR, msg = 'Account already exists')
msg = r.json()['Message']
Logger.log('Attempt to create a user that already exists ('+str(data['login'])+')')
else:
h,p = Protocol.encode(Header.ERR, msg = 'Invalid register data')
Logger.log('User register invalid data ')
elif headerType == Header.FRP:
ru = requests.get(URL.local+'users', params={'username':data['login']})
j = ru.json()
if ru.status_code == 200:
if j != {}:
mail = j['email']
r = requests.post('https://molly.ovh:5050/token/'+data['login'],verify=False)
j2 = r.json()
token = j2['token']
threading.Thread(target=sendRecoveryMail,args=(mail,token,)).start()
Logger.log('FRP mail sent')
h,p = Protocol.encode(Header.ACK, msg = 'Recovery mail sent')
Logger.log('FRP used')
else:
h,p = Protocol.encode(Header.ERR, msg = 'Error occurred')
Logger.log('FRP error')
elif headerType == Header.DIS:
raise socket.error(data['msg'])
if h != None and p != None:
self.transfer(h,p)
return None
def transfer(self,h,p):
try:
self.socket.send(h)
self.socket.send(p)
except Exception:
# The peer may already have disconnected; ignore transfer failures.
pass
def passwordHash(self, password: str, salt=None):
salt = salt or os.urandom(32)
key = hashlib.pbkdf2_hmac('sha256',password.encode(),salt,10000)
return (salt+key)
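# Illustrative helper (not part of the original protocol code): passwordHash()
# returns 32 bytes of salt followed by the derived key, which is why the login
# handler above compares only the [32:] slice of the two hex-decoded values.
# verify_password_sketch is a hypothetical name added for clarity.
def verify_password_sketch(stored_hex, candidate_password):
    stored = bytes.fromhex(stored_hex)
    salt, key = stored[:32], stored[32:]
    # Re-derive with the stored salt and the same parameters as passwordHash().
    candidate_key = hashlib.pbkdf2_hmac('sha256', candidate_password.encode(), salt, 10000)
    return candidate_key == key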
class UserLogged(User):
def __init__(self, user: User, dbID, username):
super().__init__(user.socket,user.address)
self.uuid = user.uuid
self.dbID = dbID
self.username = username
self.usersThread = threading.Thread(target=self.userListUpdateThread)
self.usersThread.start()
self.reciever = None
self.historyThread = threading.Thread(target=self.historyUpdateThread)
self.historyThread.start()
def __repr__(self):
return str(self.address)+' '+str(self.uuid)+' '+self.username
def userListUpdateThread(self):
while self.connected:
r = requests.get(URL.local+'users')
l = []
for x in r.json():
if x['username'] != self.username:
l.append(x['username'])
if l != []:
h,p = Protocol.encode(Header.LIS, users = l)
self.transfer(h,p)
time.sleep(10)
def historyUpdateThread(self):
while self.connected:
if self.reciever != None:
r = requests.get(URL.local+'history-manager',params={'first_username':self.username,'second_username':self.reciever})
if r.status_code == 200:
history = []
for x in r.json():
history.append('['+str(x['date'])+'] '+str(x['username'])+': '+str(x['content']))
if history != []:
if len(history) > 200:
history.reverse()
history = history[:200]
history.reverse()
h,p = Protocol.encode(Header.HIS, history = history)
self.transfer(h,p)
time.sleep(0.5)
def handle(self):
r = self.socket.recv(3)
if r != b'':
headerType, size = HeaderParser.decode(r)
data = Protocol.decode(self.socket.recv(size))
h,p = None,None
if headerType == Header.DIS:
raise socket.error('Disconnect')
elif headerType == Header.MSG:
historyID = self.checkHistory(self.username,data['reciever'])
if historyID != None:
r = requests.post(URL.local+'history-manager', json={'history_id':historyID, 'username':self.username, 'content': data['msg']})
elif headerType == Header.DEL:
r = requests.delete(URL.local+'users', params={'username':self.username})
if r.status_code == 200:
h,p = Protocol.encode(Header.ACK, msg = 'Deletion successful')
else:
h,p = Protocol.encode(Header.ERR, msg = 'Deletion failed')
elif headerType == Header.CHP:
r = requests.patch(URL.local+'users', json=({'password':data['password']}), params={'username':self.username})
if r.status_code == 200:
h,p = Protocol.encode(Header.ACK, msg = 'Password change successful')
else:
h,p = Protocol.encode(Header.ERR, msg = 'Change password failed')
elif headerType == Header.CHM:
r = requests.patch(URL.local+'users', json=({'email':data['email']}) ,params={'username':self.username})
if r.status_code == 200:
h,p = Protocol.encode(Header.ACK, msg = 'Mail change successful')
else:
h,p = Protocol.encode(Header.ERR, msg = 'Change mail failed')
elif headerType == Header.UPD:
self.reciever = data['reciever']
if self.reciever != None:
self.checkHistory(self.username,self.reciever)
if h != None and p != None:
self.transfer(h,p)
return None
def checkHistory(self, u1, u2):
r = requests.get(URL.local+'history-manager/historyID', params={'first_username': u1, 'second_username': u2})
if r.status_code == 200:
historyID = r.json()['history_id']
return historyID
elif r.status_code == 404:
r = requests.post(URL.local+'history-manager', json={'first_username': u1, 'second_username': u2})
if r.status_code == 201:
r = requests.get(URL.local+'history-manager/historyID', params={'first_username': u1, 'second_username': u2})
historyID = r.json()['history_id']
return historyID
return None
return None
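# Usage sketch (assumptions: the accept loop lives elsewhere and is not shown
# in this file; only User/UserLogged come from this module):
#
#     user = User(conn, addr)
#     while user.connected:
#         upgraded = user.handle()
#         if upgraded is not None:   # a successful Header.LOG returns a UserLogged
#             user = upgraded        # keep handling traffic as the logged-in user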
|
controller_gui.py
|
import threading
import random
from time import sleep
from math import sqrt,cos,sin,pi
from functools import partial
from Tkinter import *
import tkMessageBox as messagebox
from ttk import *
import tkFont
from PIL import Image, ImageTk
import numpy as np
import networkx as nx
from event import myEvent
from Object import Object
win_size = '1100x600'
g_height = 600
g_width = 1100
fr_topo_height = 400
fr_topo_width = 400
qpktThreshold = 0
rpktThreshold = 0
class MyScrollbar(Scrollbar, object):
""" scrollbar management """
def __init__(self, parent, canvas, nodes, node_size, event = object, l_shohid="", c_shohid="", orient="horizontal", command=None):
super(MyScrollbar, self).__init__(parent, orient=orient, command=command)
self.cv = canvas
self.nodes = nodes
self.node_size = node_size
self.event = event
self.l_shohid = l_shohid
self.c_shohid = c_shohid
self.orient = orient
def set(self, a, b, nodes={}, node_size=10, l_shohid="", c_shohid=""):
super(MyScrollbar, self).set(a,b)
self.node_size = node_size
self.nodes = nodes
self.l_shohid = l_shohid
self.c_shohid = c_shohid
if self.cv.labelGw != None:
self.cv.labelGw.place_forget()
self.cv.labelRt.place_forget()
self.cv.labelSv.place_forget()
self.cv.labelVt.place_forget()
self.cv.labelCt.place_forget()
x0 = self.cv.canvasx(0)
y0 = self.cv.canvasy(0)
for node, pos in self.nodes.items():
wx = pos[0]-x0
wy = pos[1]-y0
if node[15:] == "00" :
if node[0:] == "00:00:00:04:15:00":
self.cv.labelGw.place(x=wx , y=wy+self.node_size)
self.cv.labelCt.place(x=wx+6*self.node_size, y=10*self.node_size+wy+sqrt(3)*self.node_size)
if node[0:] == "00:00:00:05:15:00":
self.cv.labelRt.place(x=wx , y=wy+self.node_size)
else:
if node[0:] == "00:00:00:00:03:03":
self.cv.labelSv.place(x=wx , y=wy+self.node_size)
if node[0:] == self.event.getVictim().mac:
self.cv.labelVt.place(x=wx , y=wy+self.node_size)
if self.l_shohid == "show":
self.cv.labelGw.place_forget()
self.cv.labelRt.place_forget()
self.cv.labelSv.place_forget()
self.cv.labelVt.place_forget()
self.cv.labelCt.place_forget()
if self.c_shohid == "show":
self.cv.labelCt.place_forget()
x = self.cv.ctrPos
self.cv.itemconfig(self.cv.controller, state="normal")
self.cv.coords(self.cv.controller, x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11])
self.cv.itemconfig(self.cv.controller, state="hidden")
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv.itemconfig(self.cv.controllers[node], state="hidden")
class ControllerGui():
def __init__(self, event, sw_mac, h_mac, topology):
""" init
"""
self.event = event
self.root = Toplevel()
self.root.title("Controller GUI")
self.root.geometry(win_size)
self.sw_mac = sw_mac
self.h_mac = h_mac
self.topology = topology
self.initStyle()
self.fr_bg = Frame(self.root, height = g_height-100, width = g_width)
self.fr_tp = Frame(self.fr_bg, height = 100, width = g_width)
# self.fr_tb = Frame(self.fr_bg, height = g_height-100, width = g_width/2)
self.cv_tp = Canvas(self.fr_bg, height = 100, width = g_width,highlightthickness=0)
self.cv_tp.create_image(0,0, image=self.t_bgPhoto, anchor = "nw")
self.fr_topo = Frame(self.fr_bg, height = fr_topo_height, width = fr_topo_width)
self.cv_topo = Canvas(self.fr_topo,bg = self.bg, height = fr_topo_height, width = fr_topo_width, highlightthickness=0)
self.cv_topo.create_image(0,0, image=self.topo_bgPhoto, anchor="nw")
self.cv_topo.labelGw = None
self.cv_topo.labelRt = None
self.cv_topo.labelSv = None
self.cv_topo.labelVt = None
self.cv_topo.labelCt = None
self.fr_mid = Frame(self.fr_bg, height = 400, width = 300, style="TFrame")
self.fr_table = Frame(self.fr_bg, height = 400, width = 400)
self.cv_btm= Canvas(self.fr_tp, height = 100, width = g_width,highlightthickness=0)
self.cv_btm.create_image(0,0, image=self.b_bgPhoto, anchor = "nw")
self.var = StringVar()
self.L1 = Label(self.fr_mid, textvariable=self.var, width=30, anchor="center", background=self.bg)
self.thres = Label(self.fr_mid, text="Threshold:", anchor="center", background=self.bg)
self.zoom = Object(x1=0, y1=0, x2=0, y2=0, area=0,
rect=self.cv_topo.create_rectangle(0,0,0,0),
width = fr_topo_width,
height = fr_topo_height)
self.zoomState = "Not"
self.zoomIn = Button(self.fr_mid, style="in.zoom.TButton", command=partial(self.topoZoom, InOut="in"))
self.zoomOut = Button(self.fr_mid, style="out.zoom.TButton", command=partial(self.topoZoom, InOut="out"))
self.usrIn = StringVar()
self.usrIn.set("")
self.thresIn = Entry(self.fr_mid, textvariable=self.usrIn, width=8, font=self.fonts)
self.enter = Button(self.fr_mid, text="Enter", command=self.getThreshold, width=5)
self.tree = Treeview(self.fr_table, columns=('col1', 'col2', 'col3', 'col4') ,show='headings')
self.ybar = Scrollbar(self.fr_table, orient=VERTICAL, command=self.tree.yview)
self.tree.column('col1', width=100, anchor='center')
self.tree.column('col2', width=100, anchor='center')
self.tree.column('col3', width=92, anchor='center')
self.tree.column('col4', width=92, anchor='center')
self.tree.heading('col1', text='name')
self.tree.heading('col2', text='port')
self.tree.heading('col3', text='q_pkt')
self.tree.heading('col4', text='r_pkt')
self.tree.configure(yscrollcommand=self.ybar.set)
self.tree.bind("<Double-1>", self.dbClick2ShowNode)
self.ge_network()
self.create_node()
self.cv_topo.l_shohid = StringVar()
self.cv_topo.l_shohid.set("show")
self.cv_topo.c_shohid = StringVar()
self.cv_topo.c_shohid.set("hide")
self.cv_topo.ctrPos = []
self.cv_topo.ctrCenter = []
self.button_quit = Button(self.fr_mid, style="Q.TButton",command=self.quit)
self.button_refresh = Button(self.fr_mid, style="R.TButton", command=self.refresh_network)
self.topo_xscroll = MyScrollbar(self.fr_topo,
canvas = self.cv_topo, nodes = self.nodes,
node_size = self.node_size, event = self.event,
l_shohid=self.cv_topo.l_shohid.get(),
c_shohid=self.cv_topo.c_shohid.get(),
orient="horizontal", command=self.cv_topo.xview)
self.topo_yscroll = MyScrollbar(self.fr_topo,
canvas = self.cv_topo, nodes = self.nodes,
node_size = self.node_size, event = self.event,
l_shohid=self.cv_topo.l_shohid.get(),
c_shohid=self.cv_topo.c_shohid.get(),
orient="vertical", command=self.cv_topo.yview)
self.cv_topo.configure(
yscrollcommand=partial(self.topo_yscroll.set,
node_size=self.node_size,
l_shohid=self.cv_topo.l_shohid.get(),
c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand=partial(self.topo_xscroll.set,
node_size = self.node_size,
l_shohid=self.cv_topo.l_shohid.get(),
c_shohid=self.cv_topo.c_shohid.get()))
#self.cv.pack()
self.cv_topo.bind('<Motion>' , self.move_handler)
self.cv_topo.bind('<Button-1>', self.click_handler)
self.button_lShowHide = Button(self.fr_mid, style="h.label.TButton", textvariable=self.cv_topo.l_shohid, command=self.labelShowHide)
self.button_cShowHide = Button(self.fr_mid, style="v.controller.TButton", textvariable=self.cv_topo.c_shohid, command=self.controllerShowHide)
self.edgeWarn_th = threading.Thread(target=self.edge_traffic_warn, args=(self.event,self.topology, self.cv_topo))
self.edgeWarn_th.setDaemon(True)
self.edgeWarn_th.start()
self.v = StringVar()
self.on_off_xpos = 150
self.on_off_ypos = 500
# self.rate_set = []
# for text, mode in modes:
# self.rate_set.append(Radiobutton(self.fr_mid, text=text, variable=self.v, value=mode, command=self.mitigation))
# self.on_off_ypos += 25
self.rate_set = Checkbutton(self.fr_mid, text="Mitigation", variable=self.v, onvalue="On", offvalue="Off", command=self.mitigation)
self.typeSetting()
self.labelShowHide()
def typeSetting(self):
""" manage objects position """
self.fr_bg.pack()
self.fr_tp.pack(side="bottom")
# self.fr_tb.pack(side="right")
# self.L1.place(x=85, y=100)
# self.thres.place(x=480, y=420)
# self.thresIn.place(x=600, y=420)
# self.button_lShowHide.place(x=10, y=370)
# self.enter.place(x=655, y=420)
# self.button_quit.place(x=850, y=450)
# self.button_refresh.place(x=850, y=400)
self.L1.grid(row=0, column=0, pady=(0,20))
self.thres.grid(row=1, column=0, sticky="W")
self.thresIn.grid(row=1, column=0)
self.enter.grid(row=1, column=0, sticky="E")
self.zoomIn.grid(row=2, column=0)
self.zoomOut.grid(row=3, column=0)
self.button_lShowHide.grid(row=4, column=0)
self.button_cShowHide.grid(row=5, column=0)
self.rate_set.grid(row=6, column=0, pady=(10,0))
# self.rate_set[1].grid(row=6, column=0)
self.button_refresh.grid(row=8, column = 0, pady=(10,0))
self.button_quit.grid(row=9, column=0)
self.topo_xscroll.pack(side="bottom", fill="x", ipady=0)
self.topo_yscroll.pack(side="right", fill="y", ipadx=0)
self.cv_topo.pack(expand="Yes", anchor="center", side="left")
self.cv_tp.pack(expand="Yes", side="top", fill="both",ipadx=0,ipady=0,padx=0,pady=0)
self.fr_topo.pack(expand="Yes", anchor="center",side="left", fill="both")
self.fr_mid.pack(expand="Yes",side="left", anchor="center")
self.fr_table.pack(expand="Yes", side="right",anchor="center",fill="both")
self.cv_btm.pack(expand="Yes", side="bottom", fill="both")
def initStyle(self):
""" manage style """
self.node_size = 10
self.fonts = ("arial", 12)
# self.fonts = tkFont.Font(family="mono", size=12)
#################### Color ####################
self.bg_tp = "black"
self.bg = "white"
self.host_color = "white"
self.sw_color = "white"
self.r_color = "#ffcc66"
self.q_color = "#B585BE"
#self.ov_r_color = "red"
#self.ov_q_color = "yellow"
self.notice_color = "#5D5D5D"
self.ctrline_color = "#d7d7ff"
#################### Img ####################
quitImage = Image.open('Img/up_quit.png').resize((180,42), Image.ANTIALIAS)
refreshImage = Image.open('Img/up_refresh.png').resize((180,42), Image.ANTIALIAS)
b_quitImage = Image.open('Img/down_quit.png').resize((180,42), Image.ANTIALIAS)
b_refreshImage = Image.open('Img/down_refresh.png').resize((180,42), Image.ANTIALIAS)
self.quitPhoto = ImageTk.PhotoImage(quitImage)
self.refreshPhoto = ImageTk.PhotoImage(refreshImage)
self.b_quitPhoto = ImageTk.PhotoImage(b_quitImage)
self.b_refreshPhoto = ImageTk.PhotoImage(b_refreshImage)
TBgImage = Image.open('Img/top_bg.png').resize((1100,100), Image.ANTIALIAS)
BBgImage = Image.open('Img/bottom_bg.png').resize((1100,100), Image.ANTIALIAS)
TopoBgImage = Image.open('Img/gray_bg.png').resize((400,400), Image.ANTIALIAS)
self.t_bgPhoto = ImageTk.PhotoImage(TBgImage)
self.b_bgPhoto = ImageTk.PhotoImage(BBgImage)
self.topo_bgPhoto = ImageTk.PhotoImage(TopoBgImage)
upzinImage = Image.open('Img/up_zoomin.png').resize((180,42), Image.ANTIALIAS)
downzinImage = Image.open('Img/down_zoomin.png').resize((180,42), Image.ANTIALIAS)
actzinImage = Image.open('Img/active_zoomin.png').resize((180,42), Image.ANTIALIAS)
diszinImage = Image.open('Img/disable_zoomin.png').resize((180,42), Image.ANTIALIAS)
self.upzinPhoto = ImageTk.PhotoImage(upzinImage)
self.downzinPhoto = ImageTk.PhotoImage(downzinImage)
self.actzinPhoto = ImageTk.PhotoImage(actzinImage)
self.diszinPhoto = ImageTk.PhotoImage(diszinImage)
upzoutImage = Image.open('Img/up_zoomout.png').resize((180,42), Image.ANTIALIAS)
downzoutImage = Image.open('Img/down_zoomout.png').resize((180,42), Image.ANTIALIAS)
actzoutImage = Image.open('Img/active_zoomout.png').resize((180,42), Image.ANTIALIAS)
diszoutImage = Image.open('Img/disable_zoomout.png').resize((180,42), Image.ANTIALIAS)
self.upzoutPhoto = ImageTk.PhotoImage(upzoutImage)
self.downzoutPhoto = ImageTk.PhotoImage(downzoutImage)
self.actzoutPhoto = ImageTk.PhotoImage(actzoutImage)
self.diszoutPhoto = ImageTk.PhotoImage(diszoutImage)
upvlImage = Image.open('Img/up_vlabel.png').resize((180,42), Image.ANTIALIAS)
downvlImage = Image.open('Img/down_vlabel.png').resize((180,42), Image.ANTIALIAS)
uphlImage = Image.open('Img/up_hlabel.png').resize((180,42), Image.ANTIALIAS)
downhlImage = Image.open('Img/down_hlabel.png').resize((180,42), Image.ANTIALIAS)
upvcImage = Image.open('Img/up_vcontroller.png').resize((180,42), Image.ANTIALIAS)
downvcImage = Image.open('Img/down_vcontroller.png').resize((180,42), Image.ANTIALIAS)
uphcImage = Image.open('Img/up_hcontroller.png').resize((180,42), Image.ANTIALIAS)
downhcImage = Image.open('Img/down_hcontroller.png').resize((180,42), Image.ANTIALIAS)
self.upvlPhoto = ImageTk.PhotoImage(upvlImage)
self.downvlPhoto = ImageTk.PhotoImage(downvlImage)
self.uphlPhoto = ImageTk.PhotoImage(uphlImage)
self.downhlPhoto = ImageTk.PhotoImage(downhlImage)
self.upvcPhoto = ImageTk.PhotoImage(upvcImage)
self.downvcPhoto = ImageTk.PhotoImage(downvcImage)
self.uphcPhoto = ImageTk.PhotoImage(uphcImage)
self.downhcPhoto = ImageTk.PhotoImage(downhcImage)
#################### Style ####################
self.style = Style()
# self.style.configure("TButton",
# font=self.fonts, relief="flat")
# self.style.map("TButton",
# # background=[("active", self.bg), ("disabled", self.bg)],
# background=[("active", "pink"), ("disabled", "#f0f0f0")],
# foreground=[("active", "white"), ("disabled", "white")]
# )
self.style.map("Selected.TButton",
background=[("active", "pink"), ("disabled", "#f0f0f0")],
foreground=[("active", "white"), ("disabled", "white")]
)
self.style.configure("Q.TButton",
background=self.bg,
font=self.fonts, relief="flat",
image = self.quitPhoto, padding=0,
)
self.style.map("Q.TButton",
background=[("active",self.bg)],
image=[("active",self.b_quitPhoto)],
)
self.style.configure("R.TButton",
background=self.bg,
font=self.fonts, relief="flat",
image = self.refreshPhoto, padding=0)
self.style.map("R.TButton",
background=[("active",self.bg)],
image=[("active",self.b_refreshPhoto)],
)
self.style.configure("zoom.TButton",
font=self.fonts, relief="flat",
background=self.bg, padding=0)
self.style.map("zoom.TButton",
background=[("active", self.bg), ("disabled", self.bg)])
self.style.configure("in.zoom.TButton", image = self.upzinPhoto)
self.style.map("in.zoom.TButton",
image = [("active", self.actzinPhoto), ("disabled", self.diszinPhoto)])
self.style.configure("S.in.zoom.TButton", image = self.downzinPhoto)
self.style.configure("out.zoom.TButton", image = self.upzoutPhoto)
self.style.map("out.zoom.TButton",
image = [("active", self.actzoutPhoto), ("disabled", self.diszoutPhoto)])
self.style.configure("S.out.zoom.TButton", image = self.downzoutPhoto)
self.style.configure("label.TButton",
font=self.fonts, relief="flat",
background=self.bg, padding=0)
self.style.map("label.TButton",
background=[("active", self.bg)])
self.style.configure("v.label.TButton", image = self.upvlPhoto)
self.style.map("v.label.TButton",
image = [("active", self.downvlPhoto)])
self.style.configure("h.label.TButton", image = self.uphlPhoto)
self.style.map("h.label.TButton",
image = [("active", self.downhlPhoto)])
self.style.configure("controller.TButton",
font=self.fonts, relief="flat",
background=self.bg, padding=0)
self.style.map("controller.TButton",
background=[("active", self.bg)])
self.style.configure("v.controller.TButton", image = self.upvcPhoto)
self.style.map("v.controller.TButton",
image = [("active", self.downvcPhoto)])
self.style.configure("h.controller.TButton", image = self.uphcPhoto)
self.style.map("h.controller.TButton",
image = [("active", self.downhcPhoto)])
self.style.configure("TFrame",
background = self.bg,
font=self.fonts
)
self.style.configure("TLabel",
background = self.bg,
font=self.fonts
)
self.style.configure("TCheckbutton",
font=self.fonts,
background = self.bg)
def ge_network(self):
""" generate network """
self.G = nx.Graph()
pos = {}
fixed = []
connected_gw = []
for port, node in self.event.node_links["s4"]:
if node != "s5":
connected_gw.append(node)
myCos = lambda x: np.cos(np.deg2rad(x))
mySin = lambda x: np.sin(np.deg2rad(x))
for s, mac in sorted(self.sw_mac.items()):
self.G.add_node(mac.encode('utf-8'))
if s in connected_gw:
pos[mac] = (0.2+1.1*myCos(90+15.0*connected_gw.index(s)), -1.4+connected_gw.index(s)*0.225)
# pos[mac] = (-1, -1.2+connected_gw.index(s)*0.225)
for port, node in self.event.node_links[s]:
if node[0] == 's':
pos[self.sw_mac[node]] = (-1.2,pos[mac][1])
fixed.append(self.sw_mac[node])
for p,n in self.event.node_links[node]:
if n[0] == 'h':
pos[self.h_mac[n]] = (-1.7, pos[mac][1])
fixed.append(self.h_mac[n])
elif node[0] == 'h':
pos[self.h_mac[node]] = (-1.7, pos[mac][1])
fixed.append(self.h_mac[node])
fixed.append(mac)
for h, mac in sorted(self.h_mac.items()):
self.G.add_node(mac.encode('utf-8'))
# pos[mac] = (0,int(h[1:])/15)
# fixed.append(mac)
edge = []
for no, link in sorted(self.topology.items()):
keys = link.keys()
edge.append((keys[0],keys[1]))
self.G.add_edges_from(edge)
pos["00:00:00:04:15:00"] = (0.2,0)
pos["00:00:00:05:15:00"] = (0.7,0)
pos["00:00:00:00:03:03"] = (1.5,0.5)
pos["00:00:00:00:02:02"] = (1.5,-0.5)
pos["00:00:00:03:15:00"] = (1.1,0.25)
pos["00:00:00:02:15:00"] = (1.1,-0.25)
fixed.append("00:00:00:04:15:00")
fixed.append("00:00:00:05:15:00")
fixed.append("00:00:00:00:03:03")
fixed.append("00:00:00:00:02:02")
fixed.append("00:00:00:03:15:00")
fixed.append("00:00:00:02:15:00")
self.links = self.G.edges # [[mac1,mac2],[mac3,mac4],...]
self.nodes = nx.spring_layout(self.G, pos=pos, fixed=fixed) # {mac1:[x1,y1], mac2:[x2, y2]}
def refresh_network(self):
""" refresh network """
self.node_size = 10
self.G.clear()
self.cv_topo.delete("all")
self.cv_topo.labelGw.destroy()
self.cv_topo.labelRt.destroy()
self.cv_topo.labelSv.destroy()
self.cv_topo.labelVt.destroy()
self.cv_topo.labelCt.destroy()
self.event.cleanObjID()
self.ge_network()
self.cv_topo.create_image(0,0, image=self.topo_bgPhoto, anchor="nw")
self.create_node()
self.zoom.width = fr_topo_width
self.zoom.height = fr_topo_height
self.cv_topo.configure(scrollregion=(0,0,self.zoom.width,self.zoom.height))
self.topoZoom(InOut = self.zoomState)
self.zoomIn.state(["!disabled"])
self.zoomOut.state(["!disabled"])
self.cv_topo.l_shohid.set("show")
self.cv_topo.c_shohid.set("show")
self.labelShowHide()
self.controllerShowHide()
def create_node(self):
""" create nodes and lines """
for node, pos in self.nodes.items():
pos[0] = (pos[0]+2)*100
pos[1] = (pos[1]+2)*100
for link in self.links:
if self.event.getQR(link[0], link[1], 1) == 'q':
# link[0] -> half : query
No = self.cv_topo.create_line(
self.nodes[link[0]][0]+self.node_size/2,
self.nodes[link[0]][1]+self.node_size/2,
(self.nodes[link[0]][0]+self.nodes[link[1]][0]+self.node_size)/2,
(self.nodes[link[0]][1]+self.nodes[link[1]][1]+self.node_size)/2,
fill=self.q_color, arrow=LAST, width=2)
self.event.putObjID(No, link[0], link[1])
# link[1] -> half : response
No = self.cv_topo.create_line(
self.nodes[link[1]][0]+self.node_size/2,
self.nodes[link[1]][1]+self.node_size/2,
(self.nodes[link[0]][0]+self.nodes[link[1]][0]+self.node_size)/2,
(self.nodes[link[0]][1]+self.nodes[link[1]][1]+self.node_size)/2,
fill=self.r_color, arrow=LAST, width=2)
self.event.putObjID(No, link[0], link[1])
elif self.event.getQR(link[0], link[1], 1) == 'r':
# link[1] -> half : query
No = self.cv_topo.create_line(
self.nodes[link[1]][0]+self.node_size/2,
self.nodes[link[1]][1]+self.node_size/2,
(self.nodes[link[0]][0]+self.nodes[link[1]][0]+self.node_size)/2,
(self.nodes[link[0]][1]+self.nodes[link[1]][1]+self.node_size)/2,
fill=self.q_color, arrow=LAST, width=2)
self.event.putObjID(No, link[0], link[1])
# link[0] -> half : response
No = self.cv_topo.create_line(
self.nodes[link[0]][0]+self.node_size/2,
self.nodes[link[0]][1]+self.node_size/2,
(self.nodes[link[0]][0]+self.nodes[link[1]][0]+self.node_size)/2,
(self.nodes[link[0]][1]+self.nodes[link[1]][1]+self.node_size)/2,
fill=self.r_color, arrow=LAST, width=2)
self.event.putObjID(No, link[0], link[1])
self.switches = {}
self.hosts = {}
self.cv_topo.controllers = {}
for node, pos in self.nodes.items():
if node[15:] == "00" :
# sw = self.cv.create_image(pos[0]+10, pos[1]+10, image=self.photo_sw)
sw = self.cv_topo.create_oval(pos[0], pos[1], pos[0]+self.node_size, pos[1]+self.node_size, fill=self.sw_color)
self.switches[node] = sw
if node[0:] == "00:00:00:04:15:00":
self.cv_topo.labelGw = Label(self.cv_topo, text="Gateway\n Switch", width=8, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.labelGw.place(x=pos[0] , y=pos[1]+self.node_size)
self.cv_topo.controller = self.cv_topo.create_polygon(
pos[0]+6*self.node_size, 10*self.node_size+pos[1],
pos[0]+7*self.node_size, 10*self.node_size+pos[1],
pos[0]+7.5*self.node_size, 10*self.node_size+pos[1]+sqrt(3)*self.node_size/2,
pos[0]+7*self.node_size, 10*self.node_size+pos[1]+sqrt(3)*self.node_size,
pos[0]+6*self.node_size, 10*self.node_size+pos[1]+sqrt(3)*self.node_size,
pos[0]+5.5*self.node_size, 10*self.node_size+pos[1]+sqrt(3)*self.node_size/2, fill="white", outline="black")
self.cv_topo.labelCt = Label(self.cv_topo, text="Controller", width=8, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.ctrPos = self.cv_topo.coords(self.cv_topo.controller)
self.cv_topo.ctrCenter = [(self.cv_topo.ctrPos[0]+self.cv_topo.ctrPos[2])/2, self.cv_topo.ctrPos[5]]
self.cv_topo.labelCt.place(x=pos[0]+6*self.node_size, y=10*self.node_size+pos[1]+sqrt(3)*self.node_size)
if node[0:] == "00:00:00:05:15:00":
self.cv_topo.labelRt = Label(self.cv_topo, text="Router", width=7, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.labelRt.place(x=pos[0] , y=pos[1]+self.node_size)
else:
host = self.cv_topo.create_rectangle(pos[0], pos[1], pos[0]+self.node_size, pos[1]+self.node_size, fill=self.host_color, outline="black")
# host = self.cv.create_image(pos[0]+10, pos[1]+10, image=self.photo_host)
self.hosts[node] = host
if node[0:] == "00:00:00:00:03:03":
self.cv_topo.labelSv = Label(self.cv_topo, text=" DNS\nServer", width=7, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.labelSv.place(x=pos[0] , y=pos[1]+self.node_size)
if node[0:] == self.event.getVictim().mac:
self.cv_topo.labelVt = Label(self.cv_topo, text="Victim", width=7, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.labelVt.place(x=pos[0] , y=pos[1]+self.node_size)
for node, pos in self.nodes.items():
ctrx = self.cv_topo.ctrCenter[0]
ctry = self.cv_topo.ctrCenter[1]
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
ct = self.cv_topo.create_line(pos[0]+self.node_size/2, pos[1]+self.node_size/2, ctrx, ctry, fill=self.ctrline_color, width=2)
self.cv_topo.controllers[node] = ct
for node, pos in self.nodes.items():
if node[15:] == "00":
self.cv_topo.tag_raise(self.switches[node])
self.cv_topo.tag_raise(self.cv_topo.controller)
self.overlaplist = []
self.comparelist = []
for no, link in sorted(self.topology.items()):
mac1 = link.keys()[0]
mac2 = link.keys()[1]
self.overlaplist.append(self.event.getObjID(mac1, mac2)[0])
self.overlaplist.append(self.event.getObjID(mac1, mac2)[1])
self.comparelist = list(self.overlaplist)  # copy so deletions below don't mutate overlaplist while iterating
for Id in self.overlaplist:
flag = 0
if self.comparelist == None:
break
del self.comparelist[self.comparelist.index(Id)]
x1, y1, x2, y2 = self.cv_topo.coords(Id)
result = self.cv_topo.find_overlapping(x1, y1, x2, y2)
for x in self.comparelist:
# find_overlapping returns canvas item ids, so compare the id directly
if x in result:
self.refresh_network()
flag = 1
break
if flag == 1:
break
def edge_traffic_warn(self, event, topology, cv_topo):
""" detect which edge is busy, warn user via color changing """
while event.is_set() is True:
pktMax = 0
edgeWidth_q = 2
edgeWidth_r = 2
for no, link in sorted(topology.items()):
mac1 = link.keys()[0]
mac2 = link.keys()[1]
pktNum_q = event.getPktNum(mac1, mac2, 'q')
pktNum_r = event.getPktNum(mac1, mac2, 'r')
pktMax = pktNum_q if pktNum_q > pktMax else pktMax
pktMax = pktNum_r if pktNum_r > pktMax else pktMax
pktMax = 20 if pktMax < 20 else pktMax
if pktNum_q <= qpktThreshold:
edgeWidth_q = (pktNum_q%5)+2
edgeWidth_q = 2 if edgeWidth_q < 2 else edgeWidth_q
cv_topo.itemconfig(event.getObjID(mac1, mac2)[0], fill=self.q_color, width=edgeWidth_q)
elif pktNum_q > qpktThreshold:
edgeWidth_q = int(pktNum_q*20/pktMax)
edgeWidth_q = 7 if edgeWidth_q < 7 else edgeWidth_q
cv_topo.itemconfig(event.getObjID(mac1, mac2)[0], fill=self.edgeColorCtr(self.q_color, edgeWidth_q, "q"), width=edgeWidth_q)
if pktNum_r <= rpktThreshold:
edgeWidth_r = (pktNum_r%5)+2
edgeWidth_r = 2 if edgeWidth_r < 2 else edgeWidth_r
cv_topo.itemconfig(event.getObjID(mac1, mac2)[1], fill=self.r_color, width=edgeWidth_r)
elif pktNum_r > rpktThreshold:
edgeWidth_r = int(pktNum_r*20/pktMax)
edgeWidth_r = 7 if edgeWidth_r < 7 else edgeWidth_r
cv_topo.itemconfig(event.getObjID(mac1, mac2)[1], fill=self.edgeColorCtr(self.r_color, edgeWidth_r, "r"), width=edgeWidth_r)
self.labelShowHide()
self.labelShowHide()
for i in range(0, 10):
if event.is_set() is False:
break
sleep(1)
def edgeColorCtr(self, color, width, pkttype="q"):
""" make line color change with its width """
r = int(color[1:3], 16)
g = int(color[3:5], 16)
b = int(color[5:7], 16)
if pkttype == "q":
while width > 6:
g -= 15
width -= 2
elif pkttype == "r":
while width > 6:
g -= 10
b -= 10
width -= 2
return "#{0:02x}{1:02x}{2:02x}".format(r,g,b)
def mitigation(self):
""" call conttoller to open defense system """
if self.v.get() == "On":
self.event.setMeterFlag(1)
# messagebox.showinfo("Mitigation is opened", "Our defense system is operating", parent=self.root)
print "Mitigation is opened"
elif self.v.get() == "Off":
self.event.setMeterFlag(0)
# messagebox.showinfo("Mitigation is closed", "Our defense system is stopped operating", parent=self.root)
print "Mitigation is closed"
def dbClick2ShowNode(self, event):
""" click one row to show node position """
for s_mac, pos in self.switches.items():
self.cv_topo.itemconfig(self.switches[s_mac], fill=self.sw_color)
for h_mac, pos in self.hosts.items():
self.cv_topo.itemconfig(self.hosts[h_mac], fill=self.host_color)
name = self.tree.item(self.tree.selection())['values'][0]
if name == "DNS Server":
name = "h3"
elif name == "victim":
name = self.event.getVictim().name
elif name == "gateway sw":
name = "s4"
elif name == "router":
name = "s5"
mac = self.event.name2mac(name)
x1, y1, x2, y2 = 0,0,0,0
if mac[15:] == "00":
self.cv_topo.itemconfig(self.switches[mac], fill=self.notice_color)
else:
self.cv_topo.itemconfig(self.hosts[mac], fill=self.notice_color)
x,y = self.nodes[mac]
borderX1 = self.cv_topo.canvasx(fr_topo_width/2-self.node_size/2)
borderY1 = self.cv_topo.canvasy(fr_topo_width/2-self.node_size/2)
borderX2 = self.cv_topo.canvasx(fr_topo_width/2+self.node_size/2)
borderY2 = self.cv_topo.canvasy(fr_topo_width/2+self.node_size/2)
while borderX1 > x and self.cv_topo.canvasx(0) > 0:
self.cv_topo.xview_scroll(-1,"unit")
borderX1 = self.cv_topo.canvasx(fr_topo_width/2-self.node_size/2)
borderX2 = self.cv_topo.canvasx(fr_topo_width/2+self.node_size/2)
while borderX2 < x and self.cv_topo.canvasx(fr_topo_width) < self.zoom.width:
self.cv_topo.xview_scroll(1,"unit")
borderX2 = self.cv_topo.canvasx(fr_topo_width/2+self.node_size/2)
while borderY1 > y and self.cv_topo.canvasy(0) > 0:
self.cv_topo.yview_scroll(-1,"unit")
borderY1 = self.cv_topo.canvasy(fr_topo_width/2-self.node_size/2)
borderY2 = self.cv_topo.canvasy(fr_topo_width/2+self.node_size/2)
while borderY2 < y and self.cv_topo.canvasy(fr_topo_height) < self.zoom.height:
self.cv_topo.yview_scroll(1,"unit")
borderY2 = self.cv_topo.canvasy(fr_topo_width/2+self.node_size/2)
def quit(self):
""" end the controller gui """
self.G.clear()
self.cv_topo.delete("all")
#self.cv.delete("all")
self.root.destroy()
self.event.clear()
# exit()
def move_handler(self, event):
""" detect if mouse is in the zone of node, show information """
self.var.set('')
for node, pos in self.nodes.items():
if pos[0] < self.cv_topo.canvasx(event.x) < pos[0]+self.node_size and pos[1] < self.cv_topo.canvasy(event.y) < pos[1]+self.node_size:
name = self.event.mac2name(node)
if node[15:] == "00" :
self.var.set(name+" : "+node)
else:
self.var.set(name+" : "+node)
break
def click_handler(self, event):
""" click one node to show information """
if self.tree != None:
self.tree.pack_forget()
self.ybar.pack_forget()
x = self.tree.get_children()
for item in x:
self.tree.delete(item)
for node, pos in self.nodes.items():
if pos[0] < self.cv_topo.canvasx(event.x) < pos[0]+self.node_size and pos[1] < self.cv_topo.canvasy(event.y) < pos[1]+self.node_size:
for s_mac, pos in self.switches.items():
self.cv_topo.itemconfig(self.switches[s_mac], fill=self.sw_color)
for h_mac, pos in self.hosts.items():
self.cv_topo.itemconfig(self.hosts[h_mac], fill=self.host_color)
# self.tree = Treeview(self.fr_table, columns=('col1', 'col2', 'col3', 'col4') ,show='headings')
inf = self.event.getNodeInf(node)
for i in inf:
self.tree.insert('', 'end', values=i)
# self.tree.place(x=480, y=170)
# self.ybar.place(x=800, y=170, height=218)
self.tree.pack(side="left", fill='both', pady=60)
self.ybar.pack(side="left", fill='y', pady=60)
def getThreshold(self):
""" change the threshold of mitigation """
try:
int(self.usrIn.get())
except ValueError:
self.usrIn.set("")
messagebox.showerror("Error", "You enter the wrong type !!\nPlease enter a number with type \"int\"", parent=self.root)
else:
if 0 <= int(self.usrIn.get()) <= 1000:
# update the module-level thresholds read by edge_traffic_warn
global qpktThreshold, rpktThreshold
self.event.thr_res_num = int(self.usrIn.get())
qpktThreshold = int(self.usrIn.get())
rpktThreshold = int(self.usrIn.get())
print "You changed the threshold to " + str(self.event.thr_res_num)
else:
self.usrIn.set("")
messagebox.showwarning("Warning", "Please enter a number which value is between 0 to 1000 (both includiing) !!", parent=self.root)
def labelShowHide(self):
""" show and hide all labels on topology """
if self.cv_topo.l_shohid.get() == "show":
x0 = self.cv_topo.canvasx(0)
y0 = self.cv_topo.canvasy(0)
for node, pos in self.nodes.items():
wx = pos[0] - x0
wy = pos[1] - y0
if node[15:] == "00" :
if node[0:] == "00:00:00:04:15:00":
self.cv_topo.labelGw.place(x=wx , y=wy+self.node_size)
if self.cv_topo.c_shohid.get() == "hide":
self.cv_topo.labelCt.place(x=wx+6*self.node_size, y=10*self.node_size+wy+sqrt(3)*self.node_size)
elif self.cv_topo.c_shohid.get() == "show":
self.cv_topo.labelCt.place_forget()
if node[0:] == "00:00:00:05:15:00":
self.cv_topo.labelRt.place(x=wx , y=wy+self.node_size)
else:
if node[0:] == "00:00:00:00:03:03":
self.cv_topo.labelSv.place(x=wx , y=wy+self.node_size)
if node[0:] == self.event.getVictim().mac:
self.cv_topo.labelVt.place(x=wx , y=wy+self.node_size)
self.cv_topo.l_shohid.set("hide")
self.button_lShowHide.configure(style = "v.label.TButton")
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
elif self.cv_topo.l_shohid.get() == "hide":
self.cv_topo.labelGw.place_forget()
self.cv_topo.labelRt.place_forget()
self.cv_topo.labelSv.place_forget()
self.cv_topo.labelVt.place_forget()
self.cv_topo.labelCt.place_forget()
self.cv_topo.l_shohid.set("show")
self.button_lShowHide.configure(style = "h.label.TButton")
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
def controllerShowHide(self):
""" show and hide controller, its label and line which is connected with it """
if self.cv_topo.c_shohid.get() == "show":
x = self.cv_topo.ctrPos
self.cv_topo.ctrCenter = [(x[0]+x[2])/2, x[5]]
self.cv_topo.itemconfig(self.cv_topo.controller, state="normal")
self.cv_topo.coords(self.cv_topo.controller, x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11])
self.button_cShowHide.configure(style = "v.controller.TButton")
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv_topo.itemconfig(self.cv_topo.controllers[node], state="normal")
self.cv_topo.coords(self.cv_topo.controllers[node], pos[0]+self.node_size/2, pos[1]+self.node_size/2, self.cv_topo.ctrCenter[0], self.cv_topo.ctrCenter[1])
self.cv_topo.c_shohid.set("hide")
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
self.cv_topo.ctrPos = self.cv_topo.coords(self.cv_topo.controller)
elif self.cv_topo.c_shohid.get() == "hide":
self.cv_topo.labelCt.place_forget()
self.button_cShowHide.configure(style = "h.controller.TButton")
self.cv_topo.c_shohid.set("show")
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
self.cv_topo.ctrPos = self.cv_topo.coords(self.cv_topo.controller)
self.cv_topo.ctrCenter = [(self.cv_topo.ctrPos[0]+self.cv_topo.ctrPos[2])/2, self.cv_topo.ctrPos[5]]
self.cv_topo.itemconfig(self.cv_topo.controller, state="hidden")
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv_topo.coords(self.cv_topo.controllers[node], pos[0]+self.node_size/2, pos[1]+self.node_size/2, self.cv_topo.ctrCenter[0], self.cv_topo.ctrCenter[1])
self.cv_topo.itemconfig(self.cv_topo.controllers[node], state="hidden")
def zoomRecord(self, event):
""" record the mouse position you clicked """
self.zoom.x1 = self.cv_topo.canvasx(event.x)
self.zoom.y1 = self.cv_topo.canvasy(event.y)
def zoomCreate(self, event):
""" record the position in the rectangle area you chose """
self.cv_topo.delete(self.zoom.rect)
self.zoom.x2 = self.cv_topo.canvasx(event.x)
self.zoom.y2 = self.cv_topo.canvasy(event.y)
self.zoom.rect = self.cv_topo.create_rectangle(self.zoom.x1, self.zoom.y1, self.zoom.x2, self.zoom.y2)
self.zoom.area = abs(self.zoom.x2-self.zoom.x1)*abs(self.zoom.y2-self.zoom.y1)
def zoomRelease(self, event=None, InOut="in"):
""" topology zoom in and out """
op = "*" if InOut=="in" else "/"
if self.zoom.area < 1:
self.zoom.area = 1
mag = sqrt((400*400)/self.zoom.area)
if mag >= 8:
mag = 8
elif mag >= 4:
mag = 4
elif mag >= 2:
mag = 2
elif mag >= 0:
mag = 1.5
self.zoom.width = eval("self.zoom.width "+op+"mag")
self.zoom.height= eval("self.zoom.height"+op+"mag")
if fr_topo_width-50 < self.zoom.width < fr_topo_width+50:
self.zoom.width = fr_topo_width
self.zoom.height = fr_topo_height
if self.cv_topo.c_shohid.get() == "show":
y = self.cv_topo.ctrPos
x = [eval("i"+op+"mag") for i in y]
self.cv_topo.ctrCenter = [(x[0]+x[2])/2, x[5]]
self.cv_topo.itemconfig(self.cv_topo.controller, state="normal")
self.cv_topo.coords(self.cv_topo.controller, x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11])
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv_topo.itemconfig(self.cv_topo.controllers[node], state="normal")
self.cv_topo.coords(self.cv_topo.controllers[node], pos[0]+self.node_size/2, pos[1]+self.node_size/2, self.cv_topo.ctrCenter[0], self.cv_topo.ctrCenter[1])
self.cv_topo.ctrPos = x
self.cv_topo.configure(scrollregion=(0,0,self.zoom.width,self.zoom.height))
self.cv_topo.yview_moveto(eval("self.zoom.y1"+op+"mag")/self.zoom.height)
self.cv_topo.xview_moveto(eval("self.zoom.x1"+op+"mag")/self.zoom.width)
self.node_size = eval("self.node_size "+op+" mag")
for node, pos in self.nodes.items():
self.nodes[node] = [eval("pos[0] "+op+" mag"), eval("pos[1] "+op+" mag")]
result = self.cv_topo.find_overlapping(0, 0, 10000, 10000)
for Id in result:
ords = self.cv_topo.coords(Id)
z = [eval("o"+op+"mag") for o in ords]
if len(ords) == 4:
self.cv_topo.coords(Id, z[0], z[1], z[2], z[3])
if len(ords) == 12:
self.cv_topo.coords(Id,
z[0], z[1], z[2], z[3],
z[4], z[5], z[6], z[7],
z[8], z[9], z[10], z[11])
self.labelShowHide()
self.labelShowHide()
self.controllerShowHide()
self.controllerShowHide()
self.cv_topo.delete(self.zoom.rect)
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
if self.cv_topo.c_shohid.get() == "show":
self.cv_topo.ctrPos = self.cv_topo.coords(self.cv_topo.controller)
self.cv_topo.itemconfig(self.cv_topo.controller, state="hidden")
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv_topo.itemconfig(self.cv_topo.controllers[node], state="hidden")
tmp = self.zoomState
if self.zoom.width * 8 > 10000 and self.zoomState == "in":
self.zoomIn.state(["disabled"])
elif self.zoom.width / 8 < 50 and self.zoomState == "out":
self.zoomOut.state(["disabled"])
else:
self.zoomIn.state(["!disabled"])
self.zoomOut.state(["!disabled"])
self.zoomState = "Not"
self.topoZoom(InOut=tmp)
def topoZoom(self, InOut="in"):
""" check zoom state to decide to unbind or bind events """
self.cv_topo.unbind("<Button-1>")
self.cv_topo.unbind("<B1-Motion>")
self.cv_topo.unbind("<ButtonRelease-1>")
if self.zoomState == InOut:
self.zoomIn.configure(style="in.zoom.TButton")
self.zoomOut.configure(style="out.zoom.TButton")
self.zoomState = "Not"
self.cv_topo.bind('<Motion>' , self.move_handler)
self.cv_topo.bind('<Button-1>', self.click_handler)
else: # self.zoomState = "Not"
if InOut == "in":
self.zoomIn.configure(style="S.in.zoom.TButton")
self.zoomOut.configure(style="out.zoom.TButton")
elif InOut == "out":
self.zoomIn.configure(style="in.zoom.TButton")
self.zoomOut.configure(style="S.out.zoom.TButton")
self.zoomState = InOut
self.cv_topo.bind("<Button-1>", self.zoomRecord)
self.cv_topo.bind("<B1-Motion>", self.zoomCreate)
self.cv_topo.bind("<ButtonRelease-1>", partial(self.zoomRelease,InOut=InOut))
def main():
sw_mac = {'s16': '00:00:00:10:15:00', 's9': '00:00:00:09:15:00', 's8': '00:00:00:08:15:00', 's17': '00:00:00:11:15:00', 's3': '00:00:00:03:15:00', 's2': '00:00:00:02:15:00', 's1': '00:00:00:01:15:00', 's10': '00:00:00:0a:15:00', 's7': '00:00:00:07:15:00', 's6': '00:00:00:06:15:00', 's5': '00:00:00:05:15:00', 's4': '00:00:00:04:15:00', 's13': '00:00:00:0d:15:00', 's20': '00:00:00:14:15:00', 's18': '00:00:00:12:15:00', 's15': '00:00:00:0f:15:00', 's12': '00:00:00:0c:15:00', 's19': '00:00:00:13:15:00', 's21': '00:00:00:15:15:00', 's14': '00:00:00:0e:15:00', 's11': '00:00:00:0b:15:00'}
h_mac = {u'h8': u'00:00:00:00:0c:08', u'h9': u'00:00:00:00:0d:09', u'h7': u'00:00:00:00:0b:07', u'h1': u'00:00:00:00:01:01', u'h6': u'00:00:00:00:0a:06', u'h12': u'00:00:00:00:10:0c', u'h13': u'00:00:00:00:12:0d', u'h14': u'00:00:00:00:13:0e', u'h15': u'00:00:00:00:15:0f', u'h4': u'00:00:00:00:07:04', u'h5': u'00:00:00:00:08:05', u'h10': u'00:00:00:00:0e:0a', u'h2': u'00:00:00:00:02:02', u'h11': u'00:00:00:00:0f:0b', u'h3': u'00:00:00:00:03:03'}
topology = {'24': {'00:00:00:05:15:00': 3, '00:00:00:04:15:00': 10}, '25': {'00:00:00:0d:15:00': 2, '00:00:00:04:15:00': 3}, '26': {'00:00:00:0e:15:00': 2, '00:00:00:04:15:00': 4}, '27': {'00:00:00:11:15:00': 2, '00:00:00:04:15:00': 7}, '20': {'00:00:00:07:15:00': 2, '00:00:00:04:15:00': 12}, '21': {'00:00:00:06:15:00': 2, '00:00:00:04:15:00': 11}, '22': {'00:00:00:08:15:00': 2, '00:00:00:04:15:00': 13}, '23': {'00:00:00:09:15:00': 2, '00:00:00:04:15:00': 14}, '28': {'00:00:00:0f:15:00': 2, '00:00:00:04:15:00': 5}, '29': {'00:00:00:04:15:00': 9, '00:00:00:14:15:00': 2}, '1': {u'00:00:00:00:12:0d': 1, '00:00:00:12:15:00': 1}, '0': {'00:00:00:13:15:00': 1, u'00:00:00:00:13:0e': 1}, '3': {'00:00:00:0d:15:00': 1, u'00:00:00:00:0d:09': 1}, '2': {'00:00:00:08:15:00': 1, u'00:00:00:00:08:05': 1}, '5': {'00:00:00:01:15:00': 1, u'00:00:00:00:01:01': 1}, '4': {u'00:00:00:00:0c:08': 1, '00:00:00:0c:15:00': 1}, '7': {'00:00:00:07:15:00': 1, u'00:00:00:00:07:04': 1}, '6': {'00:00:00:0a:15:00': 1, u'00:00:00:00:0a:06': 1}, '9': {u'00:00:00:00:0f:0b': 1, '00:00:00:0f:15:00': 1}, '8': {u'00:00:00:00:10:0c': 1, '00:00:00:10:15:00': 1}, '11': {u'00:00:00:00:03:03': 1, '00:00:00:03:15:00': 1}, '10': {'00:00:00:0e:15:00': 1, u'00:00:00:00:0e:0a': 1}, '13': {u'00:00:00:00:02:02': 1, '00:00:00:02:15:00': 1}, '12': {u'00:00:00:00:15:0f': 1, '00:00:00:15:15:00': 1}, '15': {'00:00:00:01:15:00': 2, '00:00:00:06:15:00': 1}, '14': {u'00:00:00:00:0b:07': 1, '00:00:00:0b:15:00': 1}, '17': {'00:00:00:05:15:00': 2, '00:00:00:03:15:00': 2}, '16': {'00:00:00:05:15:00': 1, '00:00:00:02:15:00': 2}, '19': {'00:00:00:04:15:00': 1, '00:00:00:0b:15:00': 2}, '18': {'00:00:00:04:15:00': 2, '00:00:00:0c:15:00': 2}, '31': {'00:00:00:13:15:00': 2, '00:00:00:04:15:00': 8}, '30': {'00:00:00:10:15:00': 2, '00:00:00:04:15:00': 6}, '34': {'00:00:00:14:15:00': 1, '00:00:00:15:15:00': 2}, '33': {'00:00:00:11:15:00': 1, '00:00:00:12:15:00': 2}, '32': {'00:00:00:0a:15:00': 2, '00:00:00:09:15:00': 1}}
direction = {'24': {'00:00:00:05:15:00': 'r', '00:00:00:04:15:00': 'q'}, '25': {'00:00:00:0d:15:00': 'q', '00:00:00:04:15:00': 'r'}, '26': {'00:00:00:0e:15:00': 'q', '00:00:00:04:15:00': 'r'}, '27': {'00:00:00:11:15:00': 'q', '00:00:00:04:15:00': 'r'}, '20': {'00:00:00:07:15:00': 'q', '00:00:00:04:15:00': 'r'}, '21': {'00:00:00:06:15:00': 'q', '00:00:00:04:15:00': 'r'}, '22': {'00:00:00:08:15:00': 'q', '00:00:00:04:15:00': 'r'}, '23': {'00:00:00:09:15:00': 'q', '00:00:00:04:15:00': 'r'}, '28': {'00:00:00:0f:15:00': 'q', '00:00:00:04:15:00': 'r'}, '29': {'00:00:00:04:15:00': 'r', '00:00:00:14:15:00': 'q'}, '1': {'00:00:00:12:15:00': 'r', u'00:00:00:00:12:0d': 'q'}, '0': {'00:00:00:13:15:00': 'r', u'00:00:00:00:13:0e': 'q'}, '3': {'00:00:00:0d:15:00': 'r', u'00:00:00:00:0d:09': 'q'}, '2': {'00:00:00:08:15:00': 'r', u'00:00:00:00:08:05': 'q'}, '5': {'00:00:00:01:15:00': 'r', u'00:00:00:00:01:01': 'q'}, '4': {u'00:00:00:00:0c:08': 'q', '00:00:00:0c:15:00': 'r'}, '7': {'00:00:00:07:15:00': 'r', u'00:00:00:00:07:04': 'q'}, '6': {'00:00:00:0a:15:00': 'r', u'00:00:00:00:0a:06': 'q'}, '9': {u'00:00:00:00:0f:0b': 'q', '00:00:00:0f:15:00': 'r'}, '8': {u'00:00:00:00:10:0c': 'q', '00:00:00:10:15:00': 'r'}, '11': {u'00:00:00:00:03:03': 'r', '00:00:00:03:15:00': 'q'}, '10': {'00:00:00:0e:15:00': 'r', u'00:00:00:00:0e:0a': 'q'}, '13': {'00:00:00:02:15:00': 'r', u'00:00:00:00:02:02': 'q'}, '12': {'00:00:00:15:15:00': 'r', u'00:00:00:00:15:0f': 'q'}, '15': {'00:00:00:01:15:00': 'q', '00:00:00:06:15:00': 'r'}, '14': {u'00:00:00:00:0b:07': 'q', '00:00:00:0b:15:00': 'r'}, '17': {'00:00:00:05:15:00': 'q', '00:00:00:03:15:00': 'r'}, '16': {'00:00:00:05:15:00': 'r', '00:00:00:02:15:00': 'q'}, '19': {'00:00:00:04:15:00': 'r', '00:00:00:0b:15:00': 'q'}, '18': {'00:00:00:04:15:00': 'r', '00:00:00:0c:15:00': 'q'}, '31': {'00:00:00:13:15:00': 'q', '00:00:00:04:15:00': 'r'}, '30': {'00:00:00:10:15:00': 'q', '00:00:00:04:15:00': 'r'}, '34': {'00:00:00:14:15:00': 'r', '00:00:00:15:15:00': 'q'}, '33': {'00:00:00:11:15:00': 'r', '00:00:00:12:15:00': 'q'}, '32': {'00:00:00:0a:15:00': 'q', '00:00:00:09:15:00': 'r'}}
node_links = {u'h8': [[1, 's12']], u'h9': [[1, 's13']], u'h2': [[1, 's2']], u'h3': [[1, 's3']], u'h1': [[1, 's1']], u'h6': [[1, 's10']], u'h7': [[1, 's11']], u'h4': [[1, 's7']], u'h5': [[1, 's8']], 's9': [[2, 's4'], [1, 's10']], 's8': [[2, 's4'], [1, u'h5']], 's3': [[1, u'h3'], [2, 's5']], 's2': [[1, u'h2'], [2, 's5']], 's1': [[1, u'h1'], [2, 's6']], 's7': [[2, 's4'], [1, u'h4']], 's6': [[1, 's1'], [2, 's4']], 's5': [[2, 's3'], [1, 's2'], [3, 's4']], 's4': [[2, 's12'], [3, 's13'], [5, 's15'], [4, 's14'], [12, 's7'], [13, 's8'], [14, 's9'], [1, 's11'], [6, 's16'], [7, 's17'], [10, 's5'], [11, 's6'], [9, 's20'], [8, 's19']], 's19': [[1, u'h14'], [2, 's4']], 's18': [[1, u'h13'], [2, 's17']], 's13': [[2, 's4'], [1, u'h9']], 's12': [[2, 's4'], [1, u'h8']], 's11': [[2, 's4'], [1, u'h7']], 's10': [[1, u'h6'], [2, 's9']], 's17': [[2, 's4'], [1, 's18']], 's16': [[2, 's4'], [1, u'h12']], 's15': [[2, 's4'], [1, u'h11']], 's14': [[2, 's4'], [1, u'h10']], u'h10': [[1, 's14']], u'h11': [[1, 's15']], u'h12': [[1, 's16']], u'h13': [[1, 's18']], u'h14': [[1, 's19']], u'h15': [[1, 's21']], 's20': [[2, 's4'], [1, 's21']], 's21': [[1, u'h15'], [2, 's20']]}
event = myEvent()
event.init(topology, direction, node_links)
event.recordName(h_mac, sw_mac)
c = ControllerGui(event, sw_mac, h_mac, topology)
c.root.mainloop()
if __name__ == '__main__':
main()
|
nbzz_set_alias.py
|
from pathlib import Path
import os
import subprocess
import threading
try:
    from tqdm import tqdm
except ImportError:
    if os.system('pip3 install tqdm') != 0:
        print("tqdm install error")
        exit(1)
    from tqdm import tqdm
try:
from nbzz.util.config import load_config
from web3 import Web3
from typing import Dict
from nbzz.util.default_root import DEFAULT_ROOT_PATH
import yaml
except ImportError:
    print("nbzz is not installed. This script requires nbzz; install it, then run: . ./activate")
exit(1)
se_lock=threading.Semaphore(10)
def i_thread_nbzz(ii_bee_path):
try:
swarm_key = ii_bee_path/"keys"/"swarm.key"
if not swarm_key.exists():
tqdm.write(f"{ii_bee_path} 目录下不存在keys文件,检查是否安装")
return
with se_lock:
result=subprocess.run(f"nbzz alias show --bee-key-path {str(swarm_key)} ", stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
result_o=result.stdout.decode().strip("\n").split(" ")
        if len(result_o) < 5:
            print(f"ERROR: failed to parse current status: {result_o}")
return
now_alias=result_o[4]
if alias_for_use == now_alias:
tqdm.write(f"{ii_bee_path} 已经设置 别名: {now_alias}")
return
with se_lock:
result=subprocess.run(f"nbzz alias set-alias -p {bee_passwd} -a {alias_for_use} --bee-key-path {str(swarm_key)} ", stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
if (result.stdout.decode().split())[-1]=="success":
print(f"{ii_bee_path} 成功设置 别名: {alias_for_use}")
return
else:
tqdm.write(f"{ii_bee_path} 别名设置失败,错误如下: \n {result.stderr.decode()}")
finally:
pbar.update(1)
# read the alias from the environment
env = os.environ
if "NBZZ_ALIAS" in env:
alias_for_use=env["NBZZ_ALIAS"]
print(f"别名: {alias_for_use}")
else:
print("未设置别名")
exit(1)
# read the createbee config
bee_con_path = Path("config.yaml")
if not bee_con_path.exists():
print("路径错误,请移动到bee批量安装脚本的启动目录.")
exit(1)
with bee_con_path.open("r",) as fid:
bee_con = yaml.safe_load(fid)
bee_install_path = Path(bee_con["bee"]["base_path"])
bee_passwd = bee_con["bee"]["password"]
if not bee_install_path.exists():
print("bee未安装或者未成功启动")
exit(1)
# start deployment
all_bee_path = [i for i in bee_install_path.glob(".bee*")]
all_bee_path.sort()
all_thread = []
pbar=tqdm(total=len(all_bee_path))
for i_bee_path in all_bee_path:
ithread = threading.Thread(target=i_thread_nbzz, args=(i_bee_path,))
all_thread.append(ithread)
    ithread.daemon = True
ithread.start()
for ithread in all_thread:
ithread.join()
|
dataset.py
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-05-09 20:27
import math
import os
import random
import tempfile
import warnings
from abc import ABC, abstractmethod
from copy import copy
from logging import Logger
from typing import Union, List, Callable, Iterable, Dict, Any
import torch
import torch.multiprocessing as mp
from hanlp.common.transform import TransformList, VocabDict, EmbeddingNamedTransform
from hanlp.common.vocab import Vocab
from hanlp.components.parsers.alg import kmeans
from hanlp.utils.io_util import read_cells, get_resource
from hanlp.utils.time_util import CountdownTimer
from hanlp.utils.torch_util import dtype_of
from hanlp_common.configurable import AutoConfigurable
from hanlp_common.constant import IDX, HANLP_VERBOSE
from hanlp_common.util import isdebugging, merge_list_of_dict, k_fold
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader, Sampler
from torch.utils.data.dataset import IterableDataset
class Transformable(ABC):
def __init__(self, transform: Union[Callable, List] = None) -> None:
"""An object which can be transformed with a list of functions. It can be treated as an objected being passed
through a list of functions, while these functions are kept in a list.
Args:
transform: A transform function or a list of functions.
"""
super().__init__()
if isinstance(transform, list) and not isinstance(transform, TransformList):
transform = TransformList(*transform)
self.transform: Union[Callable, TransformList] = transform
def append_transform(self, transform: Callable):
"""Append a transform to its list of transforms.
Args:
transform: A new transform to be appended.
Returns:
Itself.
"""
assert transform is not None, 'None transform not allowed'
if not self.transform:
self.transform = TransformList(transform)
elif not isinstance(self.transform, TransformList):
if self.transform != transform:
self.transform = TransformList(self.transform, transform)
else:
if transform not in self.transform:
self.transform.append(transform)
return self
def insert_transform(self, index: int, transform: Callable):
"""Insert a transform to a certain position.
Args:
index: A certain position.
transform: A new transform.
Returns:
Itself.
"""
assert transform is not None, 'None transform not allowed'
if not self.transform:
self.transform = TransformList(transform)
elif not isinstance(self.transform, TransformList):
if self.transform != transform:
self.transform = TransformList(self.transform)
self.transform.insert(index, transform)
else:
if transform not in self.transform:
self.transform.insert(index, transform)
return self
def transform_sample(self, sample: dict, inplace=False) -> dict:
"""Apply transforms to a sample.
Args:
sample: A sample, which is a ``dict`` holding features.
inplace: ``True`` to apply transforms inplace.
.. Attention::
            If any transform modifies existing features, it will modify them again and again when ``inplace=True``.
            For example, if a transform inserts a ``BOS`` token into a list inplace and it is called twice,
            then 2 ``BOS`` tokens will be inserted, which might not be the intended result.
Returns:
Transformed sample.
"""
if not inplace:
sample = copy(sample)
if self.transform:
sample = self.transform(sample)
return sample
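# A minimal usage sketch of the Transformable interface above (illustrative only;
# `lowercase_tokens` is a hypothetical transform, not part of this module):
#
#     def lowercase_tokens(sample: dict) -> dict:
#         sample['token'] = [t.lower() for t in sample['token']]
#         return sample
#
#     t = Transformable()
#     t.append_transform(lowercase_tokens)
#     t.transform_sample({'token': ['Hello', 'World']})  # -> {'token': ['hello', 'world']}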
class TransformableDataset(Transformable, Dataset, ABC):
def __init__(self,
data: Union[str, List],
transform: Union[Callable, List] = None,
cache=None,
generate_idx=None) -> None:
"""A :class:`~torch.utils.data.Dataset` which can be applied with a list of transform functions.
Args:
data: The local or remote path to a dataset, or a list of samples where each sample is a dict.
transform: Predefined transform(s).
cache: ``True`` to enable caching, so that transforms won't be called twice.
generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when
samples are re-ordered by a sampler.
"""
super().__init__(transform)
if generate_idx is None:
generate_idx = isinstance(data, list)
data = self.load_data(data, generate_idx)
assert data, 'No samples loaded'
assert isinstance(data[0],
dict), f'TransformDataset expects each sample to be a dict but got {type(data[0])} instead.'
self.data = data
if cache:
self.cache = [None] * len(data)
else:
self.cache = None
def load_data(self, data, generate_idx=False):
"""A intermediate step between constructor and calling the actual file loading method.
Args:
data: If data is a file, this method calls :meth:`~hanlp.common.dataset.TransformableDataset.load_file`
to load it.
generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when
samples are re-ordered by a sampler.
Returns: Loaded samples.
"""
if self.should_load_file(data):
if isinstance(data, str):
data = get_resource(data)
data = list(self.load_file(data))
if generate_idx:
for i, each in enumerate(data):
each[IDX] = i
# elif isinstance(data, list):
# data = self.load_list(data)
return data
# noinspection PyMethodMayBeStatic
# def load_list(self, data: list) -> List[Dict[str, Any]]:
# return data
def should_load_file(self, data) -> bool:
"""Determines whether data is a filepath.
Args:
data: Data to check.
Returns: ``True`` to indicate it's a filepath.
"""
return isinstance(data, str)
@abstractmethod
def load_file(self, filepath: str):
"""The actual file loading logic.
Args:
filepath: The path to a dataset.
"""
pass
def __getitem__(self, index: Union[int, slice]) -> Union[dict, List[dict]]:
""" Get the index-th sample in this dataset.
Args:
            index: Either an integer index or a list of indices.
        Returns: Either a sample or a list of samples depending on how many indices are passed in.
"""
# if isinstance(index, (list, tuple)):
# assert len(index) == 1
# index = index[0]
if isinstance(index, slice):
indices = range(*index.indices(len(self)))
return [self[i] for i in indices]
if self.cache:
cache = self.cache[index]
if cache:
return cache
sample = self.data[index]
sample = self.transform_sample(sample)
if self.cache:
self.cache[index] = sample
return sample
def __len__(self) -> int:
return len(self.data)
def __repr__(self) -> str:
return f'{len(self)} samples: {self[0]} ...'
def purge_cache(self):
"""Purges all cache. If cache is not enabled, this method enables it.
"""
self.cache = [None] * len(self.data)
def split(self, *ratios):
"""Split dataset into subsets.
Args:
*ratios: The ratios for each subset. They can be any type of numbers which will be normalized. For example,
``8, 1, 1`` are equivalent to ``0.8, 0.1, 0.1``.
Returns:
list[TransformableDataset]: A list of subsets.
"""
ratios = [x / sum(ratios) for x in ratios]
chunks = []
prev = 0
for r in ratios:
cur = prev + math.ceil(len(self) * r)
chunks.append([prev, cur])
prev = cur
chunks[-1][1] = len(self)
outputs = []
for b, e in chunks:
dataset = copy(self)
dataset.data = dataset.data[b:e]
if dataset.cache:
dataset.cache = dataset.cache[b:e]
outputs.append(dataset)
return outputs
def k_fold(self, k, i):
"""Perform k-fold sampling.
Args:
k (int): Number of folds.
i (int): The i-th fold.
Returns:
TransformableDataset: The i-th fold subset of this dataset.
"""
assert 0 <= i <= k, f'Invalid split {i}'
train_indices, test_indices = k_fold(k, len(self), i)
return self.subset(train_indices), self.subset(test_indices)
def subset(self, indices):
"""Create a subset given indices of samples.
Args:
indices: Indices of samples.
Returns:
            TransformableDataset: A subset of this dataset.
"""
dataset = copy(self)
dataset.data = [dataset.data[i] for i in indices]
if dataset.cache:
dataset.cache = [dataset.cache[i] for i in indices]
return dataset
def shuffle(self):
"""Shuffle this dataset inplace.
"""
if not self.cache:
random.shuffle(self.data)
else:
z = list(zip(self.data, self.cache))
random.shuffle(z)
self.data, self.cache = zip(*z)
def prune(self, criterion: Callable, logger: Logger = None):
"""Prune (to discard) samples according to a criterion.
Args:
            criterion: A function that takes a sample as input and outputs ``True`` if the sample needs to be pruned.
logger: If any, log statistical messages using it.
Returns:
int: Size before pruning.
"""
# noinspection PyTypeChecker
size_before = len(self)
good_ones = [i for i, s in enumerate(self) if not criterion(s)]
self.data = [self.data[i] for i in good_ones]
if self.cache:
self.cache = [self.cache[i] for i in good_ones]
if logger:
size_after = len(self)
num_pruned = size_before - size_after
logger.info(f'Pruned [yellow]{num_pruned} ({num_pruned / size_before:.1%})[/yellow] '
f'samples out of {size_before}.')
return size_before
class TransformSequentialDataset(Transformable, IterableDataset, ABC):
pass
class DeviceDataLoader(DataLoader):
def __init__(self, dataset, batch_size=32, shuffle=False, sampler=None,
batch_sampler=None, num_workers=None, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None, multiprocessing_context=None,
device=None, **kwargs):
if batch_sampler is not None:
batch_size = 1
if num_workers is None:
if isdebugging():
num_workers = 0
else:
num_workers = 2
# noinspection PyArgumentList
super(DeviceDataLoader, self).__init__(dataset=dataset, batch_size=batch_size, shuffle=shuffle,
sampler=sampler,
batch_sampler=batch_sampler, num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory, drop_last=drop_last, timeout=timeout,
worker_init_fn=worker_init_fn,
multiprocessing_context=multiprocessing_context, **kwargs)
self.device = device
def __iter__(self):
for raw_batch in super(DeviceDataLoader, self).__iter__():
if self.device is not None:
for field, data in raw_batch.items():
if isinstance(data, torch.Tensor):
data = data.to(self.device)
raw_batch[field] = data
yield raw_batch
def collate_fn(self, samples):
return merge_list_of_dict(samples)
class PadSequenceDataLoader(DataLoader):
def __init__(self, dataset, batch_size=32, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None, multiprocessing_context=None,
pad: dict = None, vocabs: VocabDict = None, device=None, **kwargs):
""" A dataloader commonly used for NLP tasks. It offers the following convenience.
- Bachify each field of samples into a :class:`~torch.Tensor` if the field name satisfies the following criterion.
- Name ends with _id, _ids, _count, _offset, _span, mask
- Name is in `pad` dict.
- Pad each field according to field name, the vocabs and pad dict.
- Move :class:`~torch.Tensor` onto device.
Args:
            dataset: A :class:`~torch.utils.data.Dataset` to be batchified.
batch_size: Max size of each batch.
shuffle: ``True`` to shuffle batches.
sampler: A :class:`~torch.utils.data.Sampler` to sample samples from data.
            batch_sampler: A :class:`~torch.utils.data.Sampler` to sample batches from all batches.
            num_workers: Number of workers for multi-thread loading. Note that multi-thread loading isn't always
faster.
collate_fn: A function to perform batchifying. It must be set to ``None`` in order to make use of the
features this class offers.
pin_memory: If samples are loaded in the Dataset on CPU and would like to be pushed to
                the GPU, enabling pin_memory can speed up the transfer. It's not useful here since most data fields are
                not of Tensor type.
drop_last: Drop the last batch since it could be half-empty.
timeout: For multi-worker loading, set a timeout to wait for a worker.
worker_init_fn: Init function for multi-worker.
multiprocessing_context: Context for multiprocessing.
pad: A dict holding field names and their padding values.
vocabs: A dict of vocabs so padding value can be fetched from it.
device: The device tensors will be moved onto.
            **kwargs: Other arguments will be passed to :meth:`torch.utils.data.DataLoader.__init__`
"""
if device == -1:
device = None
if collate_fn is None:
collate_fn = self.collate_fn
if num_workers is None:
if isdebugging():
num_workers = 0
else:
num_workers = 2
if batch_sampler is None:
assert batch_size, 'batch_size has to be specified when batch_sampler is None'
else:
batch_size = 1
shuffle = None
drop_last = None
# noinspection PyArgumentList
super(PadSequenceDataLoader, self).__init__(dataset=dataset, batch_size=batch_size, shuffle=shuffle,
sampler=sampler,
batch_sampler=batch_sampler, num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory, drop_last=drop_last, timeout=timeout,
worker_init_fn=worker_init_fn,
multiprocessing_context=multiprocessing_context, **kwargs)
self.vocabs = vocabs
if isinstance(dataset, TransformableDataset) and dataset.transform:
transform = dataset.transform
if not isinstance(transform, TransformList):
transform = []
for each in transform:
if isinstance(each, EmbeddingNamedTransform):
if pad is None:
pad = {}
if each.dst not in pad:
pad[each.dst] = 0
self.pad = pad
self.device = device
def __iter__(self):
for raw_batch in super(PadSequenceDataLoader, self).__iter__():
yield self.tensorize(raw_batch, vocabs=self.vocabs, pad_dict=self.pad, device=self.device)
@staticmethod
def tensorize(raw_batch: Dict[str, Any], vocabs: VocabDict, pad_dict: Dict[str, int] = None, device=None):
for field, data in raw_batch.items():
if isinstance(data, torch.Tensor):
continue
vocab_key = field[:-len('_id')] if field.endswith('_id') else None
vocab: Vocab = vocabs.get(vocab_key, None) if vocabs and vocab_key else None
if vocab:
pad = vocab.safe_pad_token_idx
dtype = torch.long
elif pad_dict is not None and field in pad_dict:
pad = pad_dict[field]
dtype = dtype_of(pad)
elif field.endswith('_offset') or field.endswith('_id') or field.endswith(
'_count') or field.endswith('_ids') or field.endswith('_score') or field.endswith(
'_length') or field.endswith('_span'):
# guess some common fields to pad
pad = 0
dtype = torch.long
elif field.endswith('_mask'):
pad = False
dtype = torch.bool
else:
# no need to pad
continue
data = PadSequenceDataLoader.pad_data(data, pad, dtype)
raw_batch[field] = data
if device is not None:
for field, data in raw_batch.items():
if isinstance(data, torch.Tensor):
data = data.to(device)
raw_batch[field] = data
return raw_batch
@staticmethod
def pad_data(data: Union[torch.Tensor, Iterable], pad, dtype=None, device=None):
"""Perform the actual padding for a given data.
Args:
data: Data to be padded.
pad: Padding value.
dtype: Data type.
device: Device to be moved onto.
Returns:
torch.Tensor: A ``torch.Tensor``.
"""
if isinstance(data[0], torch.Tensor):
data = pad_sequence(data, True, pad)
elif isinstance(data[0], Iterable):
inner_is_iterable = False
for each in data:
if len(each):
if isinstance(each[0], Iterable):
inner_is_iterable = True
if len(each[0]):
if not dtype:
dtype = dtype_of(each[0][0])
else:
inner_is_iterable = False
if not dtype:
dtype = dtype_of(each[0])
break
if inner_is_iterable:
max_seq_len = len(max(data, key=len))
max_word_len = len(max([chars for words in data for chars in words], key=len))
ids = torch.zeros(len(data), max_seq_len, max_word_len, dtype=dtype, device=device)
for i, words in enumerate(data):
for j, chars in enumerate(words):
ids[i][j][:len(chars)] = torch.tensor(chars, dtype=dtype, device=device)
data = ids
else:
data = pad_sequence([torch.tensor(x, dtype=dtype, device=device) for x in data], True, pad)
elif isinstance(data, list):
data = torch.tensor(data, dtype=dtype, device=device)
return data
def collate_fn(self, samples):
return merge_list_of_dict(samples)
class CachedDataLoader(object):
def __init__(self, dataloader: torch.utils.data.DataLoader, filename=None):
if not filename:
filename = tempfile.NamedTemporaryFile(prefix='hanlp-cache-', delete=False).name
self.filename = filename
self.size = len(dataloader)
self._build_cache(dataloader)
def _build_cache(self, dataset, verbose=HANLP_VERBOSE):
timer = CountdownTimer(self.size)
with open(self.filename, "wb") as f:
for i, batch in enumerate(dataset):
torch.save(batch, f, _use_new_zipfile_serialization=False)
if verbose:
timer.log(f'Caching {self.filename} [blink][yellow]...[/yellow][/blink]')
def close(self):
if os.path.isfile(self.filename):
os.remove(self.filename)
def __iter__(self):
with open(self.filename, "rb") as f:
for i in range(self.size):
batch = torch.load(f)
yield batch
def __len__(self):
return self.size
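# A brief usage sketch of CachedDataLoader (illustrative only; `train_dataloader` is assumed
# to be an existing DataLoader): materialize its batches to a temporary file once, then
# iterate the cache for subsequent epochs.
#
#     cached = CachedDataLoader(train_dataloader)
#     for epoch in range(3):
#         for batch in cached:      # batches are torch.load-ed back from the cache file
#             ...
#     cached.close()                # removes the temporary cache file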
def _prefetch_generator(dataloader, queue, batchify=None):
while True:
for batch in dataloader:
if batchify:
batch = batchify(batch)
queue.put(batch)
class PrefetchDataLoader(DataLoader):
def __init__(self, dataloader: torch.utils.data.DataLoader, prefetch: int = 10, batchify: Callable = None) -> None:
""" A dataloader wrapper which speeds up bachifying using multi-processing. It works best for dataloaders
of which the bachify takes very long time. But it introduces extra GPU memory consumption since prefetched
batches are stored in a ``Queue`` on GPU.
.. Caution::
PrefetchDataLoader only works in spawn mode with the following initialization code:
Examples::
if __name__ == '__main__':
import torch
torch.multiprocessing.set_start_method('spawn')
And these 2 lines **MUST** be put into ``if __name__ == '__main__':`` block.
Args:
            dataloader: A :class:`~torch.utils.data.DataLoader` to be prefetched.
prefetch: Number of batches to prefetch.
            batchify: A batchify function called on each batch of samples; in that case, the inner dataloader should
                return samples without actually batchifying them.
"""
super().__init__(dataset=dataloader)
self._batchify = batchify
self.prefetch = None if isdebugging() else prefetch
if self.prefetch:
self._fire_process(dataloader, prefetch)
def _fire_process(self, dataloader, prefetch):
self.queue = mp.Queue(prefetch)
self.process = mp.Process(target=_prefetch_generator, args=(dataloader, self.queue, self._batchify))
self.process.start()
def __iter__(self):
if not self.prefetch:
for batch in self.dataset:
if self._batchify:
batch = self._batchify(batch)
yield batch
else:
size = len(self)
while size:
batch = self.queue.get()
yield batch
size -= 1
def close(self):
"""Close this dataloader and terminates internal processes and queue. It's recommended to call this method to
ensure a program can gracefully shutdown.
"""
if self.prefetch:
self.queue.close()
self.process.terminate()
@property
def batchify(self):
return self._batchify
@batchify.setter
def batchify(self, batchify):
self._batchify = batchify
        if not self.prefetch:
            return
        prefetch = vars(self.queue).get('maxsize', 10)
        self.close()
        self._fire_process(self.dataset, prefetch)
class BucketSampler(Sampler):
# noinspection PyMissingConstructor
def __init__(self, buckets: Dict[float, List[int]], batch_max_tokens, batch_size=None, shuffle=False):
"""A bucketing based sampler which groups samples into buckets then creates batches from each bucket.
Args:
            buckets: A dict whose keys are some statistical numbers of each bucket, and whose values are the indices of
samples in each bucket.
batch_max_tokens: Maximum tokens per batch.
batch_size: Maximum samples per batch.
shuffle: ``True`` to shuffle batches and samples in a batch.
"""
self.shuffle = shuffle
self.sizes, self.buckets = zip(*[
(size, bucket) for size, bucket in buckets.items()
])
# the number of chunks in each bucket, which is clipped by
# range [1, len(bucket)]
if batch_size:
self.chunks = [
max(batch_size, min(len(bucket), max(round(size * len(bucket) / batch_max_tokens), 1)))
for size, bucket in zip(self.sizes, self.buckets)
]
else:
self.chunks = [
min(len(bucket), max(round(size * len(bucket) / batch_max_tokens), 1))
for size, bucket in zip(self.sizes, self.buckets)
]
def __iter__(self):
# if shuffle, shuffle both the buckets and samples in each bucket
range_fn = torch.randperm if self.shuffle else torch.arange
for i in range_fn(len(self.buckets)).tolist():
split_sizes = [(len(self.buckets[i]) - j - 1) // self.chunks[i] + 1 for j in range(self.chunks[i])]
# DON'T use `torch.chunk` which may return wrong number of chunks
for batch in range_fn(len(self.buckets[i])).split(split_sizes):
yield [self.buckets[i][j] for j in batch.tolist()]
def __len__(self):
return sum(self.chunks)
class KMeansSampler(BucketSampler):
def __init__(self, lengths, batch_max_tokens, batch_size=None, shuffle=False, n_buckets=1):
"""A bucket sampler which groups samples using KMeans on their lengths.
Args:
lengths: Lengths of each sample, usually measured by number of tokens.
batch_max_tokens: Maximum tokens per batch.
batch_size: Maximum samples per batch.
shuffle: ``True`` to shuffle batches. Samples in the same batch won't be shuffled since the ordered sequence
is helpful to speed up RNNs.
n_buckets: Number of buckets. Clusters in terms of KMeans.
"""
if n_buckets > len(lengths):
n_buckets = 1
self.n_buckets = n_buckets
self.lengths = lengths
buckets = dict(zip(*kmeans(self.lengths, n_buckets)))
super().__init__(buckets, batch_max_tokens, batch_size, shuffle)
class SortingSampler(Sampler):
# noinspection PyMissingConstructor
def __init__(self, lengths: List[int], batch_size=None, batch_max_tokens=None, shuffle=False) -> None:
"""A sampler which sort samples according to their lengths. It takes a continuous chunk of sorted samples to
make a batch.
Args:
lengths: Lengths of each sample, usually measured by number of tokens.
batch_max_tokens: Maximum tokens per batch.
batch_size: Maximum samples per batch.
shuffle: ``True`` to shuffle batches and samples in a batch.
"""
# assert any([batch_size, batch_max_tokens]), 'At least one of batch_size and batch_max_tokens is required'
self.shuffle = shuffle
self.batch_size = batch_size
# self.batch_max_tokens = batch_max_tokens
self.batch_indices = []
num_tokens = 0
mini_batch = []
for i in torch.argsort(torch.tensor(lengths), descending=True).tolist():
# if batch_max_tokens:
if (batch_max_tokens is None or num_tokens + lengths[i] <= batch_max_tokens) and (
batch_size is None or len(mini_batch) < batch_size):
mini_batch.append(i)
num_tokens += lengths[i]
else:
if not mini_batch: # this sequence is longer than batch_max_tokens
mini_batch.append(i)
self.batch_indices.append(mini_batch)
mini_batch = []
num_tokens = 0
else:
self.batch_indices.append(mini_batch)
mini_batch = [i]
num_tokens = lengths[i]
if mini_batch:
self.batch_indices.append(mini_batch)
def __iter__(self):
if self.shuffle:
random.shuffle(self.batch_indices)
for batch in self.batch_indices:
yield batch
def __len__(self) -> int:
return len(self.batch_indices)
class SamplerBuilder(AutoConfigurable, ABC):
@abstractmethod
def build(self, lengths: List[int], shuffle=False, gradient_accumulation=1, **kwargs) -> Sampler:
"""Build a ``Sampler`` given statistics of samples and other arguments.
Args:
lengths: The lengths of samples.
shuffle: ``True`` to shuffle batches. Note samples in each mini-batch are not necessarily shuffled.
gradient_accumulation: Number of mini-batches per update step.
**kwargs: Other arguments to be passed to the constructor of the sampler.
"""
pass
def __call__(self, lengths: List[int], shuffle=False, **kwargs) -> Sampler:
return self.build(lengths, shuffle, **kwargs)
def scale(self, gradient_accumulation):
r"""Scale down the ``batch_size`` and ``batch_max_tokens`` to :math:`\frac{1}{\text{gradient_accumulation}}`
of them respectively.
Args:
gradient_accumulation: Number of mini-batches per update step.
Returns:
tuple(int,int): batch_size, batch_max_tokens
"""
batch_size = self.batch_size
batch_max_tokens = self.batch_max_tokens
if gradient_accumulation:
if batch_size:
batch_size //= gradient_accumulation
if batch_max_tokens:
batch_max_tokens //= gradient_accumulation
return batch_size, batch_max_tokens
class SortingSamplerBuilder(SortingSampler, SamplerBuilder):
# noinspection PyMissingConstructor
def __init__(self, batch_size=None, batch_max_tokens=None) -> None:
"""Builds a :class:`~hanlp.common.dataset.SortingSampler`.
Args:
batch_max_tokens: Maximum tokens per batch.
batch_size: Maximum samples per batch.
"""
self.batch_max_tokens = batch_max_tokens
self.batch_size = batch_size
def build(self, lengths: List[int], shuffle=False, gradient_accumulation=1, **kwargs) -> Sampler:
batch_size, batch_max_tokens = self.scale(gradient_accumulation)
return SortingSampler(lengths, batch_size, batch_max_tokens, shuffle)
def __len__(self) -> int:
return 1
class KMeansSamplerBuilder(KMeansSampler, SamplerBuilder):
# noinspection PyMissingConstructor
def __init__(self, batch_max_tokens, batch_size=None, n_buckets=1):
"""Builds a :class:`~hanlp.common.dataset.KMeansSampler`.
Args:
batch_max_tokens: Maximum tokens per batch.
batch_size: Maximum samples per batch.
n_buckets: Number of buckets. Clusters in terms of KMeans.
"""
self.n_buckets = n_buckets
self.batch_size = batch_size
self.batch_max_tokens = batch_max_tokens
def build(self, lengths: List[int], shuffle=False, gradient_accumulation=1, **kwargs) -> Sampler:
batch_size, batch_max_tokens = self.scale(gradient_accumulation)
return KMeansSampler(lengths, batch_max_tokens, batch_size, shuffle, self.n_buckets)
def __len__(self) -> int:
return 1
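# A short sketch of how the sampler builders above are typically wired together
# (illustrative only; `dataset`, `vocabs` and the `token_input_ids` field are assumptions):
#
#     lengths = [len(sample['token_input_ids']) for sample in dataset]
#     builder = SortingSamplerBuilder(batch_max_tokens=5000)
#     batch_sampler = builder(lengths, shuffle=True)
#     loader = PadSequenceDataLoader(dataset, batch_sampler=batch_sampler, vocabs=vocabs)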
class TableDataset(TransformableDataset):
def __init__(self,
data: Union[str, List],
transform: Union[Callable, List] = None,
cache=None,
delimiter='auto',
strip=True,
headers=None) -> None:
self.headers = headers
self.strip = strip
self.delimiter = delimiter
super().__init__(data, transform, cache)
def load_file(self, filepath: str):
for idx, cells in enumerate(read_cells(filepath, strip=self.strip, delimiter=self.delimiter)):
if not idx and not self.headers:
self.headers = cells
if any(len(h) > 32 for h in self.headers):
warnings.warn('As you did not pass in `headers` to `TableDataset`, the first line is regarded as '
                                  'headers. However, some headers are longer than 32 characters, which might be '
                                  'wrong. To be sure, pass `headers=...` explicitly.')
else:
yield dict(zip(self.headers, cells))
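# A minimal, self-contained demo of two helpers defined above (a sketch, assuming torch and
# the hanlp imports at the top of this module are available; run the module directly to try it):
if __name__ == '__main__':
    # Pad two variable-length id sequences into a single (2, 3) LongTensor.
    padded = PadSequenceDataLoader.pad_data([[1, 2, 3], [4]], pad=0, dtype=torch.long)
    print(padded)  # tensor([[1, 2, 3], [4, 0, 0]])
    # Group sample indices (lengths 5, 3, 4, 2) into batches of at most 2 samples, longest first.
    sampler = SortingSampler([5, 3, 4, 2], batch_size=2)
    print(list(sampler))  # [[0, 2], [1, 3]]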
|
spotify.py
|
import os
import typing
from multiprocessing import Pipe, Process
import uvicorn
from pyfy import ApiError, ClientCreds, Spotify
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
def _code_server(connection):
async def homepage(request):
code = request.query_params["code"]
connection.send(code)
connection.close()
return JSONResponse("ok")
app = Starlette(routes=[Route("/", homepage)])
uvicorn.run(app, host="0.0.0.0", port=4444, log_level="error")
def wait_for_code():
parent_conn, child_conn = Pipe()
p = Process(target=_code_server, args=(child_conn,))
p.start()
code = parent_conn.recv()
p.terminate()
return code
def get_spotify_client(hostname) -> Spotify:
client = ClientCreds(
client_id=os.getenv("SPOTIFY_CLIENT_KEY"),
client_secret=os.getenv("SPOTIFY_CLIENT_SECRET"),
scopes=["user-read-currently-playing", "user-read-recently-played"],
redirect_uri=f"http://{hostname}:4444",
)
return Spotify(client_creds=client)
def authorize(spotify: Spotify) -> None:
print(spotify.auth_uri())
code = wait_for_code()
spotify.build_user_creds(grant=code, set_user_creds=True)
def get_last_song(spotify: Spotify) -> typing.Optional[typing.Dict]:
try:
current_song = spotify.currently_playing()
if current_song:
return current_song["item"]
else:
last_tracks = spotify.recently_played_tracks(limit=1)["items"]
return last_tracks[0]["track"] if last_tracks else None
except ApiError:
return None
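# A minimal usage sketch (assumes SPOTIFY_CLIENT_KEY / SPOTIFY_CLIENT_SECRET are set in the
# environment and that port 4444 on the given hostname is reachable for the OAuth redirect):
if __name__ == "__main__":
    spotify = get_spotify_client("localhost")
    authorize(spotify)           # prints the auth URL and waits for the redirect carrying the code
    song = get_last_song(spotify)
    if song:
        print(song.get("name"))  # Spotify track objects expose a "name" field
    else:
        print("No current or recently played track found.")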
|
corpusiter.py
|
#!/usr/bin/env python
# coding: utf-8
#
# Usage:
# Author: wxm71(weixing.mei@aispeech.com)
import pdb
import logging
import threading
import numpy as np
import mxnet as mx
import multiprocessing
from multiprocessing import Queue
from .utils import batchify
from .generator import NceLabGenerator
class NceCorpusIter(mx.io.DataIter):
def __init__(self, source, batch_size, bptt, numlab, negative, num_parall=2):
super(NceCorpusIter, self).__init__()
self.logger = logging.getLogger('NceCorpusIter')
# [num_of_batch, batch_size]
self.source_ = batchify(np.array(source), batch_size)
self.batch_size = batch_size
self.bptt = bptt
self.numlab = numlab
self.num_parall = num_parall
self.negative = negative
self.ppoll = []
self.queue = Queue(20)
self.jobend_pid = None
self.running = False
label_shape = (bptt, batch_size, numlab)
self.provide_label = [
mx.io.DataDesc(name='label', shape=label_shape, dtype='int32'),
mx.io.DataDesc(name='label_weight', shape=label_shape, dtype='float32')]
self.provide_data = [mx.io.DataDesc(name='data', shape=(bptt, batch_size), dtype='int32')]
def _start(self):
if self.running:
self.logger.warning('queue is not empty, just skip')
return
else:
flag = False
for t in self.ppoll:
flag = t.is_alive()
if flag:
break
            if flag:
                self.logger.warning('an alive worker process exists, skip corpus iter start')
                return
def target_func(data, idxbeg, idxend, batch_size, bptt, numlab, negative, queue):
generator = NceLabGenerator(data, idxbeg, idxend, batch_size, bptt, numlab, negative, queue)
generator.run()
self.ppoll = []
nbatch = int(self.source_.shape[0]/self.bptt)
num_parall = self.num_parall
if nbatch<num_parall:
num_parall = nbatch
nstep = int(nbatch/num_parall)*self.bptt
idxbeg = 0
for i in range(0, num_parall-1):
pid = multiprocessing.Process(target=target_func, args=(self.source_, idxbeg, idxbeg+nstep,
self.batch_size, self.bptt, self.numlab, self.negative, self.queue) )
pid.start()
self.ppoll.append(pid)
idxbeg += nstep
idxend = len(self.source_)-1
pid = multiprocessing.Process(target=target_func, args=(self.source_, idxbeg, idxend,
self.batch_size, self.bptt, self.numlab, self.negative, self.queue) )
pid.start()
self.ppoll.append(pid)
# start job end thread
def jobend_proc():
self.logger.debug('wait for data generator exit')
for pid in self.ppoll:
pid.join()
self.logger.debug('all data generator finished')
self.queue.put(None)
self.jobend_pid = threading.Thread(target=jobend_proc)
self.jobend_pid.start()
self.running = True
def _stop(self):
pass
def getdata(self):
return self._next_data
def getlabel(self):
return self._next_label
def iter_next(self):
batchdata = self.queue.get()
if batchdata is None:
return False
else:
self._next_data = [ batchdata[0] ]
self._next_label = [ batchdata[1], batchdata[2] ]
return True
def next(self):
if not self.running:
self._start()
if self.iter_next():
return mx.io.DataBatch(data=self.getdata(), provide_data=self.provide_data,
label=self.getlabel(), provide_label=self.provide_label)
else:
self.running = False
raise StopIteration
def reset(self):
self._stop()
self._start()
|
worker.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
import threading
import traceback
import socket
from collections import deque
from multiprocessing import reduction
from nvidia.dali._utils.external_source_impl import SourceKind, _is_generator_function
from nvidia.dali._multiproc.shared_batch import SharedBatchWriter, SharedBatchMeta, BufShmChunk, \
assert_valid_data_type, read_shm_message, write_shm_message
from nvidia.dali._multiproc.messages import CompletedTask, WorkerArgs, ShmMessageDesc, ScheduledTask
from nvidia.dali._multiproc.shared_queue import Dispatcher
class _WorkerProcessingResult:
"""Internal worker message containing computed minibatch or error message sent from the main thread
to the dispatcher thread. The dispatcher thread serializes the batch or the error and
forwards the result as `CompletedTask` to the main process"""
def __init__(self, scheduled, shm_chunk, data_batch=None, exception=None,
traceback_str=None):
self.context_i = scheduled.context_i
self.scheduled_i = scheduled.scheduled_i
self.minibatch_i = scheduled.task.minibatch_i
self.shm_chunk = shm_chunk
self.data_batch = data_batch
self.exception = exception
self.traceback_str = traceback_str
@classmethod
def done(cls, scheduled, shm_chunk, data_batch):
return cls(scheduled, shm_chunk, data_batch)
@classmethod
def failed(cls, scheduled, shm_chunk, exception, traceback_str=None):
return cls(scheduled, shm_chunk, exception=exception, traceback_str=traceback_str)
def is_failed(self):
return self.exception is not None
class SharedBatchDispatcher(Dispatcher):
"""SharedBatchesDispatcher serializes batches, puts them into provided
shared memory chunks along with completed task description and puts information
about ready chunks into the `queue`. It processes tasks in a separate thread to
overlap serialization of minibatches with next minibatches computation in case of
a callback waiting on IO extensively and to avoid multiple worker processes
waiting on inter-process ShmQueue access"""
def __init__(self, worker_id, result_queue, recv_queues):
# close receiving queues if writing results fails to unblock
# the main thread that may be waiting on new tasks to process
def on_thread_exit():
for queue in recv_queues:
queue.close()
super().__init__(result_queue, on_thread_exit)
self.worker_id = worker_id
def _serialize_failed_task(self, processed_task : _WorkerProcessingResult):
"""
Puts CompletedTask instance (that describes an error encountered when producing batch) in the provided
shared memory chunk (`processed_task.shm_chunk`).
Returns `ShmMessageDesc` instance, that describes shared memory chunk and placement (offset=0, size) of the
serialized CompletedTask instance in the chunk.
"""
shm_chunk = processed_task.shm_chunk
completed_task = CompletedTask.failed(self.worker_id, processed_task)
return write_shm_message(
self.worker_id, shm_chunk, completed_task, 0, resize=True)
def _serialize_done_task(self, processed_task : _WorkerProcessingResult):
"""
Puts produced batch in the provided shared memory chunk (`processed_task.shm_chunk`).
Layout of the data in the chunk:
[1. samples from the batch | 2. batch meta-data | 3. completed task].
1. Binary encoded samples from the batch (underlying data of numpy arrays),
aimed to be used as initialization buffers for arrays with no additional copy or deserialization.
2. Pickled list of meta-data of each sample, such as the sample's binary data offset in the chunk,
a shape and a type of the array.
3. Pickled CompletedTask instance (that contains offset and size of the serialized list from the second point).
Returns `ShmMessageDesc` instance, that describes shared memory chunk and placement (offset, size) of the
serialized CompletedTask instance in the chunk.
"""
shm_chunk = processed_task.shm_chunk
sbw = SharedBatchWriter(shm_chunk, processed_task.data_batch)
batch_meta = SharedBatchMeta.from_writer(sbw)
completed_task = CompletedTask.done(self.worker_id, processed_task, batch_meta)
return write_shm_message(
self.worker_id, shm_chunk, completed_task, sbw.total_size, resize=True)
def serialize_msgs(self, processed_tasks: List[_WorkerProcessingResult]):
shm_msgs = []
for processed_task in processed_tasks:
if processed_task.is_failed(): # one of the tasks failed
shm_msgs.append(self._serialize_failed_task(processed_task))
else:
shm_msgs.append(self._serialize_done_task(processed_task))
return shm_msgs
class SimpleQueueTaskReceiver:
"""
Simple wrapper around shm queue, pops first element from the queue
and returns
"""
def __init__(self, queue):
self.queue = queue
def get_task(self):
recv = self.queue.get()
if recv is None:
return
[task] = recv
return task
def get_recv_queues(self):
return [self.queue]
def close(self):
self.queue.close()
class MixedTaskReceiver:
"""
Mixes eager and idle worker threads each taking tasks from a different inter-process queue and
putting the tasks into a single (worker's internal) `task_queue`. Eager worker thread takes tasks from
the dedicated queue, i.e. tasks that can be processed only by the given worker process.
Idle worker thread takes tasks from the general queue, i.e. tasks that can be processed by
any worker process from the pool.
Eager worker reads tasks whenever any is available and moves them into the worker's internal queue,
whereas idle worker serves as a fallback that aims to read a single item only if the internal queue is empty
and the main thread does not process any task (is idle).
"""
class EagerReceiverWorker:
"""
Worker thread waiting for any tasks available in the inter-process queue `dedicated_task_queue`.
If anything is available, it takes all the items and puts them into worker's internal task queue.
"""
def __init__(self, receiver_state, dedicated_task_queue):
self.receiver_state = receiver_state
self.dedicated_task_queue = dedicated_task_queue
self.thread = threading.Thread(target=self._receiver_loop, daemon=True)
self.thread.start()
def _receiver_loop(self):
try:
while True:
recv = self.dedicated_task_queue.get(num_samples=None)
if recv is None:
break
self.receiver_state.insert_task(recv)
finally:
self.receiver_state.insert_task(None)
def close(self):
self.dedicated_task_queue.close()
self.thread.join()
class IdleReceiverWorker:
"""
Worker thread that, when notified, takes a single task from the inter-process queue and
puts it into worker's internal task queue. It aims to take the task only if the main thread
reports it has no tasks to process - it rechecks that condition if it had to wait on empty
inter-process queue.
"""
def __init__(self, receiver_state, general_task_queue):
self.receiver_state = receiver_state
self.general_task_queue = general_task_queue
self.thread = threading.Thread(target=self._receiver_loop, daemon=True)
self.thread.start()
def _receiver_loop(self):
try:
while True:
if not self.receiver_state.wait_for_idle():
break
# Worker has no dedicated work to do (is idle), so take one task from general queue.
# If general queue is empty, the call will block and then recheck the condition
recv = self.general_task_queue.get(predicate=self.receiver_state.is_idle_and_uninterrupted)
if recv is None:
break
if len(recv): # if `is_idle_and_uninterrupted` returned False, recv is an empty list
self.receiver_state.insert_task(recv)
finally:
self.receiver_state.insert_task(None)
def close(self):
self.receiver_state.interrupt_idle()
self.general_task_queue.close()
self.thread.join()
class MixedReceiverState:
def __init__(self):
self.lock = threading.Lock()
self.tasks_cv = threading.Condition(lock=self.lock)
self.idle_cv = threading.Condition(lock=self.lock)
self.is_idle = False
self.is_interrupted = False
self.task_queue = deque()
def _is_idle_state(self):
return self.is_idle and len(self.task_queue) == 0
def is_idle_and_uninterrupted(self):
with self.lock:
return not self.is_interrupted and self._is_idle_state()
def wait_for_idle(self):
with self.lock:
while not self.is_interrupted and not self._is_idle_state():
self.idle_cv.wait()
return not self.is_interrupted
def interrupt_idle(self):
with self.lock:
self.is_interrupted = True
self.idle_cv.notify()
def insert_task(self, recv):
with self.lock:
if recv is None:
self.task_queue.appendleft(recv)
else:
self.task_queue.extend(recv)
self.tasks_cv.notify()
def get_task(self):
with self.lock:
waited = False
while len(self.task_queue) == 0:
# there's only one consumer of task_queue, so no stealing of tasks between waits can happen
if not waited:
waited = True
self.is_idle = True
self.idle_cv.notify()
self.tasks_cv.wait()
self.is_idle = False
task = self.task_queue.popleft()
return task
def __init__(self, dedicated_task_queue, general_task_queue):
self.dedicated_task_queue = dedicated_task_queue
self.general_task_queue = general_task_queue
self.state = self.MixedReceiverState()
self.receivers = []
try:
self.receivers.append(self.EagerReceiverWorker(self.state, self.dedicated_task_queue))
self.receivers.append(self.IdleReceiverWorker(self.state, self.general_task_queue))
except:
self.close()
raise
def get_recv_queues(self):
return [self.general_task_queue, self.dedicated_task_queue]
def get_task(self):
return self.state.get_task()
def close(self):
for receiver in self.receivers:
receiver.close()
self.receivers.clear()
class IterableSource:
"""Wraps iterator/generator passed to External Source to enforce ES `cycle` policy specified by the user.
    It is a counterpart of the _CycleIter/_CycleGenIter wrappers from non-parallel mode.
    However, due to prefetching in parallel mode, `cycle`='raise' will raise StopIteration in consecutive calls
    until the new epoch starts (which happens with the pipeline.reset call)"""
def __init__(self, source_desc):
self.source_desc = source_desc
self._reset_iter(0)
def __call__(self, scheduled : ScheduledTask):
if self.raised_stop_iter:
# if iterator runs in "raise" mode and a new epoch started (i.e. source context was reset)
if self.source_desc.cycle == "raise" and self.epoch_start < scheduled.epoch_start:
self._reset_iter(scheduled.epoch_start)
else:
raise StopIteration
return self._get_next()
def _reset_iter(self, epoch_start):
self.iter = IterableSource.get_iter(self.source_desc)
self.raised_stop_iter = False
self.epoch_start = epoch_start
def _get_next(self):
try:
return next(self.iter)
except StopIteration:
self.raised_stop_iter = True
if self.source_desc.cycle != "quiet" and self.source_desc.cycle is not True:
raise
# in quiet mode immediately reset the source and return the first iteration
self.iter = IterableSource.get_iter(self.source_desc)
next_iter = next(self.iter)
# Set the `raised_stop_iter` flag to False after the __next__ call, so that, if it raises StopIteration
# immediately after the reset, the wrapper can consistently raise StopIteration from then on.
# The `epoch_start` is not updated - keeping track of it is not necessary in the quiet mode
self.raised_stop_iter = False
return next_iter
@staticmethod
def get_iter(source_desc):
source = source_desc.source
if _is_generator_function(source):
source = source()
return iter(source)
class CallableSource:
def __init__(self, source_desc):
self.callback = source_desc.source
def __call__(self, scheduled : ScheduledTask):
task = scheduled.task
if task.is_sample_mode():
data_batch = [self.callback(sample_info) for sample_info in task.sample_range]
else:
data_batch = self.callback(*task.batch_args)
return data_batch
def get_source_from_desc(source_descs):
if source_descs.kind == SourceKind.CALLABLE:
return CallableSource(source_descs)
elif source_descs.kind in (SourceKind.ITERABLE, SourceKind.GENERATOR_FUNC):
return IterableSource(source_descs)
raise RuntimeError("Unsupported source type")
class WorkerContext:
"""Initializes structures necessary for a worker process to receive,
compute and send back tasks."""
def __init__(self, worker_args : WorkerArgs):
self.worker_id = worker_args.worker_id
self.callbacks = self._init_callbacks(worker_args.source_descs, worker_args.callback_pickler)
self.result_queue = worker_args.result_queue
self.general_task_queue = worker_args.general_task_queue
self.dedicated_task_queue = worker_args.dedicated_task_queue
shm_chunks = worker_args.shm_chunks
if worker_args.start_method != "fork":
setup_socket = worker_args.setup_socket
# NOTE when making any changes here, make sure to reflect them in the main process, so that
# it sends handles to objects in the same order they are set to objects here
self._recv_queue_handles(setup_socket)
for shm_chunk in shm_chunks:
shm_chunk.open_shm(reduction.recv_handle(setup_socket))
setup_socket.shutdown(socket.SHUT_RDWR)
setup_socket.close()
self.shm_chunks = {shm_chunk.shm_chunk_id : shm_chunk for shm_chunk in shm_chunks}
self.task_receiver = None
self.batch_dispatcher = None
try:
self.task_receiver = self._init_task_receiver()
self.batch_dispatcher = SharedBatchDispatcher(
worker_args.worker_id, worker_args.result_queue, self.task_receiver.get_recv_queues())
except:
self.close()
raise
# let the main process know that the worker started and shared resources setup is done
worker_args.result_queue.put([ShmMessageDesc(self.worker_id, 0, 0, 0, 0)])
def _init_callbacks(self, source_descs, callback_pickler):
if callback_pickler is not None:
for source_desc in source_descs.values():
source_desc.source = callback_pickler.loads(source_desc.source)
return {
context_i : get_source_from_desc(source_desc)
for context_i, source_desc in source_descs.items()}
def _recv_queue_handles(self, setup_socket):
self.result_queue.open_shm(reduction.recv_handle(setup_socket))
if self.general_task_queue is not None:
self.general_task_queue.open_shm(reduction.recv_handle(setup_socket))
if self.dedicated_task_queue is not None:
self.dedicated_task_queue.open_shm(reduction.recv_handle(setup_socket))
def _init_task_receiver(self):
assert self.general_task_queue is not None or self.dedicated_task_queue is not None
if self.dedicated_task_queue is None or self.general_task_queue is None:
return SimpleQueueTaskReceiver(self.general_task_queue or self.dedicated_task_queue)
return MixedTaskReceiver(self.dedicated_task_queue, self.general_task_queue)
def get_task(self) -> Tuple[Optional[ScheduledTask], Optional[BufShmChunk]]:
"""
Returns scheduled task and shm_chunk where results should be placed
"""
scheduled_meta = self.task_receiver.get_task()
if scheduled_meta is None:
return None, None
shm_chunk = self.shm_chunks[scheduled_meta.shm_chunk_id]
scheduled = read_shm_message(shm_chunk, scheduled_meta)
return scheduled, shm_chunk
def get_callback(self, scheduled):
return self.callbacks[scheduled.context_i]
def dispatch(self, processed : _WorkerProcessingResult):
return self.batch_dispatcher.append(processed)
def close(self):
        if self.task_receiver is not None:
            self.task_receiver.close()
        if self.batch_dispatcher is not None:
            self.batch_dispatcher.close()
def worker(worker_args : WorkerArgs):
"""Entry point of a worker process.
Computes minibatches in the main thread.
"""
worker_context = WorkerContext(worker_args)
try:
while True:
scheduled, shm_chunk = worker_context.get_task()
if scheduled is None:
break
callback = worker_context.get_callback(scheduled)
try:
data_batch = callback(scheduled)
for sample in data_batch:
assert_valid_data_type(sample)
except Exception as exception:
tb_str = traceback.format_exc()
processed = _WorkerProcessingResult.failed(scheduled, shm_chunk, exception, tb_str)
else:
processed = _WorkerProcessingResult.done(scheduled, shm_chunk, data_batch)
worker_context.dispatch(processed)
finally:
worker_context.close()
|
web.py
|
# coding=utf-8
from flask import Flask, request, url_for, redirect, session
import logging
from src import telegramClient
from src import config
import threading
import requests
import feedparser
from datetime import datetime
from time import mktime
from wtforms import form
import wtforms.fields
from src import db
import flask_admin
from flask_admin import Admin, expose
from flask_admin.contrib.pymongo import ModelView
from flask_simplelogin import SimpleLogin, login_required, is_logged_in
from src import subscriptiontask
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
app = Flask(__name__)
app.secret_key = config.FLASK_SECRET
SimpleLogin(app)
class MyAdminIndexView(flask_admin.AdminIndexView):
@expose('/')
def index(self):
if not is_logged_in('admin'):
return redirect(url_for('simplelogin.login', next=request.url))
return super(MyAdminIndexView, self).index()
class ChannelsForm(form.Form):
channel_id = wtforms.fields.StringField('channel_id')
uploader_name = wtforms.fields.StringField('uploader_name')
class ChannelsView(ModelView):
column_list = ('channel_id', 'uploader_name', 'created')
column_labels = {'channel_id': 'Channel ID', 'uploader_name': 'Channel name'}
form = ChannelsForm
def on_model_change(self, form, model, is_created):
if is_created:
subscriptiontask.subscribe_channel(model["channel_id"])
model['created'] = datetime.utcnow()
def on_model_delete(self, model):
subscriptiontask.unsubscribe_channel(model["channel_id"])
def is_accessible(self):
return is_logged_in('admin')
def inaccessible_callback(self, name, **kwargs):
# redirect to login page if user doesn't have access
return redirect(url_for('simplelogin.login', next=request.url))
class BackupUrl(flask_admin.BaseView):
@expose('/', methods=('GET', 'POST'))
def index(self):
if not is_logged_in('admin'):
return redirect(url_for('simplelogin.login', next=request.url))
result = None
if request.method == 'POST':
url = request.form.get('url')
if url:
download_and_send(url)
                    result = 'In process'
else:
result = 'url not set'
session['result'] = result
return redirect(url_for('upload_from_url.index'))
elif request.method == 'GET':
result = session.pop('result', None)
return self.render('upload_from_url.html', result=result)
admin = Admin(app, name=config.APP_NAME, template_mode='bootstrap3', index_view=MyAdminIndexView(),
base_template='my_master.html')
admin.add_view(ChannelsView(db.db['channels']))
admin.add_view(BackupUrl(name='Backup From Youtube URL', endpoint='upload_from_url'))
def download_and_send(url):
threading.Thread(target=telegramClient.main, args=(url,)).start()
@app.route('/', methods=['GET'])
def go_to_admin():
return redirect(url_for('admin.index'))
@app.route('/tasks/subscribepubsubhub', methods=['GET'])
def pubsubhubbub_subscribe():
mode = request.args.get('mode') or 'subscribe'
youtube_channel_id = request.args.get('channel_id')
if not youtube_channel_id:
logging.log(logging.WARNING, 'youtube channel id is missing')
return 'not ok. youtube channel id is missing'
if mode not in ('subscribe', 'unsubscribe'):
logging.log(logging.WARNING, 'request mode is unknown')
return 'not ok. request mode is unknown'
    log_prefix = 'un' if mode == 'unsubscribe' else ''
data = {
'hub.callback': 'https://{}.herokuapp.com/pubsubhub{}'.format(
config.APP_NAME.lower(), config.PUBSUBHUB_SECRET_PART),
'hub.mode': mode,
'hub.topic': 'https://www.youtube.com/xml/feeds/videos.xml?channel_id=%s' % youtube_channel_id
}
resp = requests.post(
url='https://pubsubhubbub.appspot.com/subscribe',
data=data
)
if resp.status_code == 202:
logging.log(logging.INFO, "Channel %s %ssubscribed successfully" % (log_prefix, youtube_channel_id))
return 'ok'
else:
logging.log(logging.WARNING, "Channel %s %ssubscribe FAILED" % (log_prefix, youtube_channel_id))
return 'not ok'
@app.route('/pubsubhub' + config.PUBSUBHUB_SECRET_PART, methods=['GET', 'POST'])
def pubsubhubbub_callback():
# pubsubhub subscription verification procedure
# http://pubsubhubbub.github.io/PubSubHubbub/pubsubhubbub-core-0.4.html#verifysub
if request.args.get('hub.challenge'):
return request.args.get('hub.challenge')
data = request.get_data()
logging.log(logging.INFO, data)
feed = feedparser.parse(data)
for entry in feed['entries']:
# if entry.yt_channelid not in allowed_channels:
# logging.log(logging.WARNING, "This channel not allowed: %s" % entry.author.uri)
# break
tnow = datetime.utcnow()
dt = datetime.fromtimestamp(mktime(entry.published_parsed))
if (tnow - dt).total_seconds() / 3600 > 1:
logging.log(logging.INFO, 'Too old entry %s' % entry.published)
break
else:
logging.log(logging.INFO, 'Adding new youtube video %s' % entry.yt_videoid)
download_and_send(entry.link)
break
return 'ok'
if __name__ == '__main__':
app.run(host='127.0.0.1', port=80, debug=True)
|
serialize_tensorboard.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Consume and serialize all of the data from a running TensorBoard instance.
This program connects to a live TensorBoard backend at the given port, and saves
all of the data to local disk JSON in a predictable format.
This makes it easy to mock out the TensorBoard backend so that the frontend
may be tested in isolation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import json
import os
import os.path
import shutil
import StringIO
import threading
import urllib
import six
from six.moves import http_client
import tensorflow as tf
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
tf.flags.DEFINE_string('logdir', None, """the logdir to pass to the TensorBoard
backend; data will be read from this logdir for serialization.""")
tf.flags.DEFINE_string('target', None, """The directory where serialized data
will be written.""")
tf.flags.DEFINE_boolean('overwrite', False, """Whether to remove and overwrite
TARGET if it already exists.""")
tf.flags.DEFINE_boolean(
'purge_orphaned_data', True, 'Whether to purge data that '
'may have been orphaned due to TensorBoard restarts. '
'Disabling purge_orphaned_data can be used to debug data '
'disappearance.')
FLAGS = tf.flags.FLAGS
BAD_CHARACTERS = "#%&{}\\/<>*? $!'\":@+`|="
DEFAULT_SUFFIX = '.json'
IMAGE_SUFFIX = '.png'
AUDIO_SUFFIX = '.wav'
GRAPH_SUFFIX = '.pbtxt'
def Url(route, params):
"""Takes route and query params, and produce encoded url for that asset."""
out = route
if params:
# sorting ensures a unique filename for each query
sorted_params = sorted(six.iteritems(params))
out += '?' + urllib.urlencode(sorted_params)
return out
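# Editor's illustrative sketch (not part of the original script): because the
# query parameters are sorted before encoding, the same route/params pair always
# produces the same URL and hence the same serialized filename. Never called.
def _UrlExample():
  """Return 'scalars?run=train&tag=loss' for params given in any order."""
  return Url('scalars', {'tag': 'loss', 'run': 'train'})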
def Clean(s):
"""Clean a string so it can be used as a filepath."""
for c in BAD_CHARACTERS:
s = s.replace(c, '_')
return s
class TensorBoardStaticSerializer(object):
"""Serialize all the routes from a TensorBoard server to static json."""
def __init__(self, connection, target_path):
self.connection = connection
EnsureDirectoryExists(os.path.join(target_path, 'data'))
self.path = target_path
def GetAndSave(self, url, save_suffix, unzip=False):
"""GET the given url. Serialize the result at clean path version of url."""
self.connection.request('GET',
'/data/' + url,
headers={'content-type': 'text/plain'})
response = self.connection.getresponse()
destination = self.path + '/data/' + Clean(url) + save_suffix
if response.status != 200:
raise IOError(url)
if unzip:
s = StringIO.StringIO(response.read())
content = gzip.GzipFile(fileobj=s).read()
else:
content = response.read()
with open(destination, 'w') as f:
f.write(content)
return content
def GetRouteAndSave(self, route, params=None):
"""GET given route and params. Serialize the result. Return as JSON."""
url = Url(route, params)
return json.loads(self.GetAndSave(url, DEFAULT_SUFFIX))
def Run(self):
"""Serialize everything from a TensorBoard backend."""
# get the runs object, which is an index for every tag.
runs = self.GetRouteAndSave('runs')
# collect sampled data.
self.GetRouteAndSave('scalars')
# now let's just download everything!
for run, tag_type_to_tags in six.iteritems(runs):
for tag_type, tags in six.iteritems(tag_type_to_tags):
try:
if tag_type == 'graph':
# in this case, tags is a bool which specifies if graph is present.
if tags:
url = Url('graph', {'run': run})
self.GetAndSave(url, GRAPH_SUFFIX, unzip=True)
elif tag_type == 'images':
for t in tags:
images = self.GetRouteAndSave('images', {'run': run, 'tag': t})
for im in images:
url = 'individualImage?' + im['query']
# pull down the images themselves.
self.GetAndSave(url, IMAGE_SUFFIX)
elif tag_type == 'audio':
for t in tags:
audio = self.GetRouteAndSave('audio', {'run': run, 'tag': t})
for snd in audio:
url = 'individualAudio?' + snd['query']
# pull down the audio clips themselves
self.GetAndSave(url, AUDIO_SUFFIX)
elif tag_type == 'run_metadata':
for t in tags:
url = Url('run_metadata', {'run': run, 'tag': t})
self.GetAndSave(url, GRAPH_SUFFIX, unzip=True)
else:
for t in tags:
# Save this, whatever it is :)
self.GetRouteAndSave(tag_type, {'run': run, 'tag': t})
except IOError as e:
PrintAndLog('Retrieval failed for %s/%s/%s' % (tag_type, run, tags),
tf.logging.WARN)
PrintAndLog('Got Exception: %s' % e, tf.logging.WARN)
PrintAndLog('continuing...', tf.logging.WARN)
continue
def EnsureDirectoryExists(path):
if not os.path.exists(path):
os.makedirs(path)
def PrintAndLog(msg, lvl=tf.logging.INFO):
tf.logging.log(lvl, msg)
print(msg)
def main(unused_argv=None):
target = FLAGS.target
logdir = FLAGS.logdir
if not target or not logdir:
PrintAndLog('Both --target and --logdir are required.', tf.logging.ERROR)
return -1
if os.path.exists(target):
if FLAGS.overwrite:
if os.path.isdir(target):
shutil.rmtree(target)
else:
os.remove(target)
else:
PrintAndLog('Refusing to overwrite target %s without --overwrite' %
target, tf.logging.ERROR)
return -2
path_to_run = server.ParseEventFilesSpec(FLAGS.logdir)
PrintAndLog('About to load Multiplexer. This may take some time.')
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE,
purge_orphaned_data=FLAGS.purge_orphaned_data)
server.ReloadMultiplexer(multiplexer, path_to_run)
PrintAndLog('Multiplexer load finished. Starting TensorBoard server.')
s = server.BuildServer(multiplexer, 'localhost', 0)
server_thread = threading.Thread(target=s.serve_forever)
server_thread.daemon = True
server_thread.start()
connection = http_client.HTTPConnection('localhost', s.server_address[1])
PrintAndLog('Server setup! Downloading data from the server.')
x = TensorBoardStaticSerializer(connection, target)
x.Run()
PrintAndLog('Done downloading data.')
connection.close()
s.shutdown()
s.server_close()
if __name__ == '__main__':
tf.app.run()
|
testing.py
|
"""Testing utilities."""
import os
import re
import threading
import functools
from tempfile import NamedTemporaryFile
from numpy import testing
import numpy as np
from ._warnings import expected_warnings
import warnings
from .. import data, io, img_as_uint, img_as_float, img_as_int, img_as_ubyte
SKIP_RE = re.compile(r"(\s*>>>.*?)(\s*)#\s*skip\s+if\s+(.*)$")
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def doctest_skip_parser(func):
""" Decorator replaces custom skip test markup in doctests
Say a function has a docstring::
>>> something # skip if not HAVE_AMODULE
>>> something + else
>>> something # skip if HAVE_BMODULE
This decorator will evaluate the expression after ``skip if``. If this
evaluates to True, then the comment is replaced by ``# doctest: +SKIP``. If
False, then the comment is just removed. The expression is evaluated in the
``globals`` scope of `func`.
For example, if the module global ``HAVE_AMODULE`` is False, and module
global ``HAVE_BMODULE`` is False, the returned function will have docstring::
>>> something # doctest: +SKIP
>>> something + else
>>> something
"""
lines = func.__doc__.split('\n')
new_lines = []
for line in lines:
match = SKIP_RE.match(line)
if match is None:
new_lines.append(line)
continue
code, space, expr = match.groups()
try:
# Works as a function decorator
if eval(expr, func.__globals__):
code = code + space + "# doctest: +SKIP"
except AttributeError:
# Works as a class decorator
if eval(expr, func.__init__.__globals__):
code = code + space + "# doctest: +SKIP"
new_lines.append(code)
func.__doc__ = "\n".join(new_lines)
return func
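# Editor's illustrative sketch (not part of the original module): a hypothetical
# module-level flag plus a trivially decorated function, showing the ``# skip if``
# markup that ``doctest_skip_parser`` rewrites at decoration time.
HAVE_FOOMODULE = False  # hypothetical flag, used only by the example below


@doctest_skip_parser
def _doctest_skip_parser_example():
    """Example markup (editor's sketch).

    >>> 1 + 1  # skip if not HAVE_FOOMODULE
    2
    """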
def roundtrip(img, plugin, suffix):
"""Save and read an image using a specified plugin"""
if '.' not in suffix:
suffix = '.' + suffix
temp_file = NamedTemporaryFile(suffix=suffix, delete=False)
fname = temp_file.name
temp_file.close()
io.imsave(fname, img, plugin=plugin)
new = io.imread(fname, plugin=plugin)
try:
os.remove(fname)
except Exception:
pass
return new
def color_check(plugin, fmt='png'):
"""Check roundtrip behavior for color images.
All major input types should be handled as ubytes and read
back correctly.
"""
img = img_as_ubyte(data.chelsea())
r1 = roundtrip(img, plugin, fmt)
testing.assert_allclose(img, r1)
img2 = img > 128
r2 = roundtrip(img2, plugin, fmt)
testing.assert_allclose(img2.astype(np.uint8), r2)
img3 = img_as_float(img)
with expected_warnings(['precision loss']):
r3 = roundtrip(img3, plugin, fmt)
testing.assert_allclose(r3, img)
with expected_warnings(['precision loss']):
img4 = img_as_int(img)
if fmt.lower() in (('tif', 'tiff')):
img4 -= 100
with expected_warnings(['sign loss']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img4)
else:
with expected_warnings(['sign loss|precision loss']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img_as_ubyte(img4))
img5 = img_as_uint(img)
with expected_warnings(['precision loss']):
r5 = roundtrip(img5, plugin, fmt)
testing.assert_allclose(r5, img)
def mono_check(plugin, fmt='png'):
"""Check the roundtrip behavior for images that support most types.
All major input types should be handled.
"""
img = img_as_ubyte(data.moon())
r1 = roundtrip(img, plugin, fmt)
testing.assert_allclose(img, r1)
img2 = img > 128
r2 = roundtrip(img2, plugin, fmt)
testing.assert_allclose(img2.astype(np.uint8), r2)
img3 = img_as_float(img)
with expected_warnings(['precision|\A\Z']):
r3 = roundtrip(img3, plugin, fmt)
if r3.dtype.kind == 'f':
testing.assert_allclose(img3, r3)
else:
testing.assert_allclose(r3, img_as_uint(img))
with expected_warnings(['precision loss']):
img4 = img_as_int(img)
if fmt.lower() in (('tif', 'tiff')):
img4 -= 100
with expected_warnings(['sign loss|\A\Z']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img4)
else:
with expected_warnings(['precision loss|sign loss']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img_as_uint(img4))
img5 = img_as_uint(img)
r5 = roundtrip(img5, plugin, fmt)
testing.assert_allclose(r5, img5)
def setup_test():
"""Default package level setup routine for skimage tests.
Import packages known to raise warnings, and then
force warnings to raise errors.
Also set the random seed to zero.
"""
warnings.simplefilter('default')
from scipy import signal, ndimage, special, optimize, linalg
from scipy.io import loadmat
from skimage import viewer
np.random.seed(0)
warnings.simplefilter('error')
def teardown_test():
"""Default package level teardown routine for skimage tests.
Restore warnings to default behavior
"""
warnings.simplefilter('default')
def test_parallel(num_threads=2):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
"""
assert num_threads > 0
def wrapper(func):
@functools.wraps(func)
def inner(*args, **kwargs):
threads = []
for i in range(num_threads - 1):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
threads.append(thread)
for thread in threads:
thread.start()
result = func(*args, **kwargs)
for thread in threads:
thread.join()
return result
return inner
return wrapper
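# Editor's illustrative sketch (not part of the original module): a decorated
# check runs its body in ``num_threads`` threads, which gives a cheap smoke test
# for thread safety. This helper is not called anywhere by default.
def _test_parallel_usage_example():
    @test_parallel(num_threads=4)
    def check_moon_dtype():
        assert img_as_ubyte(data.moon()).dtype == np.uint8
    check_moon_dtype()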
if __name__ == '__main__':
color_check('pil')
mono_check('pil')
mono_check('pil', 'bmp')
mono_check('pil', 'tiff')
|
test_websocket_integration.py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test for the Websocket client integration."""
from unittest import mock
from threading import Thread
from queue import Queue
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.compiler import assemble, transpile
from qiskit.providers import JobTimeoutError
from qiskit.providers.ibmq.api.clients.websocket import (
WebsocketClient, WebsocketAuthenticationMessage)
from qiskit.providers.ibmq.api.clients import AccountClient
from qiskit.providers.ibmq.ibmqfactory import IBMQFactory
from qiskit.providers.jobstatus import JobStatus
from ...ibmqtestcase import IBMQTestCase
from ...decorators import requires_qe_access, run_on_device
class TestWebsocketIntegration(IBMQTestCase):
"""Websocket integration tests."""
@classmethod
@requires_qe_access
def _get_provider(cls, qe_token=None, qe_url=None):
"""Helper for getting account credentials."""
ibmq_factory = IBMQFactory()
provider = ibmq_factory.enable_account(qe_token, qe_url)
return provider
def setUp(self):
self.provider = self._get_provider()
self.sim_backend = self.provider.get_backend(simulator=True)
# Create a circuit
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
self.qc1 = QuantumCircuit(qr, cr, name='qc1')
self.qc1.measure(qr[0], cr[0])
# Create a default Qobj using the simulator.
self.circuit = transpile(self.qc1, backend=self.sim_backend)
self.qobj = assemble(self.circuit, backend=self.sim_backend, shots=1)
def test_websockets_simulator(self):
"""Test checking status of a job via websockets for a simulator."""
job = self.sim_backend.run(self.qobj)
# Manually disable the non-websocket polling.
job._api._job_final_status_polling = None
result = job.result()
self.assertEqual(result.status, 'COMPLETED')
@run_on_device
def test_websockets_device(self, provider, backend): # pylint: disable=unused-argument
"""Test checking status of a job via websockets for a device."""
qc = transpile(self.qc1, backend=backend)
qobj = assemble(qc, backend=backend)
job = backend.run(qobj)
# Manually disable the non-websocket polling.
job._api._job_final_status_polling = None
result = job.result(timeout=180)
self.assertTrue(result.success)
def test_websockets_job_final_state(self):
"""Test checking status of a job in a final state via websockets."""
job = self.sim_backend.run(self.qobj)
job._wait_for_completion()
# Manually disable the non-websocket polling.
job._api._job_final_status_polling = None
# Pretend we haven't seen the final status
job._status = JobStatus.RUNNING
job._wait_for_completion()
self.assertIs(job._status, JobStatus.DONE)
def test_websockets_retry_bad_url(self):
"""Test http retry after websocket error due to an invalid URL."""
job = self.sim_backend.run(self.qobj)
saved_websocket_url = job._api.client_ws.websocket_url
try:
# Use fake websocket address.
job._api.client_ws.websocket_url = 'wss://wss.localhost'
# _wait_for_completion() should retry with http successfully
# after getting websockets error.
job._wait_for_completion()
finally:
job._api.client_ws.websocket_url = saved_websocket_url
self.assertIs(job._status, JobStatus.DONE)
@mock.patch.object(WebsocketClient, '_authentication_message',
return_value=WebsocketAuthenticationMessage(
type_='authentication', data='phantom_token'))
def test_websockets_retry_bad_auth(self, _):
"""Test http retry after websocket error due to a failed authentication."""
job = self.sim_backend.run(self.qobj)
with mock.patch.object(AccountClient, 'job_status',
side_effect=job._api.job_status) as mocked_wait:
job._wait_for_completion()
self.assertIs(job._status, JobStatus.DONE)
mocked_wait.assert_called_with(job.job_id())
def test_websockets_retry_connection_closed(self):
"""Test http retry after websocket error due to closed connection."""
def _job_status_side_effect(*args, **kwargs):
"""Side effect function to restore job ID"""
# pylint: disable=unused-argument
job._job_id = saved_job_id
return saved_job_status(saved_job_id)
job = self.sim_backend.run(self.qobj)
# Save the originals.
saved_job_id = job._job_id
saved_job_status = job._api.job_status
# Use bad job ID to fail the status retrieval.
job._job_id = '12345'
# job.result() should retry with http successfully after getting websockets error.
with mock.patch.object(AccountClient, 'job_status',
side_effect=_job_status_side_effect):
job._wait_for_completion()
self.assertIs(job._status, JobStatus.DONE)
def test_websockets_timeout(self):
"""Test timeout checking status of a job via websockets."""
qc = transpile(self.qc1, backend=self.sim_backend)
qobj = assemble(qc, backend=self.sim_backend, shots=2048)
job = self.sim_backend.run(qobj)
with self.assertRaises(JobTimeoutError):
job.result(timeout=0.1)
def test_websockets_multi_job(self):
"""Test checking status of multiple jobs in parallel via websockets."""
def _run_job_get_result(q):
job = self.sim_backend.run(self.qobj)
# Manually disable the non-websocket polling.
job._api._job_final_status_polling = None
job._wait_for_completion()
if job._status is not JobStatus.DONE:
q.put(False)
max_threads = 2
result_q = Queue()
job_threads = []
for i in range(max_threads):
job_thread = Thread(target=_run_job_get_result, args=(result_q,),
name="job_result_{}".format(i), daemon=True)
job_thread.start()
job_threads.append(job_thread)
for job_thread in job_threads:
job_thread.join()
self.assertTrue(result_q.empty())
|
multprocess_test.py
|
#!/usr/bin/python
"""
multiple process example
"""
import random
from multiprocessing import Process, Queue
import os
def put_num(que):
num = random.randint(1, 100)
que.put(num)
print(f'put num {num} on pid {os.getpid()}')
def main():
queue = Queue()
childs = []
for i in range(4):
p = Process(target=put_num, args=(queue,))
childs.append(p)
p.start()
for p in childs:
p.join(10)  # Process.join() takes a timeout in seconds
results = [queue.get() for p in childs]
print(results)
if __name__ == '__main__':
main()
|
client.py
|
# ###############----------------->import<--------------------############
import socket
from PIL import Image, ImageGrab, ImageTk
import pygetwindow
import re
import os
import time
import win32gui
import lz4.frame
from io import BytesIO
from threading import Thread
from multiprocessing import freeze_support, Process, Queue as Multiprocess_queue
from pynput.keyboard import Listener as Key_listener
from pynput.mouse import Button, Listener as Mouse_listener
import tkinter as tk
from tkinter.font import Font
from tkinter import ttk, messagebox, filedialog
import connection
import win32api
from datetime import datetime
def send_event(msg, sock):
connection.send_data(sock, 2, msg)
def get_mouse_data_from_queue(sock, event_queue, resize, cli_width, cli_height, dis_width, dis_height):
while True:
event_code = event_queue.get()
x = event_queue.get()
y = event_queue.get()
x, y, within_display = check_within_display(x, y, resize, cli_width, cli_height, dis_width, dis_height)
if event_code == 0 or event_code == 7:
if within_display:
if event_code == 7:
x = event_queue.get()
y = event_queue.get()
msg = bytes(f"{event_code:<2}" + str(x) + "," + str(y), "utf-8")
send_event(msg, sock)
# print(f"Event data: {msg}")
elif event_code in range(1, 10):
if within_display:
msg = bytes(f"{event_code:<2}", "utf-8")
send_event(msg, sock)
def scale_x_y(x, y, cli_width, cli_height, dis_width, dis_height):
scale_x = cli_width / dis_width
scale_y = cli_height / dis_height
x *= scale_x
y *= scale_y
return round(x, 1), round(y, 1)
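# Editor's illustrative sketch (not part of the original client): mapping a point
# from a 1280x720 display window back onto a 1920x1080 client screen scales each
# coordinate by 1.5. Never called; it only documents the math.
def _scale_x_y_example():
    assert scale_x_y(640, 360, 1920, 1080, 1280, 720) == (960.0, 540.0)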
def check_within_display(x, y, resize, cli_width, cli_height, dis_width, dis_height):
active_window = pygetwindow.getWindowsWithTitle(f"Remote Desktop")
if active_window and (len(active_window) == 1):
x, y = win32gui.ScreenToClient(active_window[0]._hWnd, (x, y))
if (0 <= x <= dis_width) and (0 <= y <= dis_height):
if resize:
x, y = scale_x_y(x, y, cli_width, cli_height, dis_width, dis_height)
return x, y, True
return x, y, False
def on_move(x, y):
# print("Mouse listener working")
mouse_event_queue.put(0) # event_code
mouse_event_queue.put(x)
mouse_event_queue.put(y)
def on_click(x, y, button, pressed):
if pressed: # mouse down(press)
mouse_event_queue.put(button_code.get(button)[0])
mouse_event_queue.put(x)
mouse_event_queue.put(y)
else: # mouse up(release)
mouse_event_queue.put(button_code.get(button)[1])
mouse_event_queue.put(x)
mouse_event_queue.put(y)
def on_scroll(x, y, dx, dy):
mouse_event_queue.put(7) # event_code
mouse_event_queue.put(x)
mouse_event_queue.put(y)
mouse_event_queue.put(dx)
mouse_event_queue.put(dy)
def key_events(key, event_code):
active_window = pygetwindow.getActiveWindow()
if active_window:
# print("Keyboard listener working")
if active_window.title == f"Remote Desktop":
try:
if key.char:
msg = bytes(event_code + key.char, "utf-8") # alphanumeric key
send_event(msg, remote_server_socket)
except AttributeError:
msg = bytes(event_code + key.name, "utf-8") # special key
send_event(msg, remote_server_socket)
def on_press(key):
key_events(key, "-1")
def on_release(key):
key_events(key, "-2")
def recv_and_put_into_queue(client_socket, jpeg_queue):
header_size = 10
partial_prev_msg = bytes()
try:
while True:
msg = connection.receive_data(client_socket, header_size, partial_prev_msg)
if msg:
jpeg_queue.put(lz4.frame.decompress(msg[0])) # msg[0]--> new msg
partial_prev_msg = msg[1] # msg[1]--> partial_prev_msg
except (BrokenPipeError, ConnectionAbortedError, ConnectionResetError, OSError) as e:
print(e.strerror)
except ValueError:
pass
finally:
print("Thread2 automatically exits")
def display_data(jpeg_queue, status_queue, dis_width, dis_height, resize):
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
pygame.init()
display_surface = pygame.display.set_mode((dis_width, dis_height))
pygame.display.set_caption(f"Remote Desktop")
clock = pygame.time.Clock()
display = True
while display:
for event in pygame.event.get():
if event.type == pygame.QUIT:
status_queue.put("stop")
pygame.quit()
return
# start_time = time.time()
jpeg_buffer = BytesIO(jpeg_queue.get())
img = Image.open(jpeg_buffer)
py_image = pygame.image.frombuffer(img.tobytes(), img.size, img.mode)
# print(f"Display: {(time.time() - start_time):.4f}")
if resize:
py_image = pygame.transform.scale(py_image, (dis_width, dis_height))
# img = img.resize((display_width, display_height))
jpeg_buffer.close()
display_surface.blit(py_image, (0, 0))
print(f"Fps: {int(clock.get_fps())}")
pygame.display.flip()
clock.tick(60)
def cleanup_process_threads():
# process2.join()
# process1.kill()
# process1.join()
process_list = [process1, process2]
for process in process_list:
if process:
if process.is_alive():
process.kill()
process.join()
listener_key.stop()
listener_key.join()
listener_mouse.stop()
listener_mouse.join()
# thread2.join()
print("cleanup finished")
def cleanup_display_process(status_queue):
if status_queue.get() == "stop":
connection.send_data(command_server_socket, COMMAND_HEADER_SIZE, bytes("stop_capture", "utf-8"))
cleanup_process_threads()
def compare_and_compute_resolution(cli_width, cli_height, ser_width, ser_height):
resolution_tuple = ((7680, 4320), (3840, 2160), (2560, 1440), (1920, 1080), (1600, 900), (1366, 768), (1280, 720),
(1152, 648), (1024, 576), (2560, 1600), (1920, 1200), (1680, 1050), (1440, 900), (1280, 800),
(2048, 1536), (1920, 1440), (1856, 1392), (1600, 1200), (1440, 1080), (1400, 1050), (1280, 960),
(1024, 768), (960, 720), (800, 600), (640, 480))
if cli_width >= ser_width or cli_height >= ser_height:
for resolution in resolution_tuple:
if (resolution[0] <= ser_width and resolution[1] <= ser_height) and (resolution != (ser_width, ser_height)):
return resolution
else:
return ser_width, ser_height
else:
return cli_width, cli_height
def remote_display():
global thread2, listener_key, listener_mouse, process1, process2, remote_server_socket, mouse_event_queue
print("Sending start_capture message")
connection.send_data(command_server_socket, COMMAND_HEADER_SIZE, bytes("start_capture", "utf-8"))
print("Sent start_capture message")
disable_choice = messagebox.askyesno("Remote Box", "Disable the remote computer wallpaper?(yes recommended)")
# disable_choice = connection.retry("Disable the remote computer wallpaper?(recommended):")
# remote display socket
remote_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_server_socket.connect((server_ip, server_port))
# wallpaper_settings
print(f"Disable choice: {disable_choice}")
connection.send_data(remote_server_socket, COMMAND_HEADER_SIZE, bytes(str(disable_choice), "utf-8"))
print("\n")
print(f">>You can now CONTROL the remote desktop now")
resize_option = False
server_width, server_height = ImageGrab.grab().size
client_resolution = connection.receive_data(remote_server_socket, 2, bytes(), 1024)[0].decode("utf-8")
client_width, client_height = client_resolution.split(",")
display_width, display_height = compare_and_compute_resolution(int(client_width), int(client_height), server_width,
server_height)
# display_msg = bytes(str(display_width) + "," + str(display_height), "utf-8")
# connection.send_data(clientsocket, 2, display_msg)
if (client_width, client_height) != (display_width, display_height):
resize_option = True
jpeg_sync_queue = Multiprocess_queue()
thread2 = Thread(target=recv_and_put_into_queue, name="recv_stream", args=(remote_server_socket, jpeg_sync_queue),
daemon=True)
thread2.start()
listener_key = Key_listener(on_press=on_press, on_release=on_release)
listener_key.start()
mouse_event_queue = Multiprocess_queue()
process1 = Process(target=get_mouse_data_from_queue, args=(remote_server_socket, mouse_event_queue, resize_option,
int(client_width), int(client_height), display_width,
display_height), daemon=True)
process1.start()
listener_mouse = Mouse_listener(on_move=on_move, on_click=on_click, on_scroll=on_scroll)
listener_mouse.start()
execution_status_queue = Multiprocess_queue()
process2 = Process(target=display_data, args=(jpeg_sync_queue, execution_status_queue, display_width, display_height
, resize_option), daemon=True)
process2.start()
thread3 = Thread(target=cleanup_display_process, args=(execution_status_queue,), daemon=True)
thread3.start()
def login():
# resize_option = False
global command_server_socket, remote_server_socket, chat_server_socket, file_server_socket, thread1, server_ip, \
server_port
server_ip = name_entry.get()
server_port = int(port_entry.get())
server_pass = pass_entry.get()
if len(server_pass) == 6 and server_pass.strip() != "":
try:
command_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
command_server_socket.connect((server_ip, server_port))
server_pass = bytes(server_pass, "utf-8")
connection.send_data(command_server_socket, 2, server_pass) # send password
login_response = connection.receive_data(command_server_socket, 2, bytes(), 1024)[0].decode("utf-8")
if login_response != "1":
print("WRONG Password!..")
else:
# chat socket
chat_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
chat_server_socket.connect((server_ip, server_port))
# file transfer socket
file_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
file_server_socket.connect((server_ip, server_port))
print("\n")
print("Connected to the remote computer!")
#label_status.configure(state='normal')
label_status.grid()
execute = False
thread1 = Thread(target=listen_for_commands, daemon=True)
thread1.start()
# thread for chat
recv_chat_msg_thread = Thread(target=receive_chat_message, name="recv_chat_msg_thread", daemon=True)
recv_chat_msg_thread.start()
# Enable
disconnect_button.configure(state="normal")
my_notebook.add(chat_frame, text=" Chat ")
# my_notebook.add(file_transfer_frame, text=" File Transfer ")
access_button_frame.grid(row=7, column=0, padx=45, pady=20, columnspan=2, sticky=tk.W + tk.E)
# Disable
name_entry.configure(state="disabled")
port_entry.configure(state="disabled")
pass_entry.configure(state="disabled")
connect_button.configure(state="disabled")
except OSError as e:
#label_status.configure(state='disabled')
label_status.grid_remove()
print(e.strerror)
else:
print("Password is not 6 characters")
def close_sockets():
service_socket_list = [command_server_socket, remote_server_socket, chat_server_socket, file_server_socket]
for sock in service_socket_list:
if sock:
sock.close()
print("Closed all the sockets")
def disconnect(caller):
if caller == "button":
connection.send_data(command_server_socket, COMMAND_HEADER_SIZE, bytes("disconnect", "utf-8"))
close_sockets()
# Enable
name_entry.configure(state="normal")
port_entry.configure(state="normal")
pass_entry.configure(state="normal")
connect_button.configure(state="normal")
# Disable
disconnect_button.configure(state="disabled")
#label_status.configure(state='disabled')
label_status.grid_remove()
access_button_frame.grid_forget()
my_notebook.hide(1)
my_notebook.hide(2)
def listen_for_commands():
listen = True
try:
while listen:
msg = connection.receive_data(command_server_socket, COMMAND_HEADER_SIZE, bytes(), 1024)[0].decode("utf-8")
if msg == "disconnect":
listen = False
except (BrokenPipeError, ConnectionAbortedError, ConnectionResetError, OSError) as e:
print(e.strerror)
except ValueError:
pass
finally:
if (file_window is not None) and file_window.winfo_exists():
file_window.destroy()
print("top window destroyed")
#label_status.configure(state='disabled')
label_status.grid_remove()
disconnect("message")
print("Thread1 automatically exits")
def add_text_chat_display_widget(msg, name):
text_chat_widget.configure(state=tk.NORMAL)
text_chat_widget.insert(tk.END, "\n")
text_chat_widget.insert(tk.END, name + ": " + msg)
text_chat_widget.configure(state="disabled")
def send_chat_message(event):
try:
msg = input_text_widget.get()
if msg and msg.strip() != "":
input_text_widget.delete(0, "end")
connection.send_data(chat_server_socket, CHAT_HEADER_SIZE, bytes(msg, "utf-8"))
add_text_chat_display_widget(msg, LOCAL_CHAT_NAME)
except (BrokenPipeError, ConnectionAbortedError, ConnectionResetError, OSError) as e:
print(e.strerror)
def receive_chat_message():
try:
while True:
msg = connection.receive_data(chat_server_socket, CHAT_HEADER_SIZE, bytes())[0].decode("utf-8")
add_text_chat_display_widget(msg, REMOTE_CHAT_NAME)
except (BrokenPipeError, ConnectionAbortedError, ConnectionResetError, OSError) as e:
print(e.strerror)
except ValueError:
pass
def upload_file(filename):
# needs the location of the file to upload
file_size = os.stat(filename).st_size
connection.send_data(file_server_socket, FILE_HEADER_SIZE, bytes(str(file_size), "utf-8"))
file_mode = "r" if (len(filename) > 4) and (filename[len(filename) - 4:] == ".txt") else "rb"
client_file_mode = "w" if file_mode == "r" else "wb"
connection.send_data(file_server_socket, FILE_HEADER_SIZE, bytes(client_file_mode, "utf-8"))
data = True
with open(filename, file_mode) as f:
while data:
if file_mode == "r":
data = bytes(f.read(65536), "utf-8")
elif file_mode == "rb":
data = f.read(65536)
if data:
connection.send_data(file_server_socket, FILE_HEADER_SIZE, data)
def file_explore(system_name):
# global LOCAL_PATH
system_values = system_choice.get(system_name)
if system_name == "local":
try:
# scan the current path to get the path details
obj = os.scandir(system_values[2])
except PermissionError:
print("No permission to access this resource")
back_button("restore_prev_path", system_name)
return
else:
pass
count = 0
for entry in obj:
if entry.is_dir() or entry.is_file():
st = entry.stat()
# modification datetime
mod = datetime.fromtimestamp(st.st_mtime).strftime("%d-%m-%Y %I:%M %p")
if entry.is_dir():
typ = "File folder"
size = ""
update_treeview(system_values[0], count, folder_img, (entry.name, typ, size, mod))
count += 1
else:
typ = "File"
# get the extension of file to define the file type and icon of the file
match = re.search(r"\.(\w+)$", entry.name)
display_img = file_img
if match:
ext = match.group(1)
if ext.lower() == "pdf":
display_img = pdf_img
elif ext.lower() == "exe":
display_img = exe_img
elif ext.lower() in ("jpg", "png", "bmp", "gif", "tiff"):
display_img = photo_img
elif ext.lower() == "txt":
display_img = txt_img
elif ext.lower() in ("doc", "docx", "docm"):
display_img = word_img
elif ext.lower() in ("ppt", "pptx", "pptm"):
display_img = powerpoint_img
elif ext.lower() in ("xlsx", "xlsm"):
display_img = excel_img
elif ext.lower() in ("mp4", "mov", "wmv", "flv", "avi", "mkv"):
display_img = video_img
elif ext.lower() in ("3gp", "mp3", "wav", "webm", "m4a"):
display_img = music_img
elif ext.lower() in ("zip", "7z", "rar", "tar", "iso", "gz", "sfx", "apk"):
display_img = zip_img
typ = ext.upper() + " " + typ
# Get the appropriate size
if st.st_size < 1024:
size = str(st.st_size) + " bytes"
elif st.st_size < 1048576:
size = str(round(st.st_size / 1024, 1)) + " KB"
elif st.st_size < 1073741824:
size = str(round(st.st_size / (1024 * 1024), 1)) + " MB"
else:
size = str(round(st.st_size / (1024 * 1024 * 1024), 1)) + " GB"
# insert data into the treeview
update_treeview(system_values[0], count, display_img, (entry.name, typ, size, mod))
count += 1
system_values[1].delete(0, tk.END)
system_values[1].insert(0, system_values[2].replace(r"\\", "\\"))
def drive_letters(system_name):
system_values = system_choice.get(system_name)
drives = win32api.GetLogicalDriveStrings()
drives = tuple(drives.split(':\\\000')[:-1])
for i in range(0, len(drives)):
update_treeview(system_values[0], i, drive_img, (drives[i], "Drive", "", ""))
def back_button(event, system_name):
# global LOCAL_PATH
# list of directories excluding 2 elements i.e the empty string and the last directory
system_values = system_choice.get(system_name)
li_of_dir = system_values[2].split(r"\\")[:-2]
if li_of_dir:
system_choice.get(system_name)[2] = r""
# joining each element of list with "\\" to form the new path
for name in li_of_dir:
system_choice.get(system_name)[2] += name + r"\\"
if event == "button":
file_explore(system_name)
else:
# clear the local path displayed
system_values[1].delete(0, tk.END)
drive_letters(system_name)
def change_path(event=None, system_name=None):
# global LOCAL_PATH, REMOTE_PATH
system_values = system_choice.get(system_name)
items = system_values[0].selection()
if len(items) == 1:
col_values = system_values[0].item(items[0], 'values')
print(f"Selection : {col_values[0]}")
if col_values[1] == "File folder" or col_values[1] == "Drive":
if col_values[1] == "Drive":
system_choice.get(system_name)[2] = col_values[0] + r":\\"
elif col_values[1] == "File folder":
system_choice.get(system_name)[2] += col_values[0] + r"\\"
print(f"Changing path to : {system_choice.get(system_name)[2]}") # for i in items:
file_explore(system_name)
def update_treeview(file_tree, iid, img, value):
if iid == 0:
file_tree.delete(*file_tree.get_children())
file_tree.insert(parent="", index="end", iid=iid, text="", image=img, values=value)
def recv_file_details(files_details_queue):
recv = True
partial_prev_msg = bytes()
try:
while recv:
msg = connection.receive_data(browse_file_server_socket, FILE_HEADER_SIZE, partial_prev_msg)
if msg:
files_details_queue.put(lz4.frame.decompress(msg[0])) # msg[0]--> new msg
partial_prev_msg = msg[1] # msg[1]--> partial_prev_msg
except (BrokenPipeError, ConnectionAbortedError, ConnectionResetError, OSError) as e:
print(e.strerror)
except ValueError:
pass
finally:
print("recv_file_details_process automatically exits")
def local_file_treeview():
global local_file_tree, local_path_entry
local_top_file_frame = tk.LabelFrame(file_window, bd=0)
local_top_file_frame.grid(row=0, column=0, sticky=tk.W + tk.E, padx=10, pady=10)
local_file_frame = tk.LabelFrame(file_window, bd=0)
local_file_frame.grid(row=1, column=0, sticky=tk.W + tk.E, padx=10)
# path entry field
local_path_entry = tk.Entry(local_top_file_frame, width=45)
local_path_entry.configure(font=("Helvetica", 11, "bold"))
local_path_entry.grid(row=0, column=1, columnspan=2, sticky=tk.W + tk.E)
# scrollbar
local_scroll_file_widget = tk.Scrollbar(local_file_frame)
local_scroll_file_widget.grid(row=1, column=1, sticky=tk.N + tk.S)
# Treeview
local_file_tree = ttk.Treeview(local_file_frame, yscrollcommand=local_scroll_file_widget.set)
local_scroll_file_widget.configure(command=local_file_tree.yview)
# defining the columns
local_file_tree["columns"] = ("Name", "Type", "Size", "Modified")
# formatting the columns
local_file_tree.column("#0", width=50, minwidth=50, anchor=tk.W, stretch=tk.NO)
local_file_tree.column("Name", anchor=tk.W, width=130, minwidth=60)
local_file_tree.column("Type", anchor=tk.W, width=100, minwidth=50)
local_file_tree.column("Size", anchor=tk.CENTER, width=80, minwidth=40)
local_file_tree.column("Modified", anchor=tk.W, width=80, minwidth=40)
# Heading of columns
local_file_tree.heading("#0", text="", anchor=tk.W)
local_file_tree.heading("Name", text="Name", anchor=tk.W)
local_file_tree.heading("Type", text="Type", anchor=tk.W)
local_file_tree.heading("Size", text="Size", anchor=tk.CENTER)
local_file_tree.heading("Modified", text="Modified", anchor=tk.W)
# local back button
local_prev_dir_button = tk.Button(local_top_file_frame, bd=0, image=back_img,
command=lambda: back_button("button", "local"))
local_prev_dir_button.grid(row=0, column=0, sticky=tk.W, padx=10)
local_path_entry.insert(0, LOCAL_PATH)
tree_style = ttk.Style()
tree_style.configure('Treeview', rowheight=40)
local_file_tree.bind("<Double-1>", lambda event: change_path(event, system_name="local"))
local_file_tree.grid(row=1, column=0, pady=10, sticky=tk.W + tk.E)
system_choice["local"][0] = local_file_tree
system_choice["local"][1] = local_path_entry
def remote_file_treeview():
global remote_file_tree, remote_path_entry
remote_top_file_frame = tk.LabelFrame(file_window, bd=0)
remote_top_file_frame.grid(row=0, column=1, sticky=tk.W + tk.E, padx=10, pady=10)
remote_file_frame = tk.LabelFrame(file_window, bd=0)
remote_file_frame.grid(row=1, column=1, sticky=tk.W + tk.E, padx=10)
# path entry field
remote_path_entry = tk.Entry(remote_top_file_frame, width=45)
remote_path_entry.configure(font=("Helvetica", 11, "bold"))
remote_path_entry.grid(row=0, column=1, columnspan=2, sticky=tk.W + tk.E)
# scrollbar
remote_scroll_file_widget = tk.Scrollbar(remote_file_frame)
remote_scroll_file_widget.grid(row=1, column=1, sticky=tk.N + tk.S)
# Treeview
remote_file_tree = ttk.Treeview(remote_file_frame, yscrollcommand=remote_scroll_file_widget.set)
remote_scroll_file_widget.configure(command=remote_file_tree.yview)
# defining the columns
remote_file_tree["columns"] = ("Name", "Type", "Size", "Modified")
# formatting the columns
remote_file_tree.column("#0", width=50, minwidth=50, anchor=tk.W, stretch=tk.NO)
remote_file_tree.column("Name", anchor=tk.W, width=130, minwidth=130)
remote_file_tree.column("Type", anchor=tk.W, width=100, minwidth=100)
remote_file_tree.column("Size", anchor=tk.CENTER, width=80, minwidth=80)
remote_file_tree.column("Modified", anchor=tk.W, width=80, minwidth=80)
# Heading of columns
remote_file_tree.heading("#0", text="", anchor=tk.W)
remote_file_tree.heading("Name", text="Name", anchor=tk.W)
remote_file_tree.heading("Type", text="Type", anchor=tk.W)
remote_file_tree.heading("Size", text="Size", anchor=tk.CENTER)
remote_file_tree.heading("Modified", text="Modified", anchor=tk.W)
# remote back button
remote_prev_dir_btn = tk.Button(remote_top_file_frame, bd=0, image=back_img,
command=lambda: back_button("button", "remote"))
remote_prev_dir_btn.grid(row=0, column=0, sticky=tk.W, padx=10)
remote_path_entry.insert(0, REMOTE_PATH)
remote_tree_style = ttk.Style()
remote_tree_style.configure('Treeview', rowheight=40)
remote_file_tree.bind("<Double-1>", lambda event: change_path(event, system_name="remote"))
remote_file_tree.grid(row=1, column=0, pady=10, sticky=tk.W + tk.E)
system_choice["remote"][0] = remote_file_tree
system_choice["remote"][1] = remote_path_entry
def check_window_closed():
window_open = True
while window_open:
if (file_window is not None) and file_window.winfo_exists():
time.sleep(3)
continue
else:
file_button.configure(state="normal")
window_open = False
def file_transfer_window():
global file_window
file_button.configure(state="disabled")
file_window = tk.Toplevel()
file_window.title("File Transfer")
file_window.resizable(False, False)
# local file treeview
local_file_treeview()
# remote file_treeview
remote_file_treeview()
files_details_queue = Multiprocess_queue()
connection.send_data(command_server_socket, COMMAND_HEADER_SIZE, bytes("start_file_explorer", "utf-8"))
recv_file_details_process = Process(target=recv_file_details, args=(files_details_queue,),
name="recv_file_details_process", daemon=True)
recv_file_details_process.start()
update_file_gui_thread = Thread(target=update_treeview, name="update_file_gui_thread", daemon=True)
update_file_gui_thread.start()
check_window_status_thread = Thread(target=check_window_closed, name="check_window_status", daemon=True)
check_window_status_thread.start()
drive_letters("local")
drive_letters("remote")
if __name__ == "__main__":
freeze_support()
command_server_socket = None
remote_server_socket = None
chat_server_socket = None
file_server_socket = None
browse_file_server_socket = None
thread1 = None
thread2 = None
listener_key = None
listener_mouse = None
process1 = None
process2 = None
server_ip = str()
server_port = int()
status_event_log = 1
# local_drives = None
# remote_drives = None
LOCAL_PATH = r""
local_file_tree = None
local_path_entry = None
REMOTE_PATH = r""
remote_file_tree = None
remote_path_entry = None
file_window = None
COMMAND_HEADER_SIZE = 2
CHAT_HEADER_SIZE = 10
FILE_HEADER_SIZE = 10
LOCAL_CHAT_NAME = "Me"
REMOTE_CHAT_NAME = "Remote Box"
button_code = {Button.left: (1, 4), Button.right: (2, 5), Button.middle: (3, 6)}
system_choice = {"local": [local_file_tree, local_path_entry, LOCAL_PATH],
"remote": [remote_file_tree, remote_path_entry, REMOTE_PATH]}
# Create Root Window
root = tk.Tk()
root.title("Remote Box")
root.iconbitmap("logo.ico")
root.resizable(False, False)
# icons
folder_img = tk.PhotoImage(file="file_icons\\folder.png")
file_img = tk.PhotoImage(file="file_icons\\file.png")
pdf_img = tk.PhotoImage(file="file_icons\\pdf.png")
photo_img = tk.PhotoImage(file="file_icons\\photo.png")
txt_img = tk.PhotoImage(file="file_icons\\txt.png")
exe_img = tk.PhotoImage(file="file_icons\\exe.png")
zip_img = tk.PhotoImage(file="file_icons\\zip.png")
word_img = tk.PhotoImage(file="file_icons\\word.png")
powerpoint_img = tk.PhotoImage(file="file_icons\\powerpoint.png")
video_img = tk.PhotoImage(file="file_icons\\video.png")
music_img = tk.PhotoImage(file="file_icons\\music.png")
excel_img = tk.PhotoImage(file="file_icons\\excel.png")
drive_img = tk.PhotoImage(file="file_icons\\drive.png")
back_img = tk.PhotoImage(file="file_icons\\back.png")
green_img = tk.PhotoImage(file="assets/gui_icons/green_16.png")
# My fonts
myFont_title = Font(family="Helvetica", size=14, weight="bold")
myFont_title_normal = Font(family="Helvetica", size=13, weight="bold")
myFont_normal = Font(family="Helvetica", size=13)
# My Notebook
my_notebook = ttk.Notebook(root)
my_notebook.grid(row=0, column=0, pady=5)
# <------Connection Tab -------------->
connection_frame = tk.LabelFrame(my_notebook, padx=100, pady=5, bd=0)
connection_frame.grid(row=0, column=0, padx=40, pady=40, sticky=tk.N)
# Logo Label
img_logo = ImageTk.PhotoImage(Image.open("assets/gui_icons/logo.png"))
label_note = tk.Label(connection_frame, image=img_logo, anchor=tk.CENTER)
label_note.grid(row=0, column=0, pady=5, columnspan=2, sticky=tk.N)
# Form elements frame
form_frame = tk.LabelFrame(connection_frame, text="Control Remote Box", padx=20, pady=5)
form_frame.configure(font=myFont_title)
form_frame.grid(row=1, column=0, padx=120, pady=(40, 20), sticky=tk.N)
# Form for Input data
name_label = tk.Label(form_frame, text="Computer Name/IP", padx=5, pady=5)
name_label.configure(font=myFont_title_normal)
name_label.grid(row=0, column=0, pady=5, columnspan=2, sticky=tk.W)
name_entry = tk.Entry(form_frame, width=20)
name_entry.configure(font=myFont_normal)
name_entry.grid(row=1, column=0, pady=5, columnspan=2, sticky=tk.N)
port_label = tk.Label(form_frame, text="Port", padx=5, pady=5)
port_label.configure(font=myFont_title_normal)
port_label.grid(row=2, column=0, pady=5, columnspan=2, sticky=tk.W)
port_entry = tk.Entry(form_frame, width=20)
port_entry.configure(font=myFont_normal)
port_entry.grid(row=3, column=0, pady=5, columnspan=2, sticky=tk.N)
pass_label = tk.Label(form_frame, text="Password", padx=5, pady=5)
pass_label.configure(font=myFont_title_normal)
pass_label.grid(row=4, column=0, pady=5, columnspan=2, sticky=tk.W)
pass_entry = tk.Entry(form_frame, show="*", width=20)
pass_entry.configure(font=myFont_normal)
pass_entry.grid(row=5, column=0, pady=5, columnspan=2, sticky=tk.N)
# Button frame
button_frame = tk.LabelFrame(form_frame, padx=2, pady=5, bd=0)
button_frame.grid(row=6, column=0, padx=5, pady=2)
# Connect and Disconnect button
connect_button = tk.Button(button_frame, text="Connect", padx=4, pady=1, command=login)
connect_button.configure(font=myFont_title_normal)
connect_button.grid(row=0, column=0, sticky=tk.N, padx=5, pady=5)
disconnect_button = tk.Button(button_frame, text="Disconnect", padx=2, pady=1, command=lambda: disconnect("button"))
disconnect_button.configure(font=myFont_title_normal, state=tk.DISABLED)
disconnect_button.grid(row=0, column=1, sticky=tk.N, padx=5, pady=5)
# Access Button Frame
access_button_frame = tk.LabelFrame(connection_frame, text="Access", padx=5, pady=15)
access_button_frame.configure(font=myFont_title)
access_button_frame.grid(row=7, column=0, padx=10, pady=10, columnspan=2, sticky=tk.W+tk.E)
# Disable access frame when not connected
access_button_frame.grid_forget()
# images
remote_img = tk.PhotoImage(file="assets/gui_icons/remote_32.png")
chat_img = tk.PhotoImage(file="assets/gui_icons/chat_32.png")
file_transfer_img = tk.PhotoImage(file="assets/gui_icons/file_transfer_32.png")
# View Remote Box button
remote_button = tk.Button(access_button_frame, text="Remote Box", image=remote_img, compound=tk.TOP, padx=2,
pady=2, bd=0, command=remote_display)
remote_button.configure(font=myFont_normal)
remote_button.grid(row=0, column=1, sticky=tk.W, padx=30) # padx =60
# Chat button
chat_button = tk.Button(access_button_frame, text="Chat", image=chat_img, compound=tk.TOP, padx=3, pady=2, bd=0,
command=lambda: my_notebook.select(1))
chat_button.configure(font=myFont_normal)
chat_button.grid(row=0, column=2, sticky=tk.W, padx=30)
# File transfer button
file_button = tk.Button(access_button_frame, text="File Transfer", image=file_transfer_img, compound=tk.TOP, padx=2,
pady=2, bd=0, command=file_transfer_window) # lambda: my_notebook.select(2)
file_button.configure(font=myFont_normal)
file_button.grid(row=0, column=3, sticky=tk.W, padx=30)
# <------Chat Tab -------------->
chat_frame = tk.LabelFrame(my_notebook, padx=20, pady=20, bd=0)
chat_frame.grid(row=0, column=0, sticky=tk.N)
# text_frame = tk.LabelFrame(chat_frame, bd=0)
# text_frame.grid(row=0, column=0)
# Scroll bar to event frame
scroll_chat_widget = tk.Scrollbar(chat_frame)
scroll_chat_widget.grid(row=0, column=1, sticky=tk.N + tk.S)
# Text Widget
text_chat_widget = tk.Text(chat_frame, width=60, height=22, font=("Helvetica", 14), padx=10, pady=10,
yscrollcommand=scroll_chat_widget.set)
# text_chat_widget.insert(1.0, "By Default Share Funny Jokes")
text_chat_widget.configure(state='disabled')
text_chat_widget.grid(row=0, column=0, sticky=tk.N)
scroll_chat_widget.config(command=text_chat_widget.yview)
# Frame for input text
input_text_frame = tk.LabelFrame(chat_frame, pady=5, bd=0)
input_text_frame.grid(row=1, column=0, sticky=tk.W)
# Text Widget
input_text_widget = tk.Entry(input_text_frame, width=60)
input_text_widget.configure(font=("Helvetica", 14))
input_text_widget.bind("<Return>", send_chat_message)
input_text_widget.grid(row=0, column=0, pady=10, sticky=tk.W)
# # <------File Transfer Tab -------------->
# file_transfer_frame = tk.LabelFrame(my_notebook, padx=40, pady=40, bd=0)
# file_transfer_frame.grid(row=0, column=0, sticky=tk.N)
#
# # Upload file frame
# upload_frame = tk.LabelFrame(file_transfer_frame,text="Upload a File", padx=40, pady=40)
# upload_frame.configure(font=myFont_title_normal)
# upload_frame.grid(row=0, column=0, sticky=tk.W+tk.E)
#
# # Select a file to upload Button
# select_file_upload_btn = tk.Button(upload_frame, text="Select File", padx=3, pady=2, command=open_file)
# select_file_upload_btn.configure(font=myFont_title_normal)
# select_file_upload_btn.grid(row=0, column=0, sticky=tk.W, padx=30)
#
# # Selected file label
# select_file_upload_label = tk.Label(upload_frame, padx=5, pady=5)
# select_file_upload_label.configure(font=myFont_title_normal)
# select_file_upload_label.grid(row=1, column=0, pady=5, sticky=tk.W)
#
# # Select location Button
# select_loc_upload_btn = tk.Button(upload_frame, text="Select Location", padx=3, pady=2)
# select_loc_upload_btn.configure(font=myFont_title_normal)
# select_loc_upload_btn.grid(row=0, column=1, sticky=tk.W, padx=30)
#
# # Upload Button
# file_upload_btn = tk.Button(upload_frame, text="Upload", padx=3, pady=2)
# file_upload_btn.configure(font=myFont_title_normal)
# file_upload_btn.grid(row=2, column=0, columnspan=2, sticky=tk.N, padx=30)
# <-------------Event log Tab --------------------->
# Event_log Frame
event_frame = tk.LabelFrame(my_notebook, text="Event Log", padx=20, pady=20, relief=tk.FLAT)
event_frame.configure(font=myFont_title)
event_frame.grid(row=3, column=0, columnspan=2, padx=40, pady=5, sticky=tk.W)
# Scroll bar to event frame
scroll_widget = tk.Scrollbar(event_frame)
scroll_widget.grid(row=0, column=1, sticky=tk.N + tk.S)
# Text Widget
text_1 = tk.Text(event_frame, width=50, height=7, font=("Helvetica", 13), padx=10, pady=10,
yscrollcommand=scroll_widget.set)
text_1.insert(1.0, "By Default Show Event Logs")
text_1.configure(state='disabled')
text_1.grid(row=0, column=0)
scroll_widget.config(command=text_1.yview)
# Status Label
label_status = tk.Label(root, text="Connected", image=green_img, compound=tk.LEFT, relief=tk.SUNKEN, bd=0, anchor=tk.E,
padx=10)
label_status.configure(font=myFont_normal)
label_status.grid(row=3, column=0, columnspan=2, sticky=tk.W + tk.E)
label_status.grid_remove()
# Create Tab style
tab_style = ttk.Style()
tab_style.configure('TNotebook.Tab', font=('Helvetica', '13', 'bold'))
# Tab Creation
my_notebook.add(connection_frame, text=" Connection ")
my_notebook.add(chat_frame, text=" Chat ")
# my_notebook.add(file_transfer_frame, text=" File Transfer ")
my_notebook.add(event_frame, text=" Event Logs ")
# Hide Tab
my_notebook.hide(1)
my_notebook.hide(2)
root.mainloop()
|
master.py
|
#!/usr/bin/env python
from multiprocessing import Process, Pipe, Queue
from audioproc import AudioRecorder
from videoproc import VideoRecorder
import time
import signal
TRIGGER_HEADER = 'TRGR'
def run_audio_proc(pipe_endpt):
""" Run audio monitor in its own process
"""
rec = AudioRecorder()
print("audioproc ready")
try:
while (rec.waitForLoudNoise()):
# send a trigger message
le_time = time.localtime()
pipe_endpt.send([TRIGGER_HEADER,le_time])
except KeyboardInterrupt:
print("audioproc KeyboardInterrupt")
rec = None
pipe_endpt = None
print("audioproc done")
def run_video_proc(pipe_endpt):
""" Run video capture in its own process
"""
rec = VideoRecorder()
print("videoproc ready")
try:
while (True):
# wait for a trigger message from the audio monitor
msg = pipe_endpt.recv()
print(msg)
if (msg[0] == TRIGGER_HEADER):
rec.recordSegment()
except KeyboardInterrupt:
print("videoproc KeyboardInterrupt")
rec = None
pipe_endpt = None
print("videoproc done")
def main():
# create a pipe for the two subprocesses to communicate with
video_proc_endpt, audio_proc_endpt = Pipe()
# create and start separate audio and video subprocesses
audio_proc = Process(target=run_audio_proc, args=((audio_proc_endpt),))
audio_proc.start()
video_proc = Process(target=run_video_proc, args=((video_proc_endpt),))
video_proc.start()
try:
audio_proc.join()
video_proc.join()
except KeyboardInterrupt:
print("master KeyboardInterrupt")
if __name__ == '__main__':
main()
|
test__threading_vs_settrace.py
|
from __future__ import print_function
import sys
import subprocess
import unittest
from gevent.thread import allocate_lock
script = """
from gevent import monkey
monkey.patch_all()
import sys, os, threading, time
# A deadlock-killer, to prevent the
# testsuite from hanging forever
def killer():
time.sleep(0.1)
sys.stdout.write('..program blocked; aborting!')
sys.stdout.flush()
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
def trace(frame, event, arg):
if threading is not None:
threading.currentThread()
return trace
def doit():
sys.stdout.write("..thread started..")
def test1():
t = threading.Thread(target=doit)
t.start()
t.join()
sys.settrace(None)
sys.settrace(trace)
if len(sys.argv) > 1:
test1()
sys.stdout.write("..finishing..")
"""
class TestTrace(unittest.TestCase):
def test_untraceable_lock(self):
# Untraceable locks were part of the solution to https://bugs.python.org/issue1733757
# which details a deadlock that could happen if a trace function invoked
# threading.currentThread at shutdown time---the cleanup lock would be held
# by the VM, and calling currentThread would try to acquire it again. The interpreter
# changed in 2.6 to use the `with` statement (https://hg.python.org/cpython/rev/76f577a9ec03/),
# which apparently doesn't trace in quite the same way.
if hasattr(sys, 'gettrace'):
old = sys.gettrace()
else:
old = None
PYPY = hasattr(sys, 'pypy_version_info')
lst = []
try:
def trace(frame, ev, arg):
lst.append((frame.f_code.co_filename, frame.f_lineno, ev))
if not PYPY: # because we expect to trace on PyPy
print("TRACE: %s:%s %s" % lst[-1])
return trace
with allocate_lock():
sys.settrace(trace)
finally:
sys.settrace(old)
if not PYPY:
self.assertEqual(lst, [], "trace not empty")
else:
# Have an assert so that we know if we miscompile
self.assertTrue(len(lst) > 0, "should not compile on pypy")
def test_untraceable_lock_uses_different_lock(self):
if hasattr(sys, 'gettrace'):
old = sys.gettrace()
else:
old = None
PYPY = hasattr(sys, 'pypy_version_info')
lst = []
# we should be able to use unrelated locks from within the trace function
l = allocate_lock()
try:
def trace(frame, ev, arg):
with l:
lst.append((frame.f_code.co_filename, frame.f_lineno, ev))
if not PYPY: # because we expect to trace on PyPy
print("TRACE: %s:%s %s" % lst[-1])
return trace
l2 = allocate_lock()
sys.settrace(trace)
# Separate functions, not the C-implemented `with` so the trace
# function gets a crack at them
l2.acquire()
l2.release()
finally:
sys.settrace(old)
if not PYPY:
self.assertEqual(lst, [], "trace not empty")
else:
# Have an assert so that we know if we miscompile
self.assertTrue(len(lst) > 0, "should not compile on pypy")
def test_untraceable_lock_uses_same_lock(self):
from gevent.hub import LoopExit
if hasattr(sys, 'gettrace'):
old = sys.gettrace()
else:
old = None
PYPY = hasattr(sys, 'pypy_version_info')
lst = []
e = None
# we should not be able to use the same lock from within the trace function
# because it's over acquired but instead of deadlocking it raises an exception
l = allocate_lock()
try:
def trace(frame, ev, arg):
with l:
lst.append((frame.f_code.co_filename, frame.f_lineno, ev))
return trace
sys.settrace(trace)
# Separate functions, not the C-implemented `with` so the trace
# function gets a crack at them
l.acquire()
except LoopExit as ex:
e = ex
finally:
sys.settrace(old)
if not PYPY:
self.assertEqual(lst, [], "trace not empty")
else:
# Have an assert so that we know if we miscompile
self.assertTrue(len(lst) > 0, "should not compile on pypy")
self.assertTrue(isinstance(e, LoopExit))
def run_script(self, more_args=()):
args = [sys.executable, "-c", script]
args.extend(more_args)
rc = subprocess.call(args)
self.assertNotEqual(rc, 2, "interpreter was blocked")
self.assertEqual(rc, 0, "Unexpected error")
def test_finalize_with_trace(self):
self.run_script()
def test_bootstrap_inner_with_trace(self):
self.run_script(["1"])
if __name__ == "__main__":
import greentest
greentest.main()
|
Hiwin_RT605_Socket_v3_20190628113312.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd_v3 as TCP
import HiwinRA605_socket_Taskcmd_v3 as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
#data = '0' # initial value for the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # PEP 479: raising StopIteration inside a generator is a RuntimeError
        # on Python 3.7+, so simply return to end the iteration.
        return
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
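# Illustrative use of the switch recipe above (comments only, not executed;
# mirrors how Socket_command() uses it further down):
#
#   for case in switch(value):
#       if case(SomeConstant):
#           ...        # runs when value == SomeConstant
#           break
#       if case():     # default branch
#           ...
#           break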
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
self.get_connect()
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
        self.s.send(msg.encode('utf-8')) # encode with utf-8; other encodings exist, but utf-8 is fine for str
    def get_recieve(self):
        data = self.s.recv(1024) # 1024 is the buffer size, i.e. how many bytes to receive at once
        # the raw bytes are returned; callers index into them and compare ASCII codes
        return data
def close(self):
self.s.close()
Socket = client()
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = '%s'%x
pos.y = '%s'%y
pos.z = '%s'%z
pos.pitch = '%s'%pitch
pos.roll = '%s'%roll
pos.yaw = '%s'%yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%action)
socket_cmd.grip = int('%s'%grip)
socket_cmd.ra = int('%s'%ra)
socket_cmd.setvel = int('%s'%setvel)
socket_cmd.setboth = int('%s'%setboth)
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent from the strategy side
socket_cmd.Speedmode = int('%s'%speedmode)
# def point_data(req): ## receive pose data sent from the strategy side
# pos.x = '%s'%req.x
# pos.y = '%s'%req.y
# pos.z = '%s'%req.z
# pos.pitch = '%s'%req.pitch
# pos.roll = '%s'%req.roll
# pos.yaw = '%s'%req.yaw
# return(1)
# ##----------Arm Mode-------------###
# def Arm_Mode(req): ## receive arm-mode data sent from the strategy side
# global arm_mode_flag
# socket_cmd.action = int('%s'%req.action)
# socket_cmd.grip = int('%s'%req.grip)
# socket_cmd.ra = int('%s'%req.ra)
# socket_cmd.setvel = int('%s'%req.vel)
# socket_cmd.setboth = int('%s'%req.both)
# arm_mode_flag = True
# Socket_command()
# return(1)
# ##-------Arm Speed Mode------------###
# def Speed_Mode(req): ## receive arm speed-mode data sent from the strategy side
# global speed_mode_flag
# socket_cmd.Speedmode = int('%s'%req.Speedmode)
# return(1)
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
#a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
#s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
#b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
    rate = rospy.Rate(100) # 100hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission --------------##
##--------------- socket: send arm commands -----------------
def Socket_command():
global Socket
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
        #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
        #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
        #------- set arm fast & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
    socket_cmd.action = 6 ## switch back to the initial mode state
print(data)
print("Socket:", Socket)
    #Socket.send(data.encode('utf-8')) # socket send: encode the str before transmitting
Socket.send(data)
##-----------socket client--------
def socket_client():
global Socket
try:
#Socket = client()
#Socket.get_connect()
#print("Socket_client :",dir(Socket))
#Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
Socket_feedback(Socket)
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
feedback_str = Socket.get_recieve()
        # the arm side reports its state ('48'/'49'/'54' are the ASCII codes of '0'/'1'/'6')
        if str(feedback_str[2]) == '48':# '0': arm is Ready to accept the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# '1': arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# '6': strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # check the send flag
        if str(feedback_str[4]) == '48':# returns 0 (false)
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returns 1 (true)
            state_feedback.SentFlag = 1
        ##--------------- socket: send arm commands end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##------------- socket packet transmission end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6 ## switch to the initial mode state
    ## multithreading
    t = threading.Thread(target=socket_client)
    t.start() # start the thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
test_urllib.py
|
"""Regresssion tests for urllib"""
import urllib
import httplib
import unittest
from test import test_support
import os
import sys
import mimetools
import tempfile
import StringIO
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
"""Setup of a temp file to use for testing"""
self.text = "test_urllib: %s\n" % self.__class__.__name__
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(test_support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual('', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
        # Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), mimetools.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertEqual(self.returned_obj.getcode(), None)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = test_support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in os.environ.keys():
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.getproxies_environment()
        # getproxies_environment() uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
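        # Monkeypatch httplib.HTTP's connection class with one whose socket is a
        # StringIO preloaded with `fakedata`, so urlopen() never touches the network.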
class FakeSocket(StringIO.StringIO):
def sendall(self, str): pass
def makefile(self, mode, name): return self
def read(self, amt=None):
if self.closed: return ''
return StringIO.StringIO.read(self, amt)
def readline(self, length=None):
if self.closed: return ''
return StringIO.StringIO.readline(self, length)
class FakeHTTPConnection(httplib.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
assert httplib.HTTP._connection_class == httplib.HTTPConnection
httplib.HTTP._connection_class = FakeHTTPConnection
def unfakehttp(self):
httplib.HTTP._connection_class = httplib.HTTPConnection
def test_read(self):
self.fakehttp('Hello!')
try:
fp = urllib.urlopen("http://python.org/")
self.assertEqual(fp.readline(), 'Hello!')
self.assertEqual(fp.readline(), '')
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp('Hello!')
try:
fp = urllib.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp("""HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file:README
Connection: close
Content-Type: text/html; charset=iso-8859-1
""")
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp('')
try:
self.assertRaises(IOError, urllib.urlopen, 'http://something')
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(test_support.TESTFN)
self.text = 'testing urllib.urlretrieve'
try:
FILE = file(test_support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
def createNewTempFile(self, data=""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
self.assertEqual(result[0], test_support.TESTFN)
self.assertIsInstance(result[1], mimetools.Message,
"did not get a mimetools.Message instance as "
"second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.urlretrieve(self.constructLocalFileUrl(
test_support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = file(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertIsInstance(count, int)
self.assertIsInstance(block_size, int)
self.assertIsInstance(total_size, int)
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 5)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 8193)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
    According to RFC 2396 ("Uniform Resource Identifiers"), to escape a
character you write it as '%' + <2 character US-ASCII hex value>. The Python
code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
        # Make sure quote() does not quote letters, digits, and "_.-"
        do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %s != %s" % (do_not_quote, result))
result = urllib.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %s != %s" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.quote.func_defaults[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %s != %s" % (quote_by_default, result))
result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %s != %s" %
(quote_by_default, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): %s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %s != %s" % (expected, result))
        result = urllib.quote_plus(partial_quote)
        self.assertEqual(expected, result,
                         "using quote_plus(): %s != %s" % (expected, result))
self.assertRaises(TypeError, urllib.quote, None)
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %s != %s" % (result, hexescape(' ')))
result = urllib.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
    See the docstring for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using quote(): not all characters escaped; %s" %
result)
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = '\xab\xea'
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquote_with_unicode(self):
r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
        The test cannot assume anything about ordering: the docs make no
        guarantee, and dictionary input has no defined order.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
result = urllib.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.quote("make sure")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the nturl2path library')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.url2pathname(url)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of the password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.splitpasswd('user:a:b'))
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines they sometimes
# fail in one test, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0",
DeprecationWarning)
test_support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
|
cron.py
|
from collections import namedtuple
from datetime import datetime, timedelta
import logging
import threading
from time import sleep
CRON_ENCORE = 'CRON_ENCORE'
CRON_STOP = 'CRON_STOP'
logger = logging.getLogger('liberapay.cron')
Daily = namedtuple('Daily', 'hour')
Weekly = namedtuple('Weekly', 'weekday hour')
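# A Job's period can be a Daily or Weekly schedule, a number of seconds, or 'once'
# (see Job.start below for how each kind is interpreted).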
class Cron:
def __init__(self, website):
self.website = website
self.conn = None
self._wait_for_lock_thread = None
self.has_lock = False
self.jobs = []
def __call__(self, period, func, exclusive=False):
job = Job(self, period, func, exclusive)
self.jobs.append(job)
if not self.website.env.run_cron_jobs or not period:
return
if exclusive and not self.has_lock:
self._wait_for_lock()
return
job.start()
def _wait_for_lock(self):
if self.conn:
return # Already waiting
self.conn = self.website.db.get_connection().__enter__()
def f():
cursor = self.conn.cursor()
while True:
if cursor.one("SELECT pg_try_advisory_lock(0)"):
self.has_lock = True
break
sleep(300)
for job in self.jobs:
if job.exclusive:
job.start()
t = self._wait_for_lock_thread = threading.Thread(target=f)
t.daemon = True
t.start()
class Job:
__slots__ = ('cron', 'period', 'func', 'exclusive', 'thread')
def __init__(self, cron, period, func, exclusive=False):
self.cron = cron
self.period = period
self.func = func
self.exclusive = exclusive
self.thread = None
def __repr__(self):
return f"Job(func={self.func!r}, period={self.period!r}, exclusive={self.exclusive!r}, thread={self.thread!r})"
def start(self):
if self.thread and self.thread.is_alive() or not self.period:
return
def f():
while True:
period = self.period
if isinstance(period, Weekly):
now = datetime.utcnow()
then = now.replace(hour=period.hour, minute=10, second=0)
days = (period.weekday - now.isoweekday()) % 7
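                    # days until the scheduled weekday (0 means today)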
if days:
then += timedelta(days=days)
seconds = (then - now).total_seconds()
if seconds > 0:
sleep(seconds)
elif seconds < -60:
sleep(86400 * 6)
continue
elif isinstance(period, Daily):
now = datetime.utcnow()
then = now.replace(hour=period.hour, minute=5, second=0)
seconds = (then - now).total_seconds()
if seconds > 0:
# later today
sleep(seconds)
elif seconds < -60:
# tomorrow
sleep(3600 * 24 + seconds)
try:
if isinstance(period, (float, int)) and period < 300:
logger.debug(f"Running {self!r}")
else:
logger.info(f"Running {self!r}")
test_hook()
r = self.func()
except Exception as e:
self.cron.website.tell_sentry(e, {})
# retry in 5 minutes
sleep(300)
continue
else:
if r is CRON_ENCORE:
sleep(2)
continue
if r is CRON_STOP:
return
if period == 'once':
return
elif isinstance(period, (float, int)):
sleep(period)
else:
sleep(3600 * 23)
t = self.thread = threading.Thread(target=f, name=self.func.__name__)
t.daemon = True
t.start()
return t
def test_hook():
pass
|
duet_test.py
|
# stdlib
from multiprocessing import set_start_method
import socket
import time
from typing import Callable
from typing import List
from typing import Tuple
# third party
import pytest
# syft absolute
from syft.grid.duet import test_duet_network
# syft relative
from .duet_scenarios_tests import register_duet_scenarios
from .process_test import SyftTestProcess
set_start_method("spawn", force=True)
registered_tests: List[Tuple[str, Callable, Callable]] = []
register_duet_scenarios(registered_tests)
@pytest.mark.fast
def test_duet_network_availability() -> None:
assert test_duet_network() is True
@pytest.mark.slow
def test_duet(signaling_server: int) -> None:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
assert s.connect_ex(("localhost", signaling_server)) == 0
for testcase, do, ds in registered_tests:
start = time.time()
do_proc = SyftTestProcess(target=do, args=(signaling_server,))
do_proc.start()
ds_proc = SyftTestProcess(target=ds, args=(signaling_server,))
ds_proc.start()
ds_proc.join(30)
do_proc.terminate()
if do_proc.exception:
exception, tb = do_proc.exception
raise Exception(tb) from exception
if ds_proc.exception:
exception, tb = ds_proc.exception
raise Exception(tb) from exception
if ds_proc.is_alive():
ds_proc.terminate()
raise Exception(f"ds_proc is hanged in {testcase}")
print(f"test {testcase} passed in {time.time() - start} seconds")
|
train.py
|
"""
train your model and support eval when training.
"""
import os
import sys
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_path)
import multiprocessing as mp
import time
import argparse
import megengine as mge
import megengine.distributed as dist
from megengine.jit import trace
from megengine.data import RandomSampler, SequentialSampler, DataLoader
from edit.utils import Config, mkdir_or_exist, build_from_cfg, get_root_logger
from edit.models import build_model
from edit.datasets import build_dataset
from edit.core.runner import EpochBasedRunner
from edit.core.hook import HOOKS
from edit.core.hook.evaluation import EvalIterHook
def parse_args():
parser = argparse.ArgumentParser(description='Train and Eval an editor o(* ̄▽ ̄*)ブ')
parser.add_argument('config', help='train config file path')
parser.add_argument("-d", "--dynamic", default=True, action='store_true', help="enable dygraph mode")
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument("--gpuids", type=str, default="-1", help="spcefic gpus, -1 for cpu, >=0 for gpu, e.g.: 2,3")
parser.add_argument('--work_dir', type=str, default=None, help='the dir to save logs and models')
parser.add_argument('--resume_from', type=str, default=None, help='the checkpoint file to resume from')
args = parser.parse_args()
return args
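# Illustrative invocation (paths and GPU ids are hypothetical):
#   python train.py path/to/config.py --gpuids 0,1 --work_dir ./workdirs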
def get_loader(dataset, cfg, mode='train'):
assert mode in ('train', 'eval')
if mode == 'train':
sampler = RandomSampler(dataset, batch_size=cfg.data.samples_per_gpu, drop_last=True, seed=0)
loader = DataLoader(dataset, sampler, num_workers=cfg.data.workers_per_gpu)
else:
samples_per_gpu = cfg.data.get('eval_samples_per_gpu', cfg.data.samples_per_gpu)
workers_per_gpu = cfg.data.get('eval_workers_per_gpu', cfg.data.workers_per_gpu)
if cfg.evaluation.multi_process is True:
sampler = SequentialSampler(dataset, batch_size=samples_per_gpu, drop_last=False)
else:
sampler = SequentialSampler(dataset, batch_size=samples_per_gpu, drop_last=False, world_size=1, rank=0)
loader = DataLoader(dataset, sampler, num_workers=workers_per_gpu)
return loader
def train(model, datasets, cfg, rank):
data_loaders = [ get_loader(ds, cfg, 'train') for ds in datasets]
runner = EpochBasedRunner(model=model, optimizers_cfg=cfg.optimizers, work_dir=cfg.work_dir)
runner.create_gradmanager_and_optimizers()
if cfg.resume_from is not None:
runner.resume(cfg.resume_from, cfg.get('resume_optim', True))
elif cfg.load_from is not None:
runner.load_checkpoint(cfg.load_from, load_optim=False)
else:
pass
runner.sync_model_params()
# register some useful hooks
runner.register_training_hooks(lr_config=cfg.lr_config, checkpoint_config=cfg.checkpoint_config, log_config=cfg.log_config)
# register evaluation hook
if cfg.get('evaluation', None) is not None:
dataset = build_dataset(cfg.data.eval)
save_path = os.path.join(cfg.work_dir, 'eval_visuals')
log_path = os.path.join(cfg.work_dir, 'eval.log')
runner.register_hook(EvalIterHook(get_loader(dataset, cfg, 'eval'), save_path=save_path, log_path=log_path, **cfg.evaluation))
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def worker(rank, world_size, cfg, gpu_id="0", port=23333):
if cfg.dynamic:
trace.enabled = False
if world_size > 1:
dist.init_process_group(
master_ip = "localhost",
port = port,
world_size = world_size,
rank = rank,
device = int(gpu_id)%10,
)
log_file = os.path.join(cfg.work_dir, 'rank{}_root.log'.format(rank))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    model = build_model(cfg.model, train_cfg=cfg.train_cfg, eval_cfg=cfg.eval_cfg) # parameters are already randomly initialized at this point
datasets = [build_dataset(cfg.data.train)]
train(model, datasets, cfg, rank)
def main():
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.dynamic = args.dynamic
if args.work_dir is not None:
cfg.work_dir = args.work_dir
else:
assert cfg.get('work_dir', None) is not None, 'if do not set work_dir in args, please set in config file'
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.work_dir = os.path.join(cfg.work_dir, timestamp)
mkdir_or_exist(os.path.abspath(cfg.work_dir))
log_file = os.path.join(cfg.work_dir, 'root.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
logger.info('Config:\n{}'.format(cfg.text))
gpu_list = [ item.strip() for item in args.gpuids.split(",")]
if gpu_list[0] == "-1":
world_size = 0 # use cpu
logger.info('training use only cpu')
else:
world_size = len(gpu_list)
logger.info('training gpus num: {}'.format(world_size))
if world_size == 0: # use cpu
mge.set_default_device(device='cpux')
elif world_size == 1:
mge.set_default_device(device='gpu' + gpu_list[0])
else:
pass
if world_size > 1:
# scale weight decay in "SUM" mode
port = dist.util.get_free_ports(1)[0]
server = dist.Server(port)
processes = []
for rank in range(world_size):
logger.info("init distributed process group {} / {}".format(rank, world_size))
p = mp.Process(target=worker, args=(rank, world_size, cfg, gpu_list[rank], port))
p.start()
processes.append(p)
for rank in range(world_size):
processes[rank].join()
code = processes[rank].exitcode
assert code == 0, "subprocess {} exit with code {}".format(rank, code)
else:
worker(0, 1, cfg)
if __name__ == "__main__":
main()
|
prettierd.py
|
import sublime
import sublime_plugin
import pathlib, socket, json, subprocess, threading, fnmatch
from .lib.diff_match_patch import diff_match_patch
__version__ = "0.1.0"
prettierd: "Prettierd | None" = None
save_without_format = False
def toggle_save_without_format(force=None, timeout=500):
global save_without_format
if force is None:
save_without_format = not save_without_format
sublime.set_timeout_async(lambda: toggle_save_without_format(force=False), timeout)
else:
save_without_format = force
def plugin_loaded():
global prettierd
prettierd = Prettierd()
def plugin_unloaded():
global prettierd
if prettierd:
prettierd.terminate()
class Prettierd:
def __init__(self):
self.script = pathlib.Path(__file__).parent.joinpath('prettierd.mjs').resolve()
self.settings = sublime.load_settings("prettier.sublime-settings")
self.port = self.settings.get("port") or 9870
self.seq = 0
self.ready = False
self.child: subprocess.Popen[bytes] | None = None
self.on_done = lambda x: None
self.terminated = False
sublime.set_timeout_async(self.spawn_subprocess, 500)
def spawn_subprocess(self):
print('prettierd: spawning subprocess')
sublime.status_message("Prettier: warming up...")
si = None
if sublime.platform() == "windows":
si = subprocess.STARTUPINFO() # type: ignore
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
self.child = subprocess.Popen(
["node", self.script, str(self.port)],
startupinfo=si,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
self.poll_ready_state()
def poll_ready_state(self):
if not self.child:
return self.retry()
try:
io = self.child.stdout
if not io:
return self.retry()
res = io.readline().decode('utf-8').rstrip()
ret = json.loads(res) if res else None
if ret and 'ok' in ret and ret['ok'] == self.port:
self.ready = True
self.refresh_statuses()
sublime.status_message("Prettier: ready.")
threading.Thread(target=self.poll_close_state).start()
else:
print('prettierd:', res)
sublime.status_message("Prettier: something went wrong.")
self.retry()
except Exception as e:
print('prettierd error in poll_ready_state:', e)
self.retry()
def retry(self):
if self.terminated: return
print('prettierd: retry')
self.ready = False
self.request("quit", timeout=100)
sublime.set_timeout_async(self.spawn_subprocess, 3000)
def poll_close_state(self):
if not self.child:
            return
_, stderr = self.child.communicate()
if stderr:
msg = stderr.decode('utf-8')
sublime.status_message(f"Prettier: {msg}")
if b'EADDRINUSE' in stderr:
self.retry()
def terminate(self):
if self.child: self.child.terminate()
self.terminated = True
self.clear_statuses()
def each_view(self):
for window in sublime.windows():
yield from window.views()
def clear_statuses(self):
for view in self.each_view():
view.erase_status("prettier")
def refresh_statuses(self):
for view in self.each_view():
if view.get_status("prettier"): continue
self.request_formattable(view, lambda x: self.on_formattable(x, view))
def request_formattable(self, view, on_done):
if not view.file_name(): return
if self.is_ignored(view.file_name()): return
timeout = self.settings.get("query_timeout")
self.request("getFileInfo", { "path": view.file_name() }, timeout, on_done)
def is_ignored(self, file_name):
for p in self.settings.get("file_exclude_patterns"):
if fnmatch.fnmatch(file_name, p): return True
def on_formattable(self, ok, view):
if "inferredParser" in ok:
parser = ok["inferredParser"] or "off"
view.set_status("prettier", f"Prettier ({parser})")
elif "ignored" in ok and ok["ignored"] is True:
view.set_status("prettier", f"Prettier (ignored)")
def request_format(self, view, on_done):
status = view.get_status("prettier")
if not status: return sublime.status_message("Prettier: not ready")
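        # the status string looks like "Prettier (<parser>)"; slice out the parser name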
parser = status[10:-1]
if parser in ('off', 'ignored'): return
path = view.file_name()
contents = view.substr(sublime.Region(0, view.size()))
cursor = s[0].b if (s := view.sel()) else 0
timeout = self.settings.get("format_timeout")
payload = { 'path': path, 'contents': contents, 'parser': parser, 'cursor': cursor }
sublime.status_message("Prettier: formatting...")
self.request("format", payload, timeout, on_done)
def on_format(self, ok, view, save_on_format=False):
contents = view.substr(sublime.Region(0, view.size()))
if "formatted" in ok and ok["formatted"] != contents:
ok['save_on_format'] = save_on_format
view.run_command("prettier_format", {
'formatted': ok["formatted"],
'cursor': ok['cursorOffset'],
'save_on_format': save_on_format,
})
def do_replace(self, edit, view, formatted, cursor, save_on_format=False):
original = view.substr(sublime.Region(0, view.size()))
patches = diff_match_patch().patch_make(original, formatted)
for obj in patches:
point = obj.start1
for i, text in obj.diffs:
if i == 0:
point += len(text)
elif i == 1:
view.insert(edit, point, text)
point += len(text)
elif i == -1:
view.erase(edit, sublime.Region(point, point + len(text)))
sel = view.sel()
sel.clear()
sel.add(sublime.Region(cursor, cursor))
if save_on_format:
sublime.set_timeout(lambda: view.run_command("save"), 100)
sublime.set_timeout_async(lambda: sublime.status_message('Prettier: formatted.'), 110)
else:
sublime.status_message('Prettier: formatted.')
def request(self, method, params=None, timeout=None, on_done=lambda x: None):
if not self.ready: return
self.seq += 1
self.on_done = on_done
sublime.set_timeout_async(lambda: self.request_sync(self.seq, method, params, timeout=timeout), 0)
def make_request(self, seq, method, params):
request = { 'id': seq, 'method': method, 'params': params }
return bytes(json.dumps(request), 'utf-8')
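    # Wire format sketch: each call sends one JSON object such as
    # {"id": 1, "method": "format", "params": {...}} over a short-lived TCP
    # connection; the daemon answers with {"ok": ...} or {"err": ...}
    # (handled in request_sync below).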
def request_sync(self, seq, method, params=None, timeout=None):
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(timeout)
s.connect(('localhost', self.port))
s.sendall(self.make_request(seq, method, params))
s.shutdown(socket.SHUT_WR)
res = b''
while True:
chunk = s.recv(512)
if not chunk: break
res += chunk
if ret := json.loads(res):
if 'err' in ret:
print('prettierd:', ret['err'])
sublime.status_message(f"Prettier: {ret['err']}")
elif 'ok' in ret and self.seq == seq:
self.on_done(ret['ok'])
except socket.timeout:
sublime.status_message("Prettier: timeout")
except Exception as e:
print('prettierd error in request:', e)
self.retry()
def do_formattable(self, view):
self.request_formattable(view, lambda x: self.on_formattable(x, view))
def do_format(self, view, save_on_format=False):
self.request_format(view, lambda x: self.on_format(x, view, save_on_format=save_on_format))
def do_clear_cache(self):
self.request("clearConfigCache")
class PrettierFormat(sublime_plugin.TextCommand):
def run(self, edit, save_on_format=False, formatted=None, cursor=0):
if not prettierd or not prettierd.ready:
return
if formatted:
prettierd.do_replace(edit, self.view, formatted, cursor, save_on_format=save_on_format)
else:
prettierd.do_format(self.view, save_on_format=save_on_format)
class PrettierSaveWithoutFormat(sublime_plugin.TextCommand):
def run(self, _):
toggle_save_without_format()
self.view.run_command("save")
class PrettierListener(sublime_plugin.EventListener):
def on_pre_save(self, view):
if prettierd and not save_without_format and prettierd.settings.get('format_on_save'):
save_on_format = prettierd.settings.get('save_on_format')
view.run_command('prettier_format', { 'save_on_format': save_on_format })
def on_post_save(self, view):
if prettierd and view.file_name() and '.prettierrc' in view.file_name():
prettierd.do_clear_cache()
def on_activated(self, view):
if prettierd:
prettierd.do_formattable(view)
def on_exit(self):
if prettierd:
prettierd.terminate()
class PrettierClearCache(sublime_plugin.ApplicationCommand):
def run(self):
if prettierd:
prettierd.do_clear_cache()
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import ast
import inspect
import os
import re
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from . import __version__
from ._compat import getargspec, iteritems, reraise
from .globals import current_app
from .helpers import get_debug_flag
try:
import dotenv
except ImportError:
dotenv = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ('app', 'application'):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [
v for k, v in iteritems(module.__dict__) if isinstance(v, Flask)
]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Auto-detected multiple Flask applications in module "{module}".'
' Use "FLASK_APP={module}:name" to specify the correct'
' one.'.format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ('create_app', 'make_app'):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(app_factory, script_info)
if isinstance(app, Flask):
return app
except TypeError:
raise NoAppException(
'Auto-detected "{function}()" in module "{module}", but '
'could not call it without specifying arguments.'.format(
function=attr_name, module=module.__name__
)
)
raise NoAppException(
'Failed to find application in module "{module}". Are you sure '
'it contains a Flask application? Maybe you wrapped it in a WSGI '
'middleware.'.format(module=module.__name__)
)
def call_factory(app_factory, script_info, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if 'script_info' in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
def find_app_by_string(string, script_info, module):
"""Checks if the given string is a variable name or a function. If it is
a function, it checks for specified arguments and whether it takes
a ``script_info`` argument and calls the function with the appropriate
arguments."""
from . import Flask
function_regex = r'^(?P<name>\w+)(?:\((?P<args>.*)\))?$'
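    # The regex accepts either a bare name ("create_app") or a call with
    # literal arguments ("create_app('dev')"); the args group is parsed below
    # with ast.literal_eval.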
match = re.match(function_regex, string)
if match:
name, args = match.groups()
try:
if args is not None:
args = args.rstrip(' ,')
if args:
args = ast.literal_eval(
"({args}, )".format(args=args))
else:
args = ()
app_factory = getattr(module, name, None)
app = call_factory(app_factory, script_info, args)
else:
attr = getattr(module, name, None)
if inspect.isfunction(attr):
app = call_factory(attr, script_info)
else:
app = attr
if isinstance(app, Flask):
return app
else:
raise NoAppException('Failed to find application in module '
'"{name}"'.format(name=module))
except TypeError as e:
new_error = NoAppException(
'{e}\nThe app factory "{factory}" in module "{module}" could'
' not be called with the specified arguments (and a'
' script_info argument automatically added if applicable).'
' Did you make sure to use the right number of arguments as'
' well as not using keyword arguments or'
' non-literals?'.format(e=e, factory=string, module=module))
reraise(NoAppException, new_error, sys.exc_info()[2])
else:
raise NoAppException(
            'The provided string "{string}" is not a valid variable name '
            'or function expression.'.format(string=string))
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
if os.path.splitext(path)[1] == '.py':
path = os.path.splitext(path)[0]
if os.path.basename(path) == '__init__':
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, '__init__.py')):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return '.'.join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
"""Attempts to locate the application."""
__traceback_hide__ = True
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
'\n\n{tb}'.format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException(
                'Could not import "{name}".'.format(name=module_name)
)
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(app_name, script_info, module)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
message = 'Flask %(version)s\nPython %(python_version)s'
click.echo(message % {
'version': __version__,
'python_version': sys.version,
}, color=ctx.color)
ctx.exit()
version_option = click.Option(['--version'],
help='Show the flask version',
expose_value=False,
callback=get_version,
is_flag=True, is_eager=True)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get('FLASK_APP')
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
        this multiple times will just result in the already loaded app being
        returned.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self.create_app, self)
else:
if self.app_import_path:
path, name = (self.app_import_path.split(':', 1) + [None])[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ('wsgi.py', 'app.py'):
import_name = prepare_import(path)
app = locate_app(
self, import_name, None, raise_if_not_found=False
)
if app:
break
if not app:
raise NoAppException(
'Could not locate a Flask application. You did not provide '
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
debug = get_debug_flag()
if debug is not None:
app._reconfigure_for_run_debug(debug)
self._loaded_app = app
return app
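# Rough sketch of the FLASK_APP / app_import_path forms handled above
# (illustrative; the module names are hypothetical):
#
#   FLASK_APP=hello          -> import module 'hello', auto-detect the app via find_best_app()
#   FLASK_APP=hello.py       -> same as above; prepare_import() strips the '.py'
#   FLASK_APP=hello:myapp    -> import module 'hello', resolve 'myapp' via find_app_by_string()
#
# With no FLASK_APP set, 'wsgi.py' and then 'app.py' are tried in the current directory.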
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
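# Minimal usage sketch for with_appcontext (hypothetical command, not part of Flask):
#
#   @click.command()
#   @with_appcontext
#   def seed():
#       # current_app is usable here because load_app().app_context() is active
#       click.echo(current_app.name)
#
# Commands registered through AppGroup.command() get this wrapping automatically.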
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
    For information as to why this is useful, see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
        shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self, add_default_commands=True, create_app=None,
add_version_option=True, load_dotenv=True, **extra
):
params = list(extra.pop('params', None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points('flask.commands'):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
        # The commands available are the builtin commands plus the commands
        # provided by the application (if it is available).
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ['FLASK_RUN_FROM_CLI'] = 'true'
if self.load_dotenv:
load_dotenv()
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app)
kwargs['obj'] = obj
kwargs.setdefault('auto_envvar_prefix', 'FLASK')
return super(FlaskGroup, self).main(*args, **kwargs)
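# Sketch of a custom script built on FlaskGroup (illustrative; 'create_app' and
# 'myproject' are hypothetical):
#
#   def create_app(script_info):
#       from myproject import make_app
#       return make_app()
#
#   cli = FlaskGroup(create_app=create_app)
#
#   if __name__ == '__main__':
#       cli()
#
# The group then exposes run/shell/routes plus any commands the loaded app registers.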
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path):].lstrip(os.sep)) == other
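# Behaviour sketch for _path_is_ancestor (POSIX-style paths, illustrative):
#
#   _path_is_ancestor('/home/user', '/home/user/project')   # -> True
#   _path_is_ancestor('/home/user', '/home/username')       # -> False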
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionadded:: 1.0
"""
if dotenv is None:
return
if path is not None:
return dotenv.load_dotenv(path)
new_dir = None
for name in ('.env', '.flaskenv'):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
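# Precedence sketch: because already-set variables are not overwritten, a key
# defined in .env wins over the same key in .flaskenv, and anything already in
# the process environment wins over both. For example (hypothetical files):
#
#   .env       ->  FLASK_DEBUG=1
#   .flaskenv  ->  FLASK_DEBUG=0  FLASK_APP=hello.py
#
#   load_dotenv()   # results in FLASK_DEBUG=1 and FLASK_APP=hello.py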
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
help='Enable or disable the reloader. By default the reloader '
'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
help='Enable or disable the debugger. By default the debugger '
'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
help='Enable or disable eager loading. By default eager '
'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=False,
help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
with_threads):
"""Runs a local development server for the Flask application.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments. By default it will
not support any sort of concurrency at all to simplify debugging. This
can be changed with the --with-threads option which will enable basic
multithreading.
The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
"""
from werkzeug.serving import run_simple
debug = get_debug_flag()
if reload is None:
reload = bool(debug)
if debugger is None:
debugger = bool(debug)
if eager_loading is None:
eager_loading = not reload
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
# Extra startup messages. This depends a bit on Werkzeug internals to
# not double execute when the reloader kicks in.
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
# If we have an import path we can print it out now which can help
# people understand what's being served. If we do not have an
# import path because the app was loaded through a callback then
# we won't print anything.
if info.app_import_path is not None:
print(' * Serving Flask app "%s"' % info.app_import_path)
if debug is not None:
print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))
run_simple(host, port, app, use_reloader=reload,
use_debugger=debugger, threaded=with_threads)
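# Example invocations of the command defined above (shell lines shown as comments):
#
#   $ FLASK_APP=hello.py flask run
#   $ flask run --host=0.0.0.0 --port=8080      # bind on all interfaces
#   $ flask run --no-reload --with-threads      # disable reloader, enable threading
#
# With FLASK_DEBUG=1 the reloader and debugger default to on, per get_debug_flag().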
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
    This is useful for executing small snippets of management code
    without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
sys.version,
sys.platform,
app.import_name,
app.debug and ' [debug]' or '',
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
@click.command('routes', short_help='Show the routes for the app.')
@click.option(
'--sort', '-s',
type=click.Choice(('endpoint', 'methods', 'rule', 'match')),
default='endpoint',
help=(
'Method to sort routes by. "match" is the order that Flask will match '
'routes when dispatching a request.'
)
)
@click.option(
'--all-methods',
is_flag=True,
help="Show HEAD and OPTIONS methods."
)
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS'))
if sort in ('endpoint', 'rule'):
rules = sorted(rules, key=attrgetter(sort))
elif sort == 'methods':
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [
', '.join(sorted(rule.methods - ignored_methods)) for rule in rules
]
headers = ('Endpoint', 'Methods', 'Rule')
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*('-' * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(help="""\
This shell command acts as a general utility script for Flask applications.
It loads the application configured (through the FLASK_APP environment
variable) and then provides commands either provided by the application or
Flask itself.
The most useful commands are the "run" and "shell" commands.
Example usage:
\b
%(prefix)s%(cmd)s FLASK_APP=hello.py
%(prefix)s%(cmd)s FLASK_DEBUG=1
%(prefix)sflask run
""" % {
'cmd': os.name == 'posix' and 'export' or 'set',
'prefix': os.name == 'posix' and '$ ' or '',
})
def main(as_module=False):
args = sys.argv[1:]
if as_module:
this_module = 'flask'
if sys.version_info < (2, 7):
this_module += '.cli'
name = 'python -m ' + this_module
# Python rewrites "python -m flask" to the path to the file in argv.
# Restore the original command so that the reloader works.
sys.argv = ['-m', this_module] + args
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
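# Worked example for the helpers above (illustrative; bufreverse/wordreverse shown
# for a little-endian host, since struct's '@I' uses native byte order). Each
# 4-byte word is byte-swapped, and wordreverse flips the word order:
#
#   bytereverse(0x12345678)                           # -> 0x78563412
#   bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08')    # -> '\x04\x03\x02\x01\x08\x07\x06\x05'
#   wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08')   # -> '\x05\x06\x07\x08\x01\x02\x03\x04'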
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
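# Sketch of the proof-of-work check performed in Miner.work() above (illustrative,
# Python 3 syntax, hypothetical values): the 80-byte header is hashed twice with
# SHA-256 and the digest, read as a 256-bit little-endian integer, must be below
# the target.
#
#   import hashlib
#   header = blk_hdr + nonce_bin        # 76 static bytes + 4-byte nonce
#   digest = hashlib.sha256(hashlib.sha256(header).digest()).digest()
#   if int.from_bytes(digest, 'little') < target:
#       pass  # solution found -> submit via getwork([solution])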
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8838
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import os
import re
import sys
import copy
import time
import types
import signal
import random
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
# Since salt.log is imported below, salt.utils.network needs to be imported here as well
import salt.utils.network
if check_dns is True:
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found or not responsive. '
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
                      'Set \'syndic_master\' value in minion config.'.format(master)
            else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
                      'Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning(
'Master ip address changed from %s to %s',
opts['master_ip'], ret['master_ip']
)
if opts['source_interface_name']:
log.trace('Custom source interface required: %s', opts['source_interface_name'])
interfaces = salt.utils.network.interfaces()
log.trace('The following interfaces are available on this Minion:')
log.trace(interfaces)
if opts['source_interface_name'] in interfaces:
if interfaces[opts['source_interface_name']]['up']:
addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
interfaces[opts['source_interface_name']]['inet6']
ret['source_ip'] = addrs[0]['address']
log.debug('Using %s as source IP address', ret['source_ip'])
else:
log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
opts['source_interface_name'])
else:
log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
elif opts['source_address']:
ret['source_ip'] = salt.utils.network.dns_check(
opts['source_address'],
int(opts['source_ret_port']),
True,
opts['ipv6'])
log.debug('Using %s as source IP address', ret['source_ip'])
if opts['source_ret_port']:
ret['source_ret_port'] = int(opts['source_ret_port'])
log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
if opts['source_publish_port']:
ret['source_publish_port'] = int(opts['source_publish_port'])
log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
ret['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=ret['master_ip'], port=opts['master_port'])
log.debug('Master URI: %s', ret['master_uri'])
return ret
def prep_ip_port(opts):
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(':', 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: [::1]:1234
# Strip off brackets for ipv6 support
ret['master'] = ip_port[0].strip('[]')
# Cast port back to an int! Otherwise a TypeError is thrown
# on some of the socket calls elsewhere in the minion and utils code.
ret['master_port'] = int(ip_port[1])
return ret
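# Behaviour sketch for prep_ip_port() (illustrative master values):
#
#   master: 'mysaltmaster'     -> {'master': 'mysaltmaster'}
#   master: '127.0.0.1:4506'   -> {'master': '127.0.0.1', 'master_port': 4506}
#   master: '[::1]:4506'       -> {'master': '::1', 'master_port': 4506}
#   master_uri_format: 'ip_only' (or a bare IPv6 address) -> value passed through unchanged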
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
        mode: anything os.makedirs would accept as a mode.
        uid: the uid to set. If not set, or if it is None or -1, no changes
             are made. The same applies if the directory is already owned by
             this uid. Must be an int. Works only on unix/unix-like systems.
        gid: the gid to set. If not set, or if it is None or -1, no changes
             are made. The same applies if the directory is already owned by
             this gid. Must be an int. Works only on unix/unix-like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
    # dir mode. So let's check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
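# Illustrative call (hypothetical values): ensure <cachedir>/proc exists with mode
# 0700 and owned by uid/gid 0 on unix-like systems.
#
#   proc_dir = get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)
#   # -> '/var/cache/salt/minion/proc'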
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # if the arg is a dict with __kwarg__ == True, then it's a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
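# Behaviour sketch (assumes a hypothetical 'some_func' that accepts **kwargs):
# positional args pass through, while dicts flagged with __kwarg__ and
# 'key=value' strings become keyword arguments.
#
#   args = ['web1', {'__kwarg__': True, 'refresh': True}, 'timeout=30']
#   load_args_and_kwargs(some_func, args)
#   # -> (['web1'], {'refresh': True, 'timeout': 30})   (string values are YAML-parsed by parse_input)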
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
    and save its result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
# Check for valid types
if not isinstance(opts['master'], (six.string_types, list)):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module %s', mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
            log.error('%s returned from %s is not a string or list', opts['master'], mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: %s', mod_fun)
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
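# Behaviour sketch for master_event():
#
#   master_event(type='connected')                  # -> '__master_connected'
#   master_event(type='failback')                   # -> '__master_failback'
#   master_event(type='alive', master='salt01')     # -> '__master_alive_salt01'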
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
        returned. If this function is called outside the minion's initialization
        phase (for example from the minion's main event-loop when a master connection
        loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# Run masters discovery over SSDP. This may modify the whole configuration,
        # depending on the networking and the sets of masters.
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
if opts['master_failback']:
secondary_masters = opts['master'][1:]
shuffle(secondary_masters)
opts['master'][1:] = secondary_masters
else:
shuffle(opts['master'])
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        # FIXME: if SMinion doesn't define io_loop, it can't switch master; see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
opts['local_masters'] = copy.copy(opts['master'])
if opts['random_master']:
shuffle(opts['local_masters'])
last_exc = None
opts['master_uri_list'] = list()
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
                            msg = ('Master %s could not be reached, trying '
                                   'next master (if any)', opts['master'])
                        log.info(*msg)  # unpack (format, args) so the placeholders are interpolated
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
def _discover_masters(self):
'''
Discover master(s) and decide where to connect, if SSDP is around.
This modifies the configuration on the fly.
:return:
'''
if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
masters = {}
for att in range(self.opts['discovery'].get('attempts', 3)):
try:
att += 1
log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
masters.update(master_discovery_client.discover())
if not masters:
time.sleep(self.opts['discovery'].get('pause', 5))
else:
break
except Exception as err:
log.error('SSDP discovery failure: {0}'.format(err))
break
if masters:
policy = self.opts.get('discovery', {}).get('match', 'any')
if policy not in ['any', 'all']:
log.error('SSDP configuration matcher failure: unknown value "{0}". '
'Should be "any" or "all"'.format(policy))
else:
mapping = self.opts['discovery'].get('mapping', {})
for addr, mappings in masters.items():
for proto_data in mappings:
cnt = len([key for key, value in mapping.items()
if proto_data.get('mapping', {}).get(key) == value])
if policy == 'any' and bool(cnt) or cnt == len(mapping):
self.opts['master'] = proto_data['master']
return
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
# future lint: disable=str-format-in-logging
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: %s or '
'return_retry_timer_max: %s). Both must be positive '
'integers.',
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
# future lint: enable=str-format-in-logging
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
import salt.loader
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
install_zmq()
io_loop = ZMQDefaultLoop.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import salt.utils.yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['saltenv'] is not None:
penv = self.opts['saltenv']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.files.fopen(ptop, 'wb') as fp_:
salt.utils.yaml.safe_dump(cache_top, fp_)
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(
opts['conf_file'],
ignore_config_errors=ignore_config_errors,
role='master'
)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
    Create a multi-minion interface; this creates as many minions as are
    defined in the master option and binds each minion object to its
    respective master.
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'async': True})  # 'async' is a reserved word in Python 3.7+
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up minion for multi-master. Is '
'master at %s responding?', minion.opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
failed = True
log.critical(
'Unexpected error while connecting to %s',
minion.opts['master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = [] if jid_queue is None else jid_queue
self.periodic_callbacks = {}
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
if self.opts['random_startup_delay']:
sleep_time = random.randint(0, self.opts['random_startup_delay'])
log.info(
'Minion sleeping for %s seconds due to configured '
'startup_delay between 0 and %s seconds',
sleep_time, self.opts['random_startup_delay']
)
time.sleep(sleep_time)
self.process_manager = ProcessManager(name='MinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'async': True})  # 'async' is a reserved word in Python 3.7+
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master(failed=failed)
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
if self._connect_master_future.done():
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
        # If this is a *nix system AND modules_max_memory is set, let's enforce
        # a memory limit on module imports. This feature ONLY works on *nix-like
        # OSes (the resource module doesn't work on Windows).
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
'modules_max_memory set, enforcing a maximum of %s',
self.opts['modules_max_memory']
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts, proxy=proxy)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions, proxy=proxy)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions, proxy=proxy)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
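        # Optionally sign the payload with the minion's private key, then send
        # it to the master over a synchronous request channel.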
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
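        # Build a '_minion_event' load (either a batch of events or a single
        # data/tag pair) and ship it to the master, either synchronously or
        # through the async request channel with a timeout handler.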
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
return False
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
return False
else:
if timeout_handler is None:
def handle_timeout(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
timeout_handler = handle_timeout
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
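        # Overall flow: decode the payload, skip duplicate jids, honor
        # process_count_max by waiting for running jobs to drain, then execute
        # the job through the _target classmethod in a new process (or in a
        # thread when multiprocessing is disabled).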
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
        # We stash an instance reference to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # Python needs to be able to reconstruct the reference on the other
        # side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''
Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
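        # Record the job in proc_dir, resolve the requested function and its
        # executors, run it (honoring minion blackout), and publish the result
        # back to the master and to any configured returners.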
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
        allow_missing_funcs = any([
            minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
            for executor in executors
            if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
        ])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo' # replace the last one with sudo
log.trace('Executors list %s', executors) # pylint: disable=no-member
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
        # Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
log.debug('minion return: %s', ret)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error(
'Returner %s could not be loaded: %s',
returner_str, returner_err
)
except Exception as exc:
log.exception(
'The return failed for job %s: %s', data['jid'], exc
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
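        # Same idea as _thread_return, but data['fun'] is a list of functions;
        # each one is executed in turn and the results are collected either as
        # ordered lists or keyed by function name (multifunc_ordered).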
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
elif minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
if multifunc_ordered:
ret['return'][ind] = func(*args, **kwargs)
ret['retcode'][ind] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'][ind] = True
else:
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning('The minion function caused an exception: %s', exc)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job %s: %s',
data['jid'], exc
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
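        # Build the return load ('_syndic_return' carries the full job
        # metadata, a regular '_return' just carries the ret dict), optionally
        # cache it locally, and send it to the master sync or async.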
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
log.trace('Return data: %s', ret)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['uid'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
if ret['jid'] == 'req':
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
load = jids.setdefault(jid, {})
if ret_cmd == '_syndic_return':
if not load:
load.update({'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__'),
'return': {}})
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load.update({'id': self.opts['id']})
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
load = {'cmd': ret_cmd,
'load': list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
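        # startup_states builds a synthetic job payload (state.sls, state.top
        # or state.highstate) and feeds it through the normal job path via
        # _handle_decoded_payload.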
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to False in Neon Salt release
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def beacons_refresh(self):
'''
        Refresh the beacons.
'''
log.debug('Refreshing beacons.')
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
        Manage the minion scheduler based on the event data (add, modify,
        delete, enable, disable, run, list or save jobs).
'''
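        # Example event data (illustrative):
        #   {'func': 'add', 'persist': True,
        #    'schedule': {'job1': {'function': 'test.ping', 'seconds': 60}}}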
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist)
elif func == 'postpone_job':
self.schedule.postpone_job(name, data)
elif func == 'skip_job':
self.schedule.skip_job(name, data)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
elif func == 'get_next_fire_time':
self.schedule.get_next_fire_time(name)
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
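        # Example event data (illustrative):
        #   {'func': 'enable_beacon', 'name': 'load'}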
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons(include_opts, include_pillar)
elif func == 'list_available':
self.beacons.list_available_beacons()
elif func == 'validate_beacon':
self.beacons.validate_beacon(name, beacon_data)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This %s was scheduled to stop. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
elif self._running is True:
log.error(
'This %s is already running. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
try:
log.info(
'%s is starting as user \'%s\'',
self.__class__.__name__, salt.utils.user.get_user()
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting %s',
self.__class__.__name__,
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
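        # Dispatch on the event tag prefix: module/pillar/beacon/grains
        # refreshes, schedule and beacon management, environ_setenv, mine data,
        # forwarding events to the master, master connectivity changes
        # (disconnected/failback/connected), schedule returns, salt errors and
        # auth credential updates.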
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug(
'Minion of \'%s\' is handling event tag \'%s\'',
self.opts['master'], tag
)
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('beacons_refresh'):
self.beacons_refresh()
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
if self.connected:
log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
            # if the master disconnect event is for a different master, ignore it
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
                # not our master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
                    raise SaltException('Bad master \'{0}\' when master failback is \'{1}\''.format(
                        data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
                elif data['master'] == self.opts['master']:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
log.info('Connection to master %s lost', self.opts['master'])
if self.opts['master_type'] != 'failover':
# modify the scheduled job to fire on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
                    # delete the scheduled job so it doesn't interfere with the failover process
if self.opts['transport'] != 'tcp':
self.schedule.delete_job(name=master_event(type='alive'))
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info(
'Re-initialising subsystems for new master %s',
self.opts['master']
)
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith(master_event(type='connected')):
            # Handle this event only once, otherwise it will pollute the log.
            # If the master type is failover, all the reconnection work is done
            # by the `disconnected` event handler and this event should never
            # happen; check it anyway to be sure.
if not self.connected and self.opts['master_type'] != 'failover':
log.info('Connection to master %s re-established', self.opts['master'])
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
elif tag.startswith('__schedule_return'):
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug(
'Connected to master %s',
data['schedule'].split(master_event(type='alive', master=''))[1]
)
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag=%s', tag)
self._fire_master(data, tag)
elif tag.startswith('salt/auth/creds'):
key = tuple(data['key'])
log.debug(
'Updating auth data for %s: %s -> %s',
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
)
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
                        log.debug(
                            'Enabling the grains refresher. Will run every %s minutes.',
                            self.opts['grains_refresh_every']
                        )
                    else:  # Clean up minute vs. minutes in log message
                        log.debug(
                            'Enabling the grains refresher. Will run every %s minute.',
                            self.opts['grains_refresh_every']
                        )
                    self._refresh_grains_watcher(
                        abs(self.opts['grains_refresh_every'])
                    )
            except Exception as exc:
                log.error(
                    'Exception occurred in attempt to initialize grain refresh '
                    'routine during minion tune-in: %s', exc
                )
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
        :rtype: None
'''
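        # Start-up sequence: guard against double start (_pre_tune), optionally
        # set up beacons/scheduler before connecting, block until a master is
        # found, fire the start event, run startup_states, start the periodic
        # callbacks (including the master ping) and finally hand control to the
        # io_loop.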
self._pre_tune()
log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
if HAS_WIN_FUNCTIONS:
salt.utils.win_functions.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
def ping_timeout_handler(*_):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
                    log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
                # In the filtering-enabled case, we'd like to know when the minion sees something it shouldn't
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
    Make a Syndic minion; this minion will use the minion keys on the
    master to authenticate with a higher-level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
        # force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: %s', args[1])
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to false in Neon Salt release.
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
        # We borrowed the local client's poller so give it back before
        # it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
    Make a MultiMaster syndic minion; this minion will handle relaying jobs and returns from
    all minions connected to it to the list of masters it is connected to.
    Modes (controlled by `syndic_mode`):
        sync: This mode will synchronize all events and publishes from higher level masters
        cluster: This mode will only sync job publishes and returns
    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and the return, the return will end up in a zmq buffer
    in this Syndic headed to that original master.
    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way), this daemon does not handle failure well;
    it will (under most circumstances) stall the daemon for ~15s while trying to forward
    events to the down master.
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
        # List of delayed job_rets which we were unable to send for some reason and which will
        # be resent to any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
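        # Keep retrying the connection with an increasing acceptance_wait_time
        # backoff until the Syndic authenticates, then start its publish
        # handler and fire the syndic start event.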
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
failed = False
while True:
log.debug(
'Syndic attempting to connect to %s',
opts['master']
)
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info(
'Syndic successfully connected to %s',
opts['master']
)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up syndic for multi-syndic. Is the '
'master at %s responding?', opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
failed = True
log.critical(
'Unexpected error while connecting to %s',
opts['master'], exc_info=True
)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
        # if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
'''
        Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
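        # Track one in-flight publish future per master in self.pub_futures;
        # a busy targeted master means "try again later", a failed future marks
        # the master dead and re-queues its data on self.delayed.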
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
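        # Yield (master_id, syndic_future) pairs, starting with the requested
        # master when there is one and optionally shuffling the rest when
        # syndic_failover is 'random'.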
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
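        # Job return events are aggregated per master into self.job_rets
        # (including the original publish load, fetched once per jid); in
        # 'sync' mode all other events are queued in self.raw_events for
        # periodic forwarding.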
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event %s', mtag) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
            # if we are the top-level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self._return_retry_timer(),
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = list(six.itervalues(self.job_rets[master]))
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: %s', matcher)
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(six.text_type(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.data.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: %s', tgt)
return []
proto = 'ipv{0}'.format(tgt.version)
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = six.text_type(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: %s', exc)
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
nodegroups = self.opts.get('nodegroups', {})
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: %s ? %s', self.opts['id'], tgt)
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
# we make a shallow copy in order to not affect the passed in arg
words = tgt[:]
while words:
word = words.pop(0)
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": %s', word)
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: %s', word)
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# if we encounter a node group, just evaluate it in-place
decomposed = salt.utils.minions.nodegroup_comp(target_info['pattern'], nodegroups)
if decomposed:
words = decomposed + words
continue
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error(
'Unrecognized target engine "%s" for target '
'expression "%s"', target_info['engine'], word
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
six.text_type(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(six.text_type(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match %s ? "%s" => "%s"', self.opts['id'], tgt, results)
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error(
'Invalid compound target: %s for results: %s', tgt, results)
return False
return False
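    # Illustrative example (not part of the original code): a compound target such as
    #   'webserv* and G@os:Ubuntu or E@db[0-9]+\.example\.com'
    # is split into words; 'G@...' is routed to grain_match, 'E@...' to pcre_match,
    # bare words fall through to glob_match, and the resulting 'True'/'False' strings
    # joined with 'and'/'or' are evaluated by the eval() call above.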
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
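# A minimal usage sketch, not part of Salt itself: it shows how a Matcher can be
# driven directly with a hand-built opts dict. The opts values below are invented
# for the example; a real minion passes its full configuration instead.
def _matcher_usage_example():
    example_opts = {
        'id': 'web01.example.com',
        'grains': {'os': 'Ubuntu'},
        'pillar': {},
    }
    matcher = Matcher(example_opts)
    # glob_match checks the minion id against a shell-style glob
    assert matcher.glob_match('web*')
    # grain_match walks the grains dict with a colon-delimited path
    assert matcher.grain_match('os:Ubuntu')
    return matcher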
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
log.debug("subclassed _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
if self.opts.get('proxy_merge_pillar_in_opts'):
# Override proxy opts with pillar data when the user required.
self.opts = salt.utils.dictupdate.merge(self.opts,
self.opts['pillar'],
strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
elif self.opts.get('proxy_mines_pillar'):
# Even when not required, some details such as mine configuration
# should be merged anyway whenever possible.
if 'mine_interval' in self.opts['pillar']:
self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
if 'mine_functions' in self.opts['pillar']:
general_proxy_mines = self.opts.get('mine_functions', [])
specific_proxy_mines = self.opts['pillar']['mine_functions']
try:
self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
except TypeError as terr:
                    log.error('Unable to merge mine functions from the pillar in the opts for proxy %s: %s',
                              self.opts['id'], terr)
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
        # Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
if self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')],
proxy=self.proxy)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# proxy keepalive
proxy_alive_fn = fq_proxyname+'.alive'
if (proxy_alive_fn in self.proxy
and 'status.proxy_reconnect' in self.functions
and self.opts.get('proxy_keep_alive', True)):
            # `proxy_keep_alive` defaults to True; when it is explicitly set to False, no reconnect job is scheduled
self.schedule.add_job({
'__proxy_keepalive':
{
'function': 'status.proxy_reconnect',
'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {
'proxy_name': fq_proxyname
}
}
}, persist=True)
self.schedule.enable_schedule()
else:
self.schedule.delete_job('__proxy_keepalive', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
# Need to load the modules so they get all the dunder variables
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
# Pull in the utils
minion_instance.utils = salt.loader.utils(minion_instance.opts)
# Then load the proxy module
minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
# And re-load the modules so the __proxy__ variable gets injected
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
minion_instance.proxy.pack['__salt__'] = minion_instance.functions
minion_instance.proxy.pack['__ret__'] = minion_instance.returners
minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
            # Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
minion_instance.proxy.pack['__utils__'] = minion_instance.utils
# Reload all modules so all dunder variables are injected
minion_instance.proxy.reload_modules()
fq_proxyname = opts['proxy']['proxytype']
minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
proxy_init_fn(opts)
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
# ===== messmenu.py =====
from flask import Flask, request
import bs4 as bs
import requests
import json
import os
import datetime
import time
import pytz
import threading
app = Flask(__name__)
debug_print_on = False
######################### Global Variables #########################
f = open('./data.json', "r")
db = json.loads(f.read())
f.close()
users = db["users"]
botToken = db["config"]["botToken"]
extrasPhotoId = db["config"]["extrasPhotoId"]
botUsername = db["config"]["botUsername"]
appUrl = db["config"]["appUrl"]
debugId = db["config"]["debugId"]
secretSauce = db["config"]["secretSauce"]
messUrl = db["config"]["messUrl"]
types = {4: "Current Menu", 1: "Breakfast", 2: "Lunch", 3: "Dinner", 5: "Full Menu"}
BLDString = {0:{1:None, 2:None, 3:None, 4:None, 5:None}, 1:{1:None, 2:None, 3:None, 4:None, 5:None}}
inlineResults = {0:[], 1:[]}
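# Layout note (added for clarity): `types` maps a menu type id to its label,
# BLDString[mess][type] holds the rendered Markdown string for mess 0 (DH1) or
# 1 (DH2) and type 1-5, and inlineResults[mess] caches the Telegram inline-query
# result objects built from those strings in fetchMenuItems() below.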
replyMarkup = '{"keyboard":[["/dh1_menu","/dh2_menu"],["/dh1_extras","/dh2_extras"],["/dh1_notifs","/dh2_notifs"],["/both_notifs","/deregister"],["/refresh", "/help"]]}'
helpString = """UPDATE: The notifications have been disabled due to vacations. Remind @vishaaaal to turn them back on if you want! \n\n\nHi. This bot will send you notifications with Menu of SNU Mess of your choice - three times a day: 7:30AM, 11:30AM, 7:30PM.\n\n\nYou can also interact with it here or use it inline in any chat/groups by typing "@snumessbot".\n\n\n/dh1\_menu - Get today's DH1 Menu\n/dh2\_menu - Get today's DH2 Menu\n/dh1\_extras - DH1's Ala-Carte, Evening, Drinks menu.\n/dh2\_extras - DH2's Rollu, Evening, Drinks menu.\n/dh1\_notifs - Daily notifications for DH1\n/dh2\_notifs - Daily notifications for DH2\n/both\_notifs - Daily notifications for BOTH\n/deregister - NO daily notifications\n/refresh - Update menu from SNU website\n/help - Display this help menu\n\nGithub repo: https://github.com/FlameFractal/SNU-Mess-Menu-Notifs/\n\n\nTo report a bug, suggest improvements, ask anything - msg @vishaaaal."""
######################### Some important functions #########################
def debug_print(debug_message):
    if debug_print_on:
print(str(debug_message))
def update_db():
try:
f = open('./data.json', "w")
db["users"] = users
print(json.dumps(db))
f.write(json.dumps(db)) # update users database
f.close()
except:
debug_print('error updating db')
def sendMessage(user_id, msg):
users[user_id]["last_botReply"] = (requests.get('https://api.telegram.org/bot'+botToken+'/sendMessage?parse_mode=Markdown&chat_id='+str(user_id)+'&text='+msg+'&reply_markup='+replyMarkup+'&disable_web_page_preview=TRUE').text).replace('"',"'")
update_db()
return users[user_id]["last_botReply"]
def sendPhoto(user_id, photo, caption=''):
users[user_id]["last_botReply"] = (requests.get('https://api.telegram.org/bot'+botToken+'/sendPhoto?chat_id='+str(user_id)+'&photo='+str(photo)+'&caption='+str(caption)+'&replyMarkup='+replyMarkup).text).replace('"',"'")
update_db()
return users[user_id]["last_botReply"]
def answerInlineQuery(user_id, query_id, mess):
users[user_id]["last_botReply"] = (requests.get('https://api.telegram.org/bot'+botToken+'/answerInlineQuery?inline_query_id='+str(query_id)+'&switch_pm_text=Slow net? Try inside&switch_pm_parameter=help&results='+json.dumps(inlineResults[mess])).text).replace('"',"'")
update_db()
return users[user_id]["last_botReply"]
def getDishes(menuItems, mess_choice, t):  # here t can only be 1, 2 or 3; menu types 4 and 5 are handled separately
s = "*DH"+str(mess_choice+1)+" "+types[t]+"*\n----------------\n"
for dish in menuItems[t].find_all('p'):
        s = s+(dish.text).replace('"','').title().strip()+"\n"  # the DH2 menu sometimes contains a stray double quote (e.g. "Toast); strip it and other artefacts from dish names
return s
def fetchMenuItems():
try:
global inlineResults
global BLDString
time = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))
datestamp = "*"+time.strftime("%A")+", "+time.strftime("%d")+" "+time.strftime("%B")+" "+str(time.year)+"*\n\n"
for mess_choice in (0,1): # construct strings for all 10 types of menus
try:
menuItems= ((bs.BeautifulSoup(requests.get(messUrl, timeout=1).text,'lxml')).find_all(id='dh2MenuItems'))[mess_choice].find_all('td')
if('No Menu' in menuItems[0].text.strip()):
raise requests.exceptions.RequestException("_No Menu Available!_")
for t in types:
if t==1 or t==2 or t==3:
BLDString[mess_choice][t] = datestamp + getDishes(menuItems, mess_choice, t)
if t==4: # get according to current time
if time.hour<=10: #breakfast - midnight to 10:59am #send entire menu at breakfast
BLDString[mess_choice][t] = datestamp + getDishes(menuItems, mess_choice,1)+"\n"+getDishes(menuItems, mess_choice,2)+"\n"+getDishes(menuItems, mess_choice,3)
elif 11<=time.hour<=15: #lunch - 11am to 3:59pm
BLDString[mess_choice][t] = datestamp + getDishes(menuItems, mess_choice,2)
else: # dinner - 4pm to midnight
BLDString[mess_choice][t] = datestamp + getDishes(menuItems, mess_choice,3)
if t==5:
BLDString[mess_choice][t] = datestamp + getDishes(menuItems, mess_choice,1)+"\n"+getDishes(menuItems, mess_choice,2)+"\n"+getDishes(menuItems, mess_choice,3)
except requests.exceptions.RequestException as e:
for t in types:
BLDString[mess_choice][t] = datestamp+"*DH"+str(mess_choice+1)+" "+"*\n----------------\n"+"Oops. Error. Verify at "+messUrl+", and to refresh my menu send /refresh.\n\n*ERROR:* _"+str(e)+"_\n"
# construct strings for fast inline response
counter = 0
inlineResults = {0:[], 1:[]}
for mess_choice in (0,1):
for t in types:
inlineResults[mess_choice].append({"type":"article","id":str(counter),"title":"DH"+str(mess_choice+1)+" - "+types[t],"input_message_content":{"message_text":BLDString[mess_choice][t], "parse_mode": "Markdown"}})
counter = counter+1
inlineResults[mess_choice].append({"type":"photo","id":str(counter),"title":"DH"+str(mess_choice+1)+" - Extras Menu","photo_file_id":str(extrasPhotoId[mess_choice]),"description":"DH"+str(mess_choice+1)+" - Extras Menu","caption":"DH"+str(mess_choice+1)+" - Extras Menu"})
counter = counter + 1
# debug_print(str(BLDString)+"\n\n\n"+str(inlineResults))
return "I'm up to date with SNU website now. Thanks!"
except:
return "Error fetching menu"
def sendCurrentMenuAllUsers():
fetchMenuItems()
for user_id in users:
if "mess_choice" in users[user_id] and users[user_id]["mess_choice"] >= 0: # send only if registered for notifications
if users[user_id]["mess_choice"] == 2:
sendMessage(user_id, BLDString[0][4]+BLDString[1][4])
else:
sendMessage(user_id, BLDString[users[user_id]["mess_choice"]][4])
debug_print("sent notification to "+user_id)
return('')
def att(attended, conducted, percentage='75'):
attended = int(attended)
conducted = int(conducted)
percentage = int(percentage)
    if not attended>0 or not conducted>0 or not attended<=conducted or not 100>=percentage>=0:
return '/att \[attended] \[conducted] \[req att % eg. 75] (optional)'
temp = conducted
temp2 = attended
# can't miss
if (attended/conducted < percentage/100):
while(temp2/temp <= percentage/100):
temp2 = temp2 + 1
temp = temp + 1
s = "You need to attend "+str(temp2-attended)+" more classes for final of %0.2f" %((temp2*100)/(temp))
return s+"% = "+str(temp2)+"/"+str(temp)
else: # can miss
while(attended/temp>=percentage/100):
temp = temp+1
s = "You can miss "+str(temp-1-conducted)+" more classes for final of %0.2f" %((attended*100)/(temp-1))
return s +"% = "+str(attended)+"/"+str(temp-1)
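# Worked example (illustrative, following the logic above): att(9, 10) targets the
# default 75% attendance; 9/10 = 90% is already above target, so the "can miss"
# loop grows the class count until 9/13 < 0.75 and the function reports
# "You can miss 2 more classes for final of 75.00% = 9/12".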
def webhook_handler(response):
debug_print(response)
try:
requests.get('https://api.telegram.org/bot'+botToken+'/sendMessage?parse_mode=Markdown&chat_id='+debugId+'&text='+str(response), timeout=1)
except:
debug_print('')
if("inline_query" in response):
field = "inline_query"
user_id = str(response[field]["from"]["id"]) # get the user id
if user_id == '376483850':
return 'spam blocked', 200
query_id = str(response[field]["id"]) # get the query id
query_msg = str(response[field]["query"]) # get the message
if user_id in users:
users[user_id]["last_query"] = str(response)
else:
users[user_id] = {}
users[user_id]["last_query"] = str(response)
users[user_id]["name"] = response[field]["from"]["first_name"] if "first_name" in response[field]["from"] else 'unknown' # get the first name
if query_msg != '1' and query_msg != '2': # if not typed 1 or 2, show default
query_msg = users[user_id]["mess_choice"]+1 if "mess_choice" in users[user_id] else "1" # if user hasnt entered query, show him his mess_choice menu !
return(answerInlineQuery(user_id, query_id, int(query_msg)-1))
if("message" in response):
field = "message"
elif("edited_message" in response):
field = "edited_message"
else:
return str(response)
# extract user information
user_msg = str(response[field]["text"]) if "text" in response[field] else '' # get the message
user_id = str(response[field]["from"]["id"]) if "id" in response[field]["from"] else '999' # get the id
if user_id == '376483850':
return 'spam blocked', 200
if user_id not in users:
users[user_id] = {}
users[user_id]["name"] = response[field]["from"]["first_name"] if "first_name" in response[field]["from"] else 'unknown' # get the first name
users[user_id]["name"] = users[user_id]["name"] + " " + response[field]["chat"]["last_name"] if "last_name" in response[field]["from"] else users[user_id]["name"] # get the last name
users[user_id]["username"] = response[field]["chat"]["username"] if "username" in response[field]["chat"] else 'unknown' # get the username
users[user_id]["last_query"] = str(response)
if "mess_choice" not in users[user_id]:
users[user_id]["mess_choice"] = 1
botReply = ""
if '/start' in user_msg :
users[user_id]["mess_choice"] = 1
botReply = "Hello there! Welcome!\nYour request for notifications has been registered.\nDefault Mess DH-2 selected.\nType '/help' to switch mess!"
elif user_msg == '/dh1_notifs':
users[user_id]["mess_choice"] = 0
botReply = "Your request for notifications has been registered.\nMess DH-1 selected.\nThank you!"
elif user_msg == '/dh2_notifs':
users[user_id]["mess_choice"] = 1
botReply = "Your request for notifications has been registered.\nMess DH-2 selected.\nThank you!"
elif user_msg == '/both_notifs':
users[user_id]["mess_choice"] = 2
botReply = "Your request for notifications has been registered.\nMess DH-1 and DH-2 selected.\nThank you!"
elif user_msg == '/deregister':
users[user_id]["mess_choice"] = -1
botReply = "Your request for deregistering for notifications has been noted.\nThank you!"
elif user_msg == '/dh1_menu':
botReply = BLDString[0][5]
elif user_msg == '/dh2_menu':
botReply = BLDString[1][5]
elif user_msg == '/dh1_extras':
return(sendPhoto(user_id, extrasPhotoId[0], 'DH1 Extras Menu'))
elif user_msg == '/dh2_extras':
return(sendPhoto(user_id, extrasPhotoId[1], 'DH2 Extras Menu'))
elif user_msg == '/refresh':
botReply = '*'+str(fetchMenuItems())+'*'
elif '/att' in user_msg:
a = user_msg.split(' ')
botReply = '/att \[attended] \[conducted] \[req att % eg. 75] (optional)' if len(a)<3 else att(a[1],a[2]) if len(a)==3 else att(a[1],a[2],a[3])
elif '/adhoc_update'+secretSauce in user_msg: # admin function
time = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))
datestamp = "*"+time.strftime("%A")+", "+time.strftime("%d")+" "+time.strftime("%B")+" "+str(time.year)+"*\n\n"
new_menu = user_msg.replace('/adhoc_update'+secretSauce,'')
if '/dh1' in new_menu:
new_menu = "*Menu for DH1*\n\n" + new_menu.replace('/dh1 ','').strip()
for user_id in users:
if "mess_choice" in users[user_id] and (users[user_id]["mess_choice"] == 0 or users[user_id]["mess_choice"] == 2):
sendMessage(user_id, datestamp+new_menu.strip())
elif '/dh2' in new_menu:
new_menu = "*Menu for DH2*\n\n" + new_menu.replace('/dh2 ','')
for user_id in users:
if "mess_choice" in users[user_id] and (users[user_id]["mess_choice"] == 1 or users[user_id]["mess_choice"] == 2):
sendMessage(user_id, datestamp+new_menu.strip())
else:
sendMessage(user_id,"Oops. Did not understand that.")
return str(response)
elif user_msg == '/help':
botReply = helpString
else:
botReply = "Oops! I don't understand that yet!\nType '/help' to see all the commands."
sendMessage(user_id, botReply)
return str(response)
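# For reference, a minimal sketch of the Telegram update this handler consumes
# (field names mirror what the code reads above; the values are made up):
# {"message": {"text": "/dh1_menu",
#              "from": {"id": 12345, "first_name": "Asha", "last_name": "K"},
#              "chat": {"username": "asha_snu"}}}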
######################### APIs to talk to the bot #########################
@app.route('/botWebhook'+botToken, methods=['POST'])
def fn():
try:
debug_print("starting new thread for webhook")
threading.Thread(target=webhook_handler, args=[request.get_json()]).start()
except:
debug_print("coudln't start thread, responsing webhook normally")
webhook_handler(request.get_json())
return ' '
@app.route('/fetchMenuItems'+botToken, methods=['GET'])
def fn2():
try:
debug_print("starting new thread for fetching menu items")
threading.Thread(target=fetchMenuItems).start()
except:
debug_print("coudln't start thread, fetching menu items normally")
fetchMenuItems()
return ' '
@app.route('/sendCurrentMenuAllUsers'+botToken, methods=['GET'])
def fn3():
try:
debug_print("started thread for sending notifications")
threading.Thread(target=sendCurrentMenuAllUsers).start()
except:
debug_print("coudln't start thread, sending notifications normally")
sendCurrentMenuAllUsers()
return ' '
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
return "<a href='http://t.me/"+botUsername+"'>http://t.me/"+botUsername+"</a>"
######################### Start the flask server! #########################
debug_print('webhook set - '+str(requests.get('https://api.telegram.org/bot'+botToken+'/setWebhook?url='+appUrl+'/botWebhook'+botToken))) #set bot webhook automatically
fetchMenuItems()
if __name__ == "__main__":
app.run(threaded=True, host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
# ===== core.py =====
from __future__ import print_function
import requests
import warnings
import numpy as np
import sys
from bs4 import BeautifulSoup
import keyring
import getpass
import time
import smtplib
import re
from six.moves.email_mime_multipart import MIMEMultipart
from six.moves.email_mime_base import MIMEBase
from six.moves.email_mime_text import MIMEText
from six.moves.email_mime_base import message
# Astropy imports
from astropy.table import Table
import astropy.io.votable as votable
from astropy import log as logging
from astropy.table.pprint import conf
conf.max_lines = -1
conf.max_width = -1
# Astroquery imports
from ..query import QueryWithLogin
from . import conf
__all__ = ['CosmoSim']
class CosmoSimClass(QueryWithLogin):
QUERY_URL = conf.query_url
SCHEMA_URL = conf.schema_url
TIMEOUT = conf.timeout
def __init__(self):
super(CosmoSimClass, self).__init__()
# self.session = self._BaseQuery__session
def _login(self, username, password=None, store_password=False):
# login after logging out (interactive)
if not hasattr(self, 'session'):
self.session = requests.session()
# self._BaseQuery__session = self.session # NOTE FROM AG: I hope this works...
# login after login (interactive)
if hasattr(self, 'username'):
logging.warning("Attempting to login while another user ({}) is already logged in.".format(self.username))
self.check_login_status()
return
self.username = username
# Get password from keyring or prompt
password_from_keyring = keyring.get_password("astroquery:www.cosmosim.org", self.username)
if not password_from_keyring:
logging.warning("No password was found in the keychain for the provided username.")
if password:
self.password = password
else:
self.password = getpass.getpass("{0}, enter your CosmoSim password:\n".format(self.username))
else:
logging.warning("Using the password found in the keychain for the provided username.")
self.password = password_from_keyring
# Authenticate
warnings.warn("Authenticating {0} on www.cosmosim.org...".format(self.username))
authenticated = self._request('POST', CosmoSim.QUERY_URL,
auth=(self.username, self.password),
cache=False)
if authenticated.status_code == 200:
warnings.warn("Authentication successful!")
elif authenticated.status_code == 401 or authenticated.status_code == 403:
warnings.warn("Authentication failed!")
elif authenticated.status_code == 503:
warnings.warn("Service Temporarily Unavailable...")
# Generating dictionary of existing tables
self._existing_tables()
if authenticated.status_code == 200 and password_from_keyring is None and store_password:
keyring.set_password("astroquery:www.cosmosim.org", self.username, self.password)
# Delete job; prevent them from piling up with phase PENDING
if authenticated.status_code == 200:
soup = BeautifulSoup(authenticated.content)
self.delete_job(jobid=str(soup.find("uws:jobid").string),squash=True)
return authenticated
def logout(self, deletepw=False):
"""
        Public function which allows the user to log out of their CosmoSim session.
Parameters
----------
deletepw : bool
            A hard logout - delete the password for the associated username from the keychain. The default is False.
Returns
-------
"""
if hasattr(self,'username') and hasattr(self,'password') and hasattr(self,'session'):
if deletepw is True:
try:
keyring.delete_password("astroquery:www.cosmosim.org", self.username)
warnings.warn("Removed password for {} in the keychain.".format(self.username))
except:
warnings.warn("Password for {} was never stored in the keychain.".format(self.username))
del self.session
del self.username
del self.password
else:
logging.error("You must log in before attempting to logout.")
def check_login_status(self):
"""
Public function which checks the status of a user login attempt.
"""
if hasattr(self,'username') and hasattr(self,'password') and hasattr(self,'session'):
authenticated = self._request('POST', CosmoSim.QUERY_URL,
auth=(self.username,self.password),
cache=False)
if authenticated.status_code == 200:
warnings.warn("Status: You are logged in as {}.".format(self.username))
soup = BeautifulSoup(authenticated.content)
self.delete_job(jobid=str(soup.find("uws:jobid").string),squash=True)
else:
warnings.warn("Status: The username/password combination for {} appears to be incorrect.".format(self.username))
warnings.warn("Please re-attempt to login with your cosmosim credentials.")
else:
warnings.warn("Status: You are not logged in.")
def run_sql_query(self, query_string, tablename=None, queue=None,
mail=None, text=None, cache=True):
"""
Public function which sends a POST request containing the sql query string.
Parameters
----------
query_string : string
The sql query to be sent to the CosmoSim.org server.
tablename : string
            The name of the table under which the query data will be stored. If left blank, or if the given name already exists, a table name will be generated automatically.
queue : string
The short/long queue option. Default is short.
mail : string
The user's email address for receiving job completion alerts.
text : string
The user's cell phone number for receiving job completion alerts.
cache : bool
Whether to cache the query locally
Returns
-------
result : jobid
The jobid of the query
"""
self._existing_tables()
if not queue:
queue = 'short'
if tablename in self.table_dict.values():
result = self._request('POST',
CosmoSim.QUERY_URL,
auth=(self.username,self.password),
data={'query':query_string,'phase':'run',
'queue':queue},
cache=cache)
soup = BeautifulSoup(result.content)
phase = soup.find("uws:phase").string
if phase in ['ERROR']:
warnings.warn("No table was generated for job with phase `{}`".format(phase))
gen_tablename = "{}".format(phase)
else:
gen_tablename = str(soup.find(id="table").string)
logging.warning("Table name {} is already taken.".format(tablename))
warnings.warn("Generated table name: {}".format(gen_tablename))
elif tablename is None:
result = self._request('POST', CosmoSim.QUERY_URL,
auth=(self.username, self.password),
data={'query':query_string, 'phase':'run',
'queue':queue},
cache=cache)
else:
result = self._request('POST', CosmoSim.QUERY_URL,
auth=(self.username, self.password),
data={'query':query_string,
'table':'{}'.format(tablename),
'phase':'run', 'queue':queue},
cache=cache)
self._existing_tables()
soup = BeautifulSoup(result.content)
self.current_job = str(soup.find("uws:jobid").string)
warnings.warn("Job created: {}".format(self.current_job))
if mail or text:
self._initialize_alerting(self.current_job,mail=mail,text=text)
alert = AlertThread()
#self._alert(self.current_job,queue)
return self.current_job
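    # Hedged usage sketch (not taken from this file; it assumes the module exposes a
    # ready-made `CosmoSim` instance, as other astroquery sub-packages do):
    #
    #   from astroquery.cosmosim import CosmoSim
    #   CosmoSim.login(username="myuser")          # prompts for the password
    #   jobid = CosmoSim.run_sql_query(
    #       query_string="SELECT TOP 10 * FROM MDR1.FOF WHERE snapnum=85",
    #       tablename="fof_sample", queue="short")
    #   CosmoSim.check_job_status(jobid)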
def _existing_tables(self):
"""
Internal function which builds a dictionary of the tables already in use
for a given set of user credentials. Keys are jobids and values are the
tables which are stored under those keys.
"""
checkalljobs = self.check_all_jobs()
completed_jobs = [key for key in self.job_dict.keys() if self.job_dict[key] in ['COMPLETED','EXECUTING']]
soup = BeautifulSoup(checkalljobs.content)
self.table_dict={}
for i in soup.find_all("uws:jobref"):
jobid = i.get('xlink:href').split('/')[-1]
if jobid in completed_jobs:
self.table_dict[jobid] = '{}'.format(i.get('id'))
def check_job_status(self,jobid=None):
"""
A public function which sends an http GET request for a given jobid,
and checks the server status. If no jobid is provided, it uses the most
recent query (if one exists).
Parameters
----------
jobid : string
The jobid of the sql query. If no jobid is given, it attempts to
use the most recent job (if it exists in this session).
Returns
-------
result : content of 'requests.models.Response' object
The requests response phase
"""
if jobid is None:
if hasattr(self,'current_job'):
jobid = self.current_job
else:
try:
jobid = self.current_job
except:
raise AttributeError
response = self._request('GET',
CosmoSim.QUERY_URL+'/{}'.format(jobid)+'/phase',
auth=(self.username, self.password),
data={'print':'b'},cache=False)
logging.info("Job {}: {}".format(jobid,response.content))
return response.content
def check_all_jobs(self, phase=None, regex=None, sortby=None):
"""
Public function which builds a dictionary whose keys are each jobid for a
given set of user credentials and whose values are the phase status (e.g. -
EXECUTING,COMPLETED,PENDING,ERROR).
Parameters
----------
phase : list
A list of phase(s) of jobs to be checked on. If nothing provided, all are checked.
regex : string
A regular expression to match all tablenames to. Matching table names will be included.
Note - Only tables/starttimes are associated with jobs which have phase COMPLETED.
sortby : string
An option to sort jobs (after phase and regex criteria have been taken into account)
by either the execution start time (`starttime`), or by the table name ('tablename').
Returns
-------
checkalljobs : 'requests.models.Response' object
The requests response for the GET request for finding all existing jobs.
"""
checkalljobs = self._request('GET', CosmoSim.QUERY_URL,
auth=(self.username, self.password),
params={'print':'b'},cache=False)
self.job_dict={}
soup = BeautifulSoup(checkalljobs.content)
for i in soup.find_all("uws:jobref"):
i_phase = str(i.find('uws:phase').string)
if i_phase in ['COMPLETED','EXECUTING','ABORTED','ERROR']:
self.job_dict['{}'.format(i.get('xlink:href').split('/')[-1])] = i_phase
else:
self.job_dict['{}'.format(i.get('id'))] = i_phase
if phase:
phase = [phase[i].upper() for i in range(len(phase))]
if regex:
pattern = re.compile("{}".format(regex))
try:
groups = [pattern.match(self.table_dict.values()[i]).group()
for i in range(len(self.table_dict.values()))
if pattern.match(self.table_dict.values()[i]) is not None]
matching_tables = [groups[i]
for i in range(len(groups))
if groups[i] in self.table_dict.values()]
except AttributeError:
warnings.warn('No tables matching the regular expression `{}` were found.'.format(regex))
matching_tables = self.table_dict.values()
if phase:
if "COMPLETED" not in phase:
warnings.warn("No jobs found with phase `{}` matching the regular expression `{}` were found.".format(phase,regex))
warnings.warn("Matching regular expression `{}` to all jobs with phase `COMPLETED` instead (unsorted):".format(regex))
else:
matching_tables = [[self.table_dict[i]
for i in self.table_dict.keys()
if self.table_dict[i] == miter
and self.job_dict[i] in phase
][0]
for miter in matching_tables]
self._existing_tables() # creates a fresh up-to-date table_dict
self._starttime_dict()
if not sortby:
if regex:
matching = zip(*[[(i,self.job_dict[i],self.starttime_dict[i])
for i in self.table_dict.keys()
if self.table_dict[i] == miter][0]
for miter in matching_tables])
matching_jobids,matching_phases,matching_starttimes = (matching[0],matching[1],matching[2])
if sortby:
if sortby.upper() == "TABLENAME":
if not 'matching_tables' in locals():
matching_tables = sorted(self.table_dict.values())
else:
matching_tables = sorted(matching_tables)
matching = zip(*[[(i,self.job_dict[i],self.starttime_dict[i])
for i in self.table_dict.keys()
if self.table_dict[i] == miter][0]
for miter in matching_tables])
matching_jobids,matching_phases,matching_starttimes = (matching[0],matching[1],matching[2])
elif sortby.upper() == 'STARTTIME':
if not 'matching_tables' in locals():
matching_starttimes = sorted(self.starttime_dict.values())
matching = zip(*[[(i,self.job_dict[i],self.table_dict[i])
for i in self.starttime_dict.keys()
if self.starttime_dict[i] == miter][0]
for miter in matching_starttimes])
matching_jobids,matching_phases,matching_tables = (matching[0],matching[1],matching[2])
else:
matching_tables = matching_tables
matching_starttimes = [[self.starttime_dict[i]
for i in self.table_dict.keys()
if self.table_dict[i] == miter][0]
for miter in matching_tables]
matching = zip(*[[(i,self.job_dict[i],self.table_dict[i])
for i in self.starttime_dict.keys()
if self.starttime_dict[i] == miter][0]
for miter in matching_starttimes])
matching_jobids,matching_phases,matching_tables = (matching[0],matching[1],matching[2])
frame = sys._getframe(1)
do_not_print_job_dict = ['completed_job_info','general_job_info','delete_all_jobs',
'_existing_tables','delete_job','download'] # list of methods which use check_all_jobs()
# for which I would not like job_dict to be
# printed to the terminal
if frame.f_code.co_name in do_not_print_job_dict:
return checkalljobs
else:
if not phase and not regex:
if not sortby:
t = Table()
t['JobID'] = self.job_dict.keys()
t['Phase'] = self.job_dict.values()
t.pprint()
else:
if sortby.upper() == 'TABLENAME':
t = Table()
t['Tablename'] = matching_tables
t['Starttime'] = matching_starttimes
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
if sortby.upper() == 'STARTTIME':
t = Table()
t['Starttime'] = matching_starttimes
t['Tablename'] = matching_tables
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
elif not phase and regex:
t = Table()
if sortby:
if sortby.upper() == 'STARTTIME':
t['Starttime'] = matching_starttimes
t['Tablename'] = matching_tables
if sortby.upper() == 'TABLENAME':
t['Tablename'] = matching_tables
t['Starttime'] = matching_starttimes
if not sortby:
t['Tablename'] = matching_tables
t['Starttime'] = matching_starttimes
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
if phase and not regex:
if len(phase) == 1 and "COMPLETED" in phase:
if not sortby:
matching_jobids = [key
for key in self.job_dict.keys()
if self.job_dict[key] in phase]
matching = zip(*[[(self.table_dict[i],self.job_dict[i],self.starttime_dict[i])
for i in self.table_dict.keys()
if i == miter][0]
for miter in matching_jobids])
matching_tables,matching_phases,matching_starttimes = (matching[0],matching[1],matching[2])
t = Table()
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t['Tablename'] = matching_tables
t['Starttime'] = matching_starttimes
t.pprint()
if sortby:
if sortby.upper() == 'TABLENAME':
t = Table()
t['Tablename'] = matching_tables
t['Starttime'] = matching_starttimes
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
if sortby.upper() == 'STARTTIME':
t = Table()
t['Starttime'] = matching_starttimes
t['Tablename'] = matching_tables
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
else:
if sortby:
warnings.warn('Sorting can only be applied to jobs with phase `COMPLETED`.')
if not sortby:
matching_jobids = [key
for key in self.job_dict.keys()
if self.job_dict[key] in phase]
matching_phases = [self.job_dict[key]
for key in self.job_dict.keys()
if self.job_dict[key] in phase]
t = Table()
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
if phase and regex:
if not sortby:
t = Table()
t['Tablename'] = matching_tables
t['Starttime'] = matching_starttimes
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
else:
if sortby.upper() == 'TABLENAME':
t = Table()
t['Tablename'] = matching_tables
t['Starttime'] = matching_starttimes
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
if sortby.upper() == 'STARTTIME':
t = Table()
t['Starttime'] = matching_starttimes
t['Tablename'] = matching_tables
t['JobID'] = matching_jobids
t['Phase'] = matching_phases
t.pprint()
return checkalljobs
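    # Illustrative call (argument values are invented): list only finished jobs whose
    # table name starts with 'tmp_', sorted by table name:
    #
    #   CosmoSim.check_all_jobs(phase=['completed'], regex='tmp_.*', sortby='tablename')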
def completed_job_info(self,jobid=None,output=False):
"""
A public function which sends an http GET request for a given
jobid with phase COMPLETED. If output is True, the function prints
a dictionary to the screen, while always generating a global
dictionary `response_dict_current`. If no jobid is provided,
a visual of all responses with phase COMPLETED is generated.
Parameters
----------
jobid : string
The jobid of the sql query.
output : bool
Print output of response(s) to the terminal
"""
self.check_all_jobs()
if jobid is None:
completed_jobids = [key for key in self.job_dict.keys() if self.job_dict[key] == 'COMPLETED']
response_list = [self._request('GET',
CosmoSim.QUERY_URL+"/{}".format(completed_jobids[i]),
auth=(self.username, self.password),cache=False)
for i in range(len(completed_jobids))]
self.response_dict_current = {}
for i,vals in enumerate(completed_jobids):
self.response_dict_current[vals] = self._generate_response_dict(response_list[i])
else:
if self.job_dict[jobid] == 'COMPLETED':
response_list = [self._request('GET',
CosmoSim.QUERY_URL+"/{}".format(jobid),
auth=(self.username,
self.password),cache=False)]
self.response_dict_current = {}
self.response_dict_current[jobid] = self._generate_response_dict(response_list[0])
else:
warnings.warn("JobID must refer to a query with a phase of 'COMPLETED'.")
return
if output is True:
dictkeys = self.response_dict_current.keys()
if len(dictkeys) > 1:
keys = [i for i in self.response_dict_current.keys()]
phases = [self.job_dict[key] for key in keys]
t = Table()
t['JobID'] = keys
t['Phase'] = phases
t.pprint()
warnings.warn("Use specific jobid to get more information, or explore `self.response_dict_current`.")
elif len(dictkeys) == 1:
print(self.response_dict_current[dictkeys[0]]['content'])
else:
logging.error('No completed jobs found.')
return
else:
return
def _generate_response_dict(self,response):
"""
A private function which takes in a response object and creates a response
        dictionary.
Parameters
----------
response : requests.models.Response
requests response object
Returns
-------
response_dict : dict
            A dictionary of some of the response object's attributes
"""
R = response
response_dict = {'{}'.format('content'):R.content,
'{}'.format('cookies'):R.cookies,
'{}'.format('elapsed'):R.elapsed,
'{}'.format('encoding'):R.encoding,
'{}'.format('headers'):R.headers,
'{}'.format('ok'):R.ok,
'{}'.format('request'):R.request,
'{}'.format('url'):R.url}
return response_dict
def _starttime_dict(self):
"""
A private function which generates a dictionary of jobids (must have phase
COMPLETED) linked to starttimes.
"""
completed_ids = [key
for key in self.job_dict.keys()
if self.job_dict[key] == 'COMPLETED']
response_list = [self._request('GET',
CosmoSim.QUERY_URL+"/{}".format(i),
auth=(self.username,self.password),cache=False)
for i in completed_ids]
soups = [BeautifulSoup(response_list[i].content) for i in range(len(response_list))]
self.starttime_dict = {}
for i in range(len(soups)):
self.starttime_dict['{}'.format(completed_ids[i])] = '{}'.format(soups[i].find('uws:starttime').string)
def general_job_info(self,jobid=None,output=False):
"""
A public function which sends an http GET request for a given
jobid in any phase. If no jobid is provided, a summary of all
jobs is generated.
Parameters
----------
jobid : string
The jobid of the sql query.
output : bool
Print output of response(s) to the terminal
"""
self.check_all_jobs()
if jobid is None:
print("Job Summary:")
print("There are {} jobs with phase: COMPLETED.".format(self.job_dict.values().count('COMPLETED')))
print("There are {} jobs with phase: ERROR.".format(self.job_dict.values().count('ERROR')))
print("There are {} jobs with phase: ABORTED.".format(self.job_dict.values().count('ABORTED')))
print("There are {} jobs with phase: PENDING.".format(self.job_dict.values().count('PENDING')))
print("There are {} jobs with phase: EXECUTING.".format(self.job_dict.values().count('EXECUTING')))
print("There are {} jobs with phase: QUEUED.".format(self.job_dict.values().count('QUEUED')))
print("Try providing a jobid for the job you'd like to know more about.")
print("To see a list of all jobs, use `check_all_jobs()`.")
return
else:
response_list = [self._request('GET',
CosmoSim.QUERY_URL+"/{}".format(jobid),
auth=(self.username,
self.password),cache=False)]
if response_list[0].ok is False:
logging.error('Must provide a valid jobid.')
return
else:
self.response_dict_current = {}
self.response_dict_current[jobid] = self._generate_response_dict(response_list[0])
if output is True:
dictkeys = self.response_dict_current.keys()
print(self.response_dict_current[dictkeys[0]]['content'])
return
else:
return
def delete_job(self,jobid=None,squash=None):
"""
A public function which deletes a stored job from the server in any phase.
        If no jobid is given, it attempts to use the most recent job (if it exists
in this session). If jobid is specified, then it deletes the corresponding job,
and if it happens to match the existing current job, that variable gets deleted.
Parameters
----------
jobid : string
            The jobid of the sql query. If no jobid is given, it attempts to use the most recent job (if it exists in this session).
        squash : bool
            Suppress the 'Deleted job' confirmation message on success.
Returns
-------
result : list
A list of response object(s)
"""
self.check_all_jobs()
if jobid is None:
if hasattr(self,'current_job'):
jobid = self.current_job
if jobid:
if hasattr(self,'current_job'):
if jobid == self.current_job:
del self.current_job
if self.job_dict[jobid] in ['COMPLETED','ERROR','ABORTED','PENDING']:
result = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(jobid),
auth=(self.username, self.password),
data={'follow':''})
else:
warnings.warn("Can only delete a job with phase: 'COMPLETED', 'ERROR', 'ABORTED', or 'PENDING'.")
return
if not result.ok:
result.raise_for_status()
if squash is None:
warnings.warn('Deleted job: {}'.format(jobid))
return result
def abort_job(self,jobid=None):
"""
"""
self.check_all_jobs()
def delete_all_jobs(self,phase=None,regex=None):
"""
A public function which deletes any/all jobs from the server in any phase
and/or with its tablename matching any desired regular expression.
Parameters
----------
phase : list
A list of job phases to be deleted. If nothing provided, all are deleted.
regex : string
A regular expression to match all tablenames to. Matching table names will be deleted.
"""
self.check_all_jobs()
if regex:
pattern = re.compile("{}".format(regex))
groups = [pattern.match(self.table_dict.values()[i]).group() for i in range(len(self.table_dict.values()))]
matching_tables = [groups[i] for i in range(len(groups)) if groups[i] in self.table_dict.values()]
if phase:
phase = [phase[i].upper() for i in range(len(phase))]
if regex:
for key in self.job_dict.keys():
if self.job_dict[key] in phase:
if key in self.table_dict.keys():
if self.table_dict[key] in matching_tables:
result = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(key),
auth=(self.username,
self.password),
data={'follow':''})
if not result.ok:
result.raise_for_status()
warnings.warn("Deleted job: {} (Table: {})".format(key,self.table_dict[key]))
if not regex:
for key in self.job_dict.keys():
if self.job_dict[key] in phase:
result = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(key),
auth=(self.username,
self.password),
data={'follow':''})
if not result.ok:
result.raise_for_status()
warnings.warn("Deleted job: {}".format(key))
if not phase:
if regex:
for key in self.job_dict.keys():
if key in self.table_dict.keys():
if self.table_dict[key] in matching_tables:
result = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(key),
auth=(self.username,
self.password),
data={'follow':''})
if not result.ok:
result.raise_for_status()
warnings.warn("Deleted job: {} (Table: {})".format(key,self.table_dict[key]))
if not regex:
for key in self.job_dict.keys():
result = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(key),
auth=(self.username,
self.password),
data={'follow':''})
if not result.ok:
result.raise_for_status()
warnings.warn("Deleted job: {}".format(key))
self._existing_tables()
return
def _generate_schema(self):
"""
Internal function which builds a schema of all simulations within
the database (in the form of a dictionary).
"""
response = self._request('GET', CosmoSim.SCHEMA_URL,
auth=(self.username,self.password),
headers={'Accept': 'application/json'},
cache=False)
data = response.json()
self.db_dict = {}
for i in range(len(data['databases'])):
self.db_dict['{}'.format(data['databases'][i]['name'])] = {}
sstr = '{}'.format(data['databases'][i]['name'])
sid = '{}'.format(data['databases'][i]['id'])
self.db_dict[sstr]['id'] = sid
sdesc = '{}'.format(data['databases'][i]['description'])
self.db_dict[sstr]['description'] = sdesc
self.db_dict[sstr]['tables'] = {}
for j in range(len(data['databases'][i]['tables'])):
sstr2 = '{}'.format(data['databases'][i]['tables'][j]['name'])
self.db_dict[sstr]['tables'][sstr2] = {}
sdata = data['databases'][i]['tables'][j]['id']
self.db_dict[sstr]['tables'][sstr2]['id'] = sdata
sdesc2 = data['databases'][i]['tables'][j]['description']
self.db_dict[sstr]['tables'][sstr2]['description'] = sdesc2
self.db_dict[sstr]['tables'][sstr2]['columns'] = {}
tmpval = len(data['databases'][i]['tables'][j]['columns'])
for k in range(tmpval):
sstr3 = '{}'.format(data['databases'][i]['tables'][j]['columns'][k]['name'])
self.db_dict[sstr]['tables'][sstr2]['columns'][sstr3] = {}
sdata2 = data['databases'][i]['tables'][j]['columns'][k]['id']
self.db_dict[sstr]['tables'][sstr2]['columns'][sstr3]['id'] = sdata2
sdesc3 = data['databases'][i]['tables'][j]['columns'][k]['description']
self.db_dict[sstr]['tables'][sstr2]['columns'][sstr3]['description'] = sdesc3
return response
def explore_db(self,db=None,table=None,col=None):
"""
A public function which allows for the exploration of any simulation and
its tables within the database. This function is meant to aid the user in
constructing sql queries.
Parameters
----------
db : string
The database to explore.
table : string
The table to explore.
col : string
The column to explore.
"""
try:
self.db_dict
except AttributeError:
self._generate_schema()
projects = np.sort(list(self.db_dict.keys()))
largest = max([len(projects[i]) for i in range(len(projects))])
t = Table()
# db not specified
if not db:
warnings.warn("Must first specify a database.")
proj_list = []
attr_list = []
info_list = []
tmp2_largest = 0
for proj in projects:
size = len(self.db_dict['{}'.format(proj)].keys())
proj_list += ['@ {}'.format(proj)] + ['' for i in range(size-1)] + ['-'*(largest+2)]
tmp_largest = max([len('{}'.format(key))
for key in self.db_dict[proj].keys()])
attr_list += ['@ {}'.format(key)
if isinstance(self.db_dict[proj][key],dict)
else '{}:'.format(key)
for key in self.db_dict[proj].keys()] + ['-'*(tmp_largest+2)]
tmpinfosize = max([len(self.db_dict[proj][key])
if isinstance(self.db_dict[proj][key],str)
else 0
for key in self.db_dict[proj].keys()])
if tmpinfosize > tmp2_largest:
tmp2_largest = tmpinfosize
for proj in projects:
info_list += [self.db_dict[proj][key]
if isinstance(self.db_dict[proj][key],str)
else ""
for key in self.db_dict[proj].keys()] + ['-'*tmp2_largest]
t['Projects'] = proj_list
t['Project Items'] = attr_list
t['Information'] = info_list
t.pprint()
# db specified
if db:
try:
size1 = len(self.db_dict['{}'.format(db)].keys())
slist = [self.db_dict[db][key].keys()
if isinstance(self.db_dict[db][key],dict)
else key
for key in self.db_dict[db].keys()]
size2 = len(max(slist,key=np.size))
except (KeyError, NameError):
logging.error("Must first specify a valid database.")
return
# if col is specified, table must be specified, and I need to
# check the max size of any given column in the structure
if table:
try:
if len(self.db_dict[db]['tables'][table]['columns'].keys()) > size2:
size2 = len(self.db_dict[db]['tables'][table]['columns'].keys())
if col:
try:
if len(self.db_dict[db]['tables'][table]['columns'][col].keys()) > size2:
size2 = len(self.db_dict[db]['tables'][table]['columns'][col].keys())
except(KeyError, NameError):
logging.error("Must first specify a valid column of the `{}` table within the `{}` db.".format(table,db))
return
except (KeyError, NameError):
logging.error("Must first specify a valid table within the `{}` db.".format(db))
return
t['Projects'] = ['--> @ {}:'.format(db)] + ['' for i in range(size2-1)]
t['Project Items'] = ['--> @ {}:'.format(key)
if isinstance(self.db_dict[db][key],dict)
and len(self.db_dict[db][key].keys()) == len(self.db_dict[db]['tables'].keys())
else '@ {}'.format(key)
if isinstance(self.db_dict[db][key],dict)
and len(self.db_dict[db][key].keys()) != len(self.db_dict[db]['tables'].keys())
else '{}'.format(key)
for key in self.db_dict[db].keys()] + ['' for i in range(size2-size1)]
# if only db is specified
if not table:
if not col:
reordered = sorted(max(slist,key=np.size),key=len)
t['Tables'] = ['@ {}'.format(i)
if isinstance(self.db_dict[db]['tables'][i],dict)
else '{}'.format(i)
for i in reordered]
# if table has been specified
else:
reordered = ['{}'.format(table)] + sorted([key
for key in self.db_dict[db]['tables'].keys()
if key != table],key=len)
t['Tables'] = ['--> @ {}:'.format(i)
if i == table
and isinstance(self.db_dict[db]['tables'][i],dict)
else '@ {}'.format(i)
if i != table
and isinstance(self.db_dict[db]['tables'][i],dict)
else '{}'.format(i)
for i in reordered] + ['' for j in range(size2-len(reordered))]
# if column has been specified
if col:
tblcols_dict = self.db_dict[db]['tables'][table].keys()
t['Table Items'] = ['--> @ columns:'] + [i for i in tblcols_dict if i != 'columns'] + ['' for j in range(size2-len(tblcols_dict))]
col_dict = self.db_dict[db]['tables'][table]['columns'].keys()
reordered = ['{}'.format(col)] + [i for i in col_dict if i != col]
if len(col_dict) < size2:
t['Columns'] = ['--> @ {}:'.format(i)
if isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
and i == col
else '--> {}:'.format(i)
if not isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
and i == col
else '{}'.format(i)
if not isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
and i != col
else '@ {}'.format(i)
if isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
and i != col
else '{}'.format(i)
for i in reordered] + ['' for j in range(size2-len(col_dict))]
colinfo_dict = self.db_dict[db]['tables'][table]['columns'][col]
t['Col. Info'] = ['{} : {}'.format(i,colinfo_dict[i]) for i in colinfo_dict.keys()] + ['' for j in range(size2-len(colinfo_dict))]
else:
t['Columns'] = ['--> @ {}:'.format(i)
if isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
and i == col
else '--> {}:'.format(i)
if not isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
and i == col
else '{}'.format(i)
if not isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
and i != col
else '@ {}'.format(i)
if isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
and i != col
else '{}'.format(i)
for i in reordered]
# if column has not been specified
else:
tblcols_dict = self.db_dict[db]['tables'][table].keys()
col_dict = self.db_dict[db]['tables'][table]['columns'].keys()
reordered = sorted(col_dict,key=len)
if len(tblcols_dict) < size2:
t['Table Items'] = ['@ {}'.format(i)
if isinstance(self.db_dict[db]['tables'][table][i],dict)
else '{}:'.format(i)
for i in tblcols_dict] + ['' for i in range(size2-len(tblcols_dict))]
t['Table Info'] = ['{}'.format(self.db_dict[db]['tables'][table][i])
if not isinstance(self.db_dict[db]['tables'][table][i],dict)
else ""
for i in tblcols_dict] + ['' for i in range(size2-len(tblcols_dict))]
if len(col_dict) < size2:
t['Columns'] = ['@ {}'.format(i)
if isinstance(self.db_dict[db]['tables'][table]['columns'][i],dict)
else '{}'.format(i)
for i in reordered] + ['' for i in range(size2-len(col_dict))]
else:
t['Columns'] = reordered
else:
t['Table Items'] = tblcols_dict
t.pprint()
def download(self,jobid=None,filename=None,format=None,cache=True):
"""
A public function to download data from a job with COMPLETED phase.
Parameters
----------
jobid : string
The jobid of the completed sql query to be downloaded.
filename : str
If left blank, downloaded to the terminal. If specified, data is
written out to file (directory can be included here).
format : str
The format of the data to be downloaded. Options are `csv`, `votable`,
`votableB1`, and `votableB2`.
cache : bool
Whether to cache the data. By default, this is set to True.
Returns
-------
headers, data : list, list
"""
self.check_all_jobs()
if not jobid:
try:
jobid = self.current_job
except AttributeError:
warnings.warn("No current job has been defined for this session.")
return
if self.job_dict['{}'.format(jobid)] == 'COMPLETED':
if not format:
warnings.warn("Must specify a format:")
t = Table()
t['Format'] = ['csv','votable','votableB1','votableB2']
t['Description'] = ['Comma-separated values file',
'IVOA VOTable Format',
'IVOA VOTable Format, Binary 1',
'IVOA VOTable Format, Binary 2']
t.pprint()
if format:
results = self._request('GET',
self.QUERY_URL+"/{}/results".format(jobid),
auth=(self.username,self.password))
soup = BeautifulSoup(results.content)
urls = [i.get('xlink:href') for i in soup.findAll('uws:result')]
formatlist = [urls[i].split('/')[-1].upper() for i in range(len(urls))]
if format.upper() in formatlist:
index = formatlist.index(format.upper())
downloadurl = urls[index]
if filename:
self._download_file(downloadurl,
local_filepath=filename,
auth=(self.username,self.password))
elif not filename:
if format.upper() == 'CSV':
raw_table_data = self._request('GET',
downloadurl,
auth=(self.username,self.password),
cache=cache).content
raw_headers = raw_table_data.split('\n')[0]
num_cols = len(raw_headers.split(','))
num_rows = len(raw_table_data.split('\n'))-2
headers = [raw_headers.split(',')[i].strip('"') for i in range(num_cols)]
raw_data = [raw_table_data.split('\n')[i+1].split(",") for i in range(num_rows)]
data = [map(eval,raw_data[i]) for i in range(num_rows)]
return headers,data
elif format.upper() in ['VOTABLEB1','VOTABLEB2']:
warnings.warn("Cannot view binary versions of votable within the terminal.")
warnings.warn("Try saving them to your disk with the `filename` option.")
return
elif format.upper() == 'VOTABLE':
# for terminal output, get data in csv format
tmp_downloadurl = urls[formatlist.index('CSV')]
raw_table_data = self._request('GET',
tmp_downloadurl,
auth=(self.username,self.password),
cache=cache).content
raw_headers = raw_table_data.split('\n')[0]
num_cols = len(raw_headers.split(','))
num_rows = len(raw_table_data.split('\n'))-2
headers = [raw_headers.split(',')[i].strip('"') for i in range(num_cols)]
raw_data = [raw_table_data.split('\n')[i+1].split(",") for i in range(num_rows)]
data = [map(eval,raw_data[i]) for i in range(num_rows)]
# store in astropy.Table object
tbl = Table(data=map(list, zip(*data)),names=headers)
# convert to votable format
votbl = votable.from_table(tbl)
return votbl
elif format.upper() not in formatlist:
print('Format not recognized. Please see formatting options:')
t = Table()
t['Format'] = ['csv','votable','votableB1','votableB2']
t['Description'] = ['Comma-Separated Values File',
'IVOA VOTable Format',
'IVOA VOTable Format, Binary 1',
'IVOA VOTable Format, Binary 2']
t.pprint()
def _check_phase(self, jobid):
"""
A private function which checks the job phase of a query.
Parameters
----------
jobid : string
The jobid of the sql query.
"""
self._existing_tables()
time.sleep(1)
if jobid not in self.job_dict.keys():
logging.error("Job not present in job dictionary.")
return
else:
phase = self.job_dict['{}'.format(jobid)]
return phase
def _mail(self, to, subject, text, *attach):
"""
A private function which sends an email message (used for job alerts) to an email address.
Parameters
----------
to : string
The email address receiving the job alert.
subject : string
The subject of the job alert.
text : string
The content of the job alert.
"""
msg = MIMEMultipart()
msg['From']=self._smsaddress
msg['To']=to
msg['Subject']=subject
msg.attach(MIMEText(text))
n=len(attach)
for i in range(n):
part = MIMEBase('application','octet-stream')
part.set_payload(open(attach[i],'rb').read())
encoders.encode_base64(part)  # assumes `from email import encoders` is available at module level
part.add_header('Content-Disposition','attachment; filename="%s"' % os.path.basename(attach[i]))
msg.attach(part)
mailServer=smtplib.SMTP('smtp.gmail.com',587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(self._smsaddress, self._smspw)
mailServer.sendmail(self._smsaddress, to, msg.as_string())
mailServer.quit()
def _text(self,fromwhom,number,text):
"""
A private function which sends an SMS message to a cell phone number.
Parameters
----------
fromwhom : string
The email address sending the alert: "donotreply.astroquery.cosmosim@gmail.com"
number : string
The user-provided cell phone receiving the job alert.
text : string
The content of the job alert.
"""
server = smtplib.SMTP( "smtp.gmail.com", 587 )
server.starttls()
server.login(self._smsaddress, self._smspw)
server.sendmail( '{}'.format(fromwhom), '{}@vtext.com'.format(number), '{}'.format(text) )
server.quit()
def _initialize_alerting(self,jobid,mail=None,text=None):
"""
A private function which initializes the email/text alert service credentials.
Also preemptively checks for job phase being COMPLETED, ABORTED, or ERROR so that
users don't simply send alerts for old jobs.
Parameters
----------
jobid : string
The jobid of the sql query.
mail : string
The user-provided email address receiving the job alert.
text : string
The user-provided cell phone receiving the job alert.
"""
self._smsaddress = "donotreply.astroquery.cosmosim@gmail.com"
password_from_keyring = keyring.get_password("astroquery:cosmosim.SMSAlert", self._smsaddress)
if password_from_keyring:
self._smspw = password_from_keyring
if not password_from_keyring:
logging.warning("CosmoSim SMS alerting has not been initialized.")
warnings.warn("Initializing SMS alerting.")
keyring.set_password("astroquery:cosmosim.SMSAlert", self._smsaddress,"LambdaCDM")
self.alert_email = mail
self.alert_text = text
# first check to see if the job has errored (or is a job that has already completed) before running on a loop
phase = self._check_phase(jobid)
if phase in ['COMPLETED','ABORTED','ERROR']:
warnings.warn("JobID {} has finished with status {}.".format(jobid,phase))
self.alert_completed = True
elif phase in ['EXECUTING','PENDING','QUEUED']:
self.alert_completed = False
else:
self.alert_completed = False
class AlertThread(object):
""" Alert threading class
The _alert() method will be started and it will run in the background
until the application exits.
"""
def __init__(self, jobid, queue='short'):
"""
Parameters
----------
jobid : string
The jobid of the sql query.
queue : string
The short/long queue option. Default is short.
"""
self.jobid = jobid
self.queue = queue
thread = threading.Thread(target=self._alert, args=(self.jobid,self.queue))
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def _alert(self,jobid,queue):
"""
A private function which runs checks for job completion every 10 seconds for
short-queue jobs and 60 seconds for long-queue jobs. Once job phase is COMPLETED,
ERROR, or ABORTED, emails and/or texts the results of the query to the user.
Parameters
----------
jobid : string
The jobid of the sql query.
queue : string
The short/long queue option. Default is short.
"""
if queue == 'long':
deltat = 60
else:
deltat = 10
while self.alert_completed is False:
phase = self._check_phase(jobid)
if phase in ['COMPLETED','ABORTED','ERROR']:
warnings.warn("JobID {} has finished with status {}.".format(jobid,phase))
self.alert_completed = True
time.sleep(1)
self.general_job_info(jobid)
if self.alert_email:
self._mail(self.alert_email,
"Job {} Completed with phase {}.".format(jobid,phase),
"{}".format(self.response_dict_current[jobid]['content']))
if self.alert_text:
self._text(self._smsaddress,
self.alert_text,
"Job {} Completed with phase {}.".format(jobid,phase))
time.sleep(deltat)
CosmoSim = CosmoSimClass()
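# A short usage sketch (not part of the module), assuming it is importable as
# astroquery.cosmosim and that a query has already been submitted in this session.
# 'MDPL' and `jobid` are placeholders, not values taken from this file.
#
#   from astroquery.cosmosim import CosmoSim
#   CosmoSim.explore_db(db='MDPL')                        # browse tables of one simulation
#   CosmoSim.check_all_jobs()                             # refresh the phase of every stored job
#   headers, data = CosmoSim.download(jobid=jobid, format='csv')
#   CosmoSim.delete_all_jobs(phase=['COMPLETED'])         # remove finished jobs from the server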
|
__init__.py
|
import bpy
from .scene_exporter import get_scene_data, export_scene
from collections import OrderedDict
from ws4py.client.threadedclient import WebSocketClient
import ws4py.messaging
import json
import threading
from io import BytesIO
import struct
from . import engine
bl_info = {
"name": "Blender Tools",
"author": "",
"version": (1, 0, 0),
"blender": (2, 78, 0),
"description": "",
"warning": "",
"category": "Development"
}
def make_RPC_data(params):
data = OrderedDict()
data['jsonrpc'] = "2.0"
data["method"] = "render"
data["params"] = params
return data
def send_scene_data(ws):
scene_data = get_scene_data()
ws.send(json.dumps(make_RPC_data(scene_data), indent=4))
def write_scene_data():
user_preferences = bpy.context.user_preferences
pref = user_preferences.addons[__package__].preferences
export_scene(pref.out_dir, 'scene.json')
class WSClient(WebSocketClient):
def opened(self):
print("Connected")
def closed(self, code, reason):
print(("Closed down", code, reason))
def received_message(self, m):
if isinstance(m, ws4py.messaging.TextMessage):
print("=> %d %s" % (len(m), str(m)))
elif isinstance(m, ws4py.messaging.BinaryMessage):
print("=> binary")
out = BytesIO(m.data)
img_data = []
index = 0
rgba = []
b = out.read(4)
while b:
rgba.append(struct.unpack("f", b)[0])
if index % 3 == 2:
rgba.append(1)
img_data.append(rgba)
rgba = []
b = out.read(4)
index += 1
# print(img_data)
out.close()
engine.update(img_data)
th_me = threading.Thread(target=bpy.ops.render.render, name="th_me")
th_me.start()
g_exporting_scene = False
# dof_distance and fstop are not detected by is_updated.
# So we have to check whether the variables are updated
g_dof_distance = -1
g_fstop = -1
g_ws = False
g_ws_connected = False
class Panel(bpy.types.Panel):
bl_label = "blender-tools"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOL_PROPS"
@classmethod
def poll(cls, context):
renderer = context.scene.render
return renderer.engine == "TOOLS_RENDER"
def draw(self, context):
global g_exporting_scene
if context.scene.render.engine != "TOOLS_RENDER":
return
if g_exporting_scene:
self.layout.operator("export.stop",
text="Stop Scene Exporter",
icon='CANCEL')
else:
self.layout.operator("export.start",
text="Start Scene Exporter",
icon='PLAY')
class StartExportButtonOperation(bpy.types.Operator):
bl_idname = "export.start"
bl_label = "text"
def execute(self, context):
global g_exporting_scene
global g_dof_distance
global g_fstop
global g_ws
global g_ws_connected
g_ws = WSClient('ws://localhost:8081/websocket',
protocols=['http-only', 'chat'])
g_ws.daemon = True
try:
g_ws.connect()
g_ws_connected = True
except ConnectionRefusedError:
print('Connection refused')
g_exporting_scene = True
g_dof_distance = -1
g_fstop = -1
bpy.app.handlers.scene_update_post.append(scene_update)
return {'FINISHED'}
class StopExportButtonOperation(bpy.types.Operator):
bl_idname = "export.stop"
bl_label = "text"
def execute(self, context):
global g_exporting_scene
global g_ws
global g_ws_connected
if g_ws_connected:
g_ws.close()
g_ws_connected = False
g_exporting_scene = False
print('stop')
bpy.app.handlers.scene_update_post.remove(scene_update)
return {'FINISHED'}
g_update_timer = None
def scene_update(context):
global g_dof_distance
global g_fstop
global g_ws
global g_ws_connected
global g_update_timer
is_updated = False
is_updated = (bpy.data.objects.is_updated or
bpy.data.materials.is_updated or
bpy.data.lamps.is_updated or
bpy.data.cameras.is_updated)
if g_dof_distance != bpy.data.cameras['Camera'].dof_distance:
is_updated = True
g_dof_distance = bpy.data.cameras['Camera'].dof_distance
if g_fstop != bpy.data.cameras['Camera'].gpu_dof.fstop:
is_updated = True
g_fstop = bpy.data.cameras['Camera'].gpu_dof.fstop
if not is_updated:
return
print('scene was updated')
def export_data():
if g_ws_connected:
send_scene_data(g_ws)
write_scene_data()
if g_update_timer is not None:
g_update_timer.cancel()
g_update_timer = threading.Timer(0.5, export_data)
g_update_timer.start()
class ToolsRender(bpy.types.RenderEngine):
bl_idname = 'TOOLS_RENDER'
bl_label = 'Blender Tools Preview'
bl_use_preview = True
bl_use_save_buffers = True
def __init__(self):
self.render_pass = None
def __del__(self):
if hasattr(engine, 'render_pass') and self.render_pass is not None:
del self.render_pass
def update(self, data, scene):
print('update')
if not self.render_pass:
self.render_pass = engine.create(self, data, scene)
def render(self, scene):
print('start rendering')
if self.render_pass is not None:
engine.render(self)
def register ():
from . import ui
ui.register()
bpy.utils.register_module(__name__)
def unregister ():
from . import ui
ui.unregister()
bpy.utils.unregister_module(__name__)
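# Illustration only (not executed by the add-on): make_RPC_data wraps the exporter
# output in a JSON-RPC 2.0 "render" request before send_scene_data pushes it over
# the websocket. The {"objects": []} payload below is a dummy stand-in for real scene data.
#
#   payload = make_RPC_data({"objects": []})
#   # payload == OrderedDict([('jsonrpc', '2.0'), ('method', 'render'), ('params', {'objects': []})])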
|
processing.py
|
import multiprocessing as mp
from processing_annexes import *
"""Processing_annexes contient la fonction process, qui doit être dans un 'module' séparé."""
def multiproc(function, nombreproc, args):
"""
Runs the function `function` on `nombreproc` processes, with the arguments `args` (one entry per process).
The results are collected, process by process, in `values`.
"""
__spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
manager = mp.Manager()
values = manager.dict()
jobs = []
for i in range(nombreproc):
p = mp.Process(target=process, args=(function, i, values, args[i]))
jobs.append(p)
p.start()
for j in jobs:
j.join()
print(values)
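# A minimal usage sketch. It assumes `process(function, i, values, arg)` from
# processing_annexes stores the result of running `function` on `arg` under key `i`
# in the shared dict, which is what multiproc relies on.
#
#   def carre(x):
#       return x * x
#
#   if __name__ == '__main__':
#       multiproc(carre, nombreproc=4, args=[1, 2, 3, 4])
#       # expected to print something like {0: 1, 1: 4, 2: 9, 3: 16} under that assumption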
|
data_iterator.py
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Data Iterator is module for getting data from user defined source with following features.
Detailed design document is :doc:`/doc/designs/data_iterator`.
'''
import atexit
import numpy
import six
import threading
from .data_source import DataSourceWithFileCache
from .data_source import DataSourceWithMemoryCache
from .data_source import SlicedDataSource
from .data_source_implements import SimpleDataSource
from .data_source_implements import CsvDataSource
from .data_source_implements import CacheDataSource
from .data_source_implements import ConcatDataSource
from nnabla.logger import logger
class DataIterator(object):
'''DataIterator
Collect data from `data_source` and yields bunch of data.
Args:
data_source (:py:class:`DataSource <nnabla.utils.data_source.DataSource>`):
Instance of DataSource class which provides data for this class.
batch_size (int): Size of data unit.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
'''
def __init__(self,
data_source,
batch_size,
rng=None,
use_thread=True,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
logger.info('Using DataIterator')
if rng is None:
rng = numpy.random.RandomState(313)
self._rng = rng
self._shape = None # Only use with padding
self._data_position = 0 # Only use with padding
self._data_source = data_source
# place holder for shuffle is enabled or not at starting time.
self._shuffle = self._data_source.shuffle
self._variables = data_source.variables
self._num_of_variables = len(data_source.variables)
self._batch_size = batch_size
self._epoch = -1
self._epoch_end_callbacks = list(epoch_end_callbacks)
self._epoch_begin_callbacks = list(epoch_begin_callbacks)
self._size = data_source.size
self._reset()
self._current_epoch = -1
self._current_data = None
self._use_thread = use_thread
if self._use_thread:
self._next_thread = threading.Thread(target=self._next)
self._next_thread.start()
self._closed = False
atexit.register(self.close)
def __iter__(self):
return self
def __next__(self):
return self.next()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def close(self):
if not self._closed:
if six.PY3:
atexit.unregister(self.close)
self._data_source.close()
self._closed = True
@property
def epoch(self):
'''epoch
The number of times :py:meth:`position` returns to zero.
Returns:
int: epoch
'''
return self._current_epoch
@property
def position(self):
'''position
Data position in current epoch.
Returns:
int: Data position
'''
return self._data_source.position
@property
def size(self):
'''size
Data size that DataIterator will generate.
This is the largest integer multiple of batch_size not exceeding
:py:meth:`self._data_source.size`.
Returns:
int: Data size
'''
return self._size
@property
def variables(self):
'''variables
Variable names of the data.
Returns:
tuple: tuple of Variable names
'''
return self._variables
@property
def batch_size(self):
'''batch_size
Number of training samples that :py:meth:`next()` returns.
Returns:
int: Number of training samples.
'''
return self._batch_size
def _reset(self):
self._callback_epoch_end()
self._epoch += 1
self._callback_epoch_begin()
self._data_source.reset()
def _next(self):
data = [[] for x in self._variables]
for b in range(self._batch_size):
d = self._data_source.next()
if d is None:
self._current_data = None
return
if self._data_source.position >= self._size:
self._reset()
for i, v in enumerate(self._variables):
data[i].append(d[i])
self._current_data = (self._epoch, tuple(
[numpy.array(x) for x in data]))
def next(self):
'''next
It generates tuple of data.
For example,
if :py:meth:`self._variables == ('x', 'y')`
This method returns :py:meth:` ( [[X] * batch_size], [[Y] * batch_size] )`
Returns:
tuple: tuple of data for mini-batch in numpy.ndarray.
'''
if self._use_thread:
# Wait for finish previous thread.
self._next_thread.join()
if self._current_data is None:
logger.log(99, 'next() got None retrying.')
self._next_thread = threading.Thread(target=self._next)
self._next_thread.start()
self._next_thread.join()
self._current_epoch, data = self._current_data
# Start next thread.
self._next_thread = threading.Thread(target=self._next)
self._next_thread.start()
else:
self._next()
self._current_epoch, data = self._current_data
return data
def slice(self, rng, num_of_slices=None, slice_pos=None,
slice_start=None, slice_end=None,
cache_dir=None):
'''
Slices the data iterator so that newly generated data iterator has access to limited portion of the original data.
Args:
rng (numpy.random.RandomState): Random generator for Initializer.
num_of_slices(int): Total number of slices to be made. Must be used together with `slice_pos`.
slice_pos(int): Position of the slice to be assigned to the new data iterator. Must be used together with `num_of_slices`.
slice_start(int): Starting position of the range to be sliced into new data iterator. Must be used together with `slice_end`.
slice_end(int) : End position of the range to be sliced into new data iterator. Must be used together with `slice_start`.
cache_dir(str) : Directory to save cache files
Example:
.. code-block:: python
from nnabla.utils.data_iterator import data_iterator_simple
import numpy as np
def load_func1(index):
d = np.ones((2, 2)) * index
return d
di = data_iterator_simple(load_func1, 1000, batch_size=3)
di_s1 = di.slice(None, num_of_slices=10, slice_pos=0)
di_s2 = di.slice(None, num_of_slices=10, slice_pos=1)
di_s3 = di.slice(None, slice_start=100, slice_end=200)
di_s4 = di.slice(None, slice_start=300, slice_end=400)
'''
if num_of_slices is not None and slice_pos is not None and slice_start is None and slice_end is None:
size = self._size // num_of_slices
amount = self._size % num_of_slices
slice_start = slice_pos * size
if slice_pos < amount:
slice_start += slice_pos
else:
slice_start += amount
slice_end = slice_start + size
if slice_end > self._size:
slice_start -= (slice_end - self._size)
slice_end = self._size
elif num_of_slices is None and slice_pos is None and slice_start is not None and slice_end is not None:
pass
else:
logger.critical(
'You must specify position(num_of_slice and slice_pos) or range(slice_start and slice_end).')
return None
if cache_dir is None:
ds = self._data_source
while '_data_source' in dir(ds):
if '_cache_dir' in dir(ds):
cache_dir = ds._cache_dir
ds = ds._data_source
if cache_dir is None:
return DataIterator(
DataSourceWithMemoryCache(
SlicedDataSource(
self._data_source,
self._data_source.shuffle,
slice_start=slice_start,
slice_end=slice_end),
shuffle=self._shuffle,
rng=rng),
self._batch_size)
else:
return DataIterator(
DataSourceWithMemoryCache(
DataSourceWithFileCache(
SlicedDataSource(
self._data_source,
self._data_source.shuffle,
slice_start=slice_start,
slice_end=slice_end),
cache_dir=cache_dir,
cache_file_name_prefix='cache_sliced_{:08d}_{:08d}'.format(
slice_start,
slice_end),
shuffle=self._shuffle,
rng=rng),
shuffle=self._shuffle,
rng=rng),
self._batch_size)
def _callback_epoch_end(self):
for callback in self._epoch_end_callbacks:
callback(self.epoch)
def _callback_epoch_begin(self):
for callback in self._epoch_begin_callbacks:
callback(self.epoch)
def register_epoch_end_callback(self, callback):
"""Register epoch end callback.
Args:
callback (function): A function takes an epoch index as an argument.
"""
self._epoch_end_callbacks.append(callback)
def register_epoch_begin_callback(self, callback):
"""Register epoch begin callback.
Args:
callback (function): A function takes an epoch index as an argument.
"""
self._epoch_begin_callbacks.append(callback)
def data_iterator(data_source,
batch_size,
rng=None,
with_memory_cache=True,
with_file_cache=False,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator
Helper method to use :py:class:`DataSource <nnabla.utils.data_source.DataSource>`.
You can use :py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>` with your own :py:class:`DataSource <nnabla.utils.data_source.DataSource>`
for easy implementation of data sources.
For example,
.. code-block:: python
ds = YourOwnImplementationOfDataSource()
batch = data_iterator(ds, batch_size)
Args:
data_source (:py:class:`DataSource <nnabla.utils.data_source.DataSource>`):
Instance of DataSource class which provides data.
batch_size (int): Batch size.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option is a good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator.
'''
if with_file_cache:
ds = DataSourceWithFileCache(data_source=data_source,
cache_dir=cache_dir,
shuffle=data_source.shuffle,
rng=rng)
if with_memory_cache:
ds = DataSourceWithMemoryCache(ds,
shuffle=ds.shuffle,
rng=rng)
return DataIterator(ds,
batch_size,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks)
else:
if with_memory_cache:
data_source = DataSourceWithMemoryCache(data_source,
shuffle=data_source.shuffle,
rng=rng)
return DataIterator(data_source, batch_size,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks)
def data_iterator_simple(load_func,
num_examples,
batch_size,
shuffle=False,
rng=None,
with_memory_cache=True,
with_file_cache=True,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
"""A generator that ``yield`` s minibatch data as a tuple, as defined in ``load_func`` .
It can unlimitedly yield minibatches at your request, queried from the provided data.
Args:
load_func (function): Takes a single argument `i`, an index of an
example in your dataset to be loaded, and returns a tuple of data.
Every call by any index `i` must return a tuple of arrays with
the same shape.
num_examples (int): Number of examples in your dataset. Random sequence
of indexes is generated according to this number.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option is a good idea.
Default value is True.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator.
Here is an example of `load_func` which returns an image and a label of a
classification dataset.
.. code-block:: python
import numpy as np
from nnabla.utils.image_utils import imread
image_paths = load_image_paths()
labels = load_labels()
def my_load_func(i):
'''
Returns:
image: c x h x w array
label: 0-shape array
'''
img = imread(image_paths[i]).astype('float32')
return np.rollaxis(img, 2), np.array(labels[i])
"""
return data_iterator(SimpleDataSource(load_func,
num_examples,
shuffle=shuffle,
rng=rng),
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
cache_dir=cache_dir,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks)
def data_iterator_csv_dataset(uri,
batch_size,
shuffle=False,
rng=None,
normalize=True,
with_memory_cache=True,
with_file_cache=True,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_csv_dataset
Get data directly from a dataset provided as a CSV file.
You can read files located on the local file system, http(s) servers or Amazon AWS S3 storage.
For example,
.. code-block:: python
batch = data_iterator_csv_dataset('CSV_FILE.csv', batch_size, shuffle=True)
Args:
uri (str): Location of dataset CSV file.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option is a good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = CsvDataSource(uri,
shuffle=shuffle,
rng=rng,
normalize=normalize)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
cache_dir=cache_dir,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks)
def data_iterator_cache(uri,
batch_size,
shuffle=False,
rng=None,
normalize=True,
with_memory_cache=True,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_cache
Get data from the cache directory.
Cache files are read from the local file system.
For example,
.. code-block:: python
batch = data_iterator_cache('CACHE_DIR', batch_size, shuffle=True)
Args:
uri (str): Location of directory with cache files.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = CacheDataSource(uri,
shuffle=shuffle,
rng=rng,
normalize=normalize)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks)
def data_iterator_concat_datasets(data_source_list,
batch_size,
shuffle=False,
rng=None,
with_memory_cache=True,
with_file_cache=False,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_concat_datasets
Get data from multiple datasets.
For example,
.. code-block:: python
batch = data_iterator_concat_datasets([DataSource0, DataSource1, ...], batch_size)
Args:
data_source_list (list of DataSource): list of datasets.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option is a good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = ConcatDataSource(data_source_list,
shuffle=shuffle,
rng=rng)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks)
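# A short sketch of the callback hooks documented above; `load_func` follows the
# contract described in data_iterator_simple and is the only assumed piece here.
#
#   import numpy as np
#   def load_func(i):
#       return (np.full((2, 2), i, dtype=np.float32),)
#   di = data_iterator_simple(load_func, num_examples=100, batch_size=10, shuffle=True)
#   di.register_epoch_begin_callback(lambda e: print('epoch', e, 'begins'))
#   di.register_epoch_end_callback(lambda e: print('epoch', e, 'ends'))
#   x, = di.next()      # x.shape == (10, 2, 2)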
|
test_consumer_client.py
|
import time
import pytest
import threading
import sys
from azure.eventhub import EventData
from azure.eventhub import EventHubConsumerClient
from azure.eventhub._eventprocessor.in_memory_checkpoint_store import InMemoryCheckpointStore
from azure.eventhub._constants import ALL_PARTITIONS
@pytest.mark.liveTest
def test_receive_no_partition(connstr_senders):
connection_str, senders = connstr_senders
senders[0].send(EventData("Test EventData"))
senders[1].send(EventData("Test EventData"))
client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default', receive_timeout=1)
def on_event(partition_context, event):
on_event.received += 1
partition_context.update_checkpoint(event)
on_event.namespace = partition_context.fully_qualified_namespace
on_event.eventhub_name = partition_context.eventhub_name
on_event.consumer_group = partition_context.consumer_group
on_event.offset = event.offset
on_event.sequence_number = event.sequence_number
on_event.received = 0
on_event.namespace = None
on_event.eventhub_name = None
on_event.consumer_group = None
on_event.offset = None
on_event.sequence_number = None
with client:
worker = threading.Thread(target=client.receive,
args=(on_event,),
kwargs={"starting_position": "-1"})
worker.start()
time.sleep(10)
assert on_event.received == 2
checkpoints = list(client._event_processors.values())[0]._checkpoint_store.list_checkpoints(
on_event.namespace, on_event.eventhub_name, on_event.consumer_group
)
assert len([checkpoint for checkpoint in checkpoints if checkpoint["offset"] == on_event.offset]) > 0
assert len([checkpoint for checkpoint in checkpoints if checkpoint["sequence_number"] == on_event.sequence_number]) > 0
@pytest.mark.liveTest
def test_receive_partition(connstr_senders):
connection_str, senders = connstr_senders
senders[0].send(EventData("Test EventData"))
client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
def on_event(partition_context, event):
on_event.received += 1
on_event.partition_id = partition_context.partition_id
on_event.consumer_group = partition_context.consumer_group
on_event.fully_qualified_namespace = partition_context.fully_qualified_namespace
on_event.eventhub_name = partition_context.eventhub_name
on_event.received = 0
with client:
worker = threading.Thread(target=client.receive,
args=(on_event,),
kwargs={"starting_position": "-1",
"partition_id": "0"})
worker.start()
time.sleep(10)
assert on_event.received == 1
assert on_event.partition_id == "0"
assert on_event.consumer_group == "$default"
assert on_event.fully_qualified_namespace in connection_str
assert on_event.eventhub_name == senders[0]._client.eventhub_name
@pytest.mark.liveTest
def test_receive_load_balancing(connstr_senders):
if sys.platform.startswith('darwin'):
pytest.skip("Skipping on OSX - test code using multiple threads. Sometimes OSX aborts python process")
connection_str, senders = connstr_senders
cs = InMemoryCheckpointStore()
client1 = EventHubConsumerClient.from_connection_string(
connection_str, consumer_group='$default', checkpoint_store=cs, load_balancing_interval=1)
client2 = EventHubConsumerClient.from_connection_string(
connection_str, consumer_group='$default', checkpoint_store=cs, load_balancing_interval=1)
def on_event(partition_context, event):
pass
with client1, client2:
worker1 = threading.Thread(target=client1.receive,
args=(on_event,),
kwargs={"starting_position": "-1"})
worker2 = threading.Thread(target=client2.receive,
args=(on_event,),
kwargs={"starting_position": "-1"})
worker1.start()
time.sleep(3.3)
worker2.start()
time.sleep(20)
assert len(client1._event_processors[("$default", ALL_PARTITIONS)]._consumers) == 1
assert len(client2._event_processors[("$default", ALL_PARTITIONS)]._consumers) == 1
def test_receive_batch_no_max_wait_time(connstr_senders):
'''Test whether callback is called when max_wait_time is None and max_batch_size has been reached
'''
connection_str, senders = connstr_senders
senders[0].send(EventData("Test EventData"))
senders[1].send(EventData("Test EventData"))
client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
def on_event_batch(partition_context, event_batch):
on_event_batch.received += len(event_batch)
partition_context.update_checkpoint()
on_event_batch.namespace = partition_context.fully_qualified_namespace
on_event_batch.eventhub_name = partition_context.eventhub_name
on_event_batch.consumer_group = partition_context.consumer_group
on_event_batch.offset = event_batch[-1].offset
on_event_batch.sequence_number = event_batch[-1].sequence_number
on_event_batch.received = 0
on_event_batch.namespace = None
on_event_batch.eventhub_name = None
on_event_batch.consumer_group = None
on_event_batch.offset = None
on_event_batch.sequence_number = None
with client:
worker = threading.Thread(target=client.receive_batch, args=(on_event_batch,),
kwargs={"starting_position": "-1"})
worker.start()
time.sleep(10)
assert on_event_batch.received == 2
checkpoints = list(client._event_processors.values())[0]._checkpoint_store.list_checkpoints(
on_event_batch.namespace, on_event_batch.eventhub_name, on_event_batch.consumer_group
)
assert len([checkpoint for checkpoint in checkpoints if checkpoint["offset"] == on_event_batch.offset]) > 0
assert len(
[checkpoint for checkpoint in checkpoints if checkpoint["sequence_number"] == on_event_batch.sequence_number]) > 0
worker.join()
@pytest.mark.parametrize("max_wait_time, sleep_time, expected_result",
[(3, 10, []),
(3, 2, None),
])
def test_receive_batch_empty_with_max_wait_time(connection_str, max_wait_time, sleep_time, expected_result):
'''Test whether event handler is called when max_wait_time > 0 and no event is received
'''
client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
def on_event_batch(partition_context, event_batch):
on_event_batch.event_batch = event_batch
on_event_batch.event_batch = None
with client:
worker = threading.Thread(target=client.receive_batch, args=(on_event_batch,), kwargs={
"max_wait_time": max_wait_time, "starting_position": "-1"
})
worker.start()
time.sleep(sleep_time)
assert on_event_batch.event_batch == expected_result
worker.join()
def test_receive_batch_early_callback(connstr_senders):
''' Test whether the callback is called once max_batch_size is reached and before max_wait_time elapses.
'''
connection_str, senders = connstr_senders
for _ in range(10):
senders[0].send(EventData("Test EventData"))
client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
def on_event_batch(partition_context, event_batch):
on_event_batch.received += len(event_batch)
on_event_batch.received = 0
with client:
worker = threading.Thread(target=client.receive_batch, args=(on_event_batch,), kwargs={
"max_batch_size": 10, "max_wait_time": 100, "starting_position": "-1", "partition_id": "0"
})
worker.start()
time.sleep(10)
assert on_event_batch.received == 10
worker.join()
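# A compact receive sketch distilled from the tests above; `connection_str` is a
# placeholder for a real Event Hubs connection string.
#
#   client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
#   def on_event(partition_context, event):
#       partition_context.update_checkpoint(event)
#   with client:
#       client.receive(on_event, starting_position="-1")   # blocks; the tests run it in a thread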
|
cleanup.py
|
"""
sentry.runner.commands.cleanup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from datetime import timedelta
from uuid import uuid4
import click
from django.utils import timezone
from sentry.runner.decorators import log_options
from six.moves import xrange
# allows services like tagstore to add their own (abstracted) models
# to cleanup
EXTRA_BULK_QUERY_DELETES = []
def get_project(value):
from sentry.models import Project
try:
if value.isdigit():
return int(value)
if '/' not in value:
return None
org, proj = value.split('/', 1)
return Project.objects.get_from_cache(
organization__slug=org,
slug=proj,
).id
except Project.DoesNotExist:
return None
# We need a unique value to indicate when to stop multiprocessing queue
# an identity on an object() isn't guaranteed to work between parent
# and child proc
_STOP_WORKER = '91650ec271ae4b3e8a67cdc909d80f8c'
def multiprocess_worker(task_queue):
# Configure within each Process
import logging
from sentry.utils.imports import import_string
logger = logging.getLogger('sentry.cleanup')
configured = False
while True:
j = task_queue.get()
if j == _STOP_WORKER:
task_queue.task_done()
return
# On first task, configure Sentry environment
if not configured:
from sentry.runner import configure
configure()
from sentry import models
from sentry import deletions
from sentry import similarity
skip_models = [
# Handled by other parts of cleanup
models.Event,
models.EventMapping,
models.EventAttachment,
models.UserReport,
models.Group,
models.GroupEmailThread,
models.GroupRuleStatus,
# Handled by TTL
similarity.features,
] + [b[0] for b in EXTRA_BULK_QUERY_DELETES]
configured = True
model, chunk = j
model = import_string(model)
try:
task = deletions.get(
model=model,
query={'id__in': chunk},
skip_models=skip_models,
transaction_id=uuid4().hex,
)
while True:
if not task.chunk():
break
except Exception as e:
logger.exception(e)
finally:
task_queue.task_done()
@click.command()
@click.option('--days', default=30, show_default=True, help='Number of days of data to keep; anything older is deleted.')
@click.option('--project', help='Limit truncation to only entries from project.')
@click.option(
'--concurrency',
type=int,
default=1,
show_default=True,
help='The total number of concurrent worker processes to run.'
)
@click.option(
'--silent', '-q', default=False, is_flag=True, help='Run quietly. No output on success.'
)
@click.option('--model', '-m', multiple=True)
@click.option('--router', '-r', default=None, help='Database router')
@click.option(
'--timed',
'-t',
default=False,
is_flag=True,
help='Send the duration of this command to internal metrics.'
)
@log_options()
def cleanup(days, project, concurrency, silent, model, router, timed):
"""Delete a portion of trailing data based on creation date.
All data that is older than `--days` will be deleted. The default for
this is 30 days. In the default setting all projects will be truncated,
but the cleanup can be limited to a specific project with the `--project`
flag, which accepts a project ID or a string of the form `org/project`
where both are slugs.
"""
if concurrency < 1:
click.echo('Error: Minimum concurrency is 1', err=True)
raise click.Abort()
# Make sure we fork off multiprocessing pool
# before we import or configure the app
from multiprocessing import Process, JoinableQueue as Queue
pool = []
task_queue = Queue(1000)
for _ in xrange(concurrency):
p = Process(target=multiprocess_worker, args=(task_queue,))
p.daemon = True
p.start()
pool.append(p)
from sentry.runner import configure
configure()
from django.db import router as db_router
from sentry.app import nodestore
from sentry.db.deletion import BulkDeleteQuery
from sentry import models
if timed:
import time
from sentry.utils import metrics
start_time = time.time()
# list of models which this query is restricted to
model_list = {m.lower() for m in model}
def is_filtered(model):
if router is not None and db_router.db_for_write(model) != router:
return True
if not model_list:
return False
return model.__name__.lower() not in model_list
# Deletions that use `BulkDeleteQuery` (and don't need to worry about child relations)
# (model, datetime_field, order_by)
BULK_QUERY_DELETES = [
(models.EventMapping, 'date_added', '-date_added'),
(models.EventAttachment, 'date_added', None),
(models.UserReport, 'date_added', None),
(models.GroupEmailThread, 'date', None),
(models.GroupRuleStatus, 'date_added', None),
] + EXTRA_BULK_QUERY_DELETES
# Deletions that use the `deletions` code path (which handles their child relations)
# (model, datetime_field, order_by)
DELETES = (
(models.Event, 'datetime', 'datetime'),
(models.Group, 'last_seen', 'last_seen'),
)
if not silent:
click.echo('Removing expired values for LostPasswordHash')
if is_filtered(models.LostPasswordHash):
if not silent:
click.echo('>> Skipping LostPasswordHash')
else:
models.LostPasswordHash.objects.filter(
date_added__lte=timezone.now() - timedelta(hours=48)
).delete()
for model in [models.ApiGrant, models.ApiToken]:
if not silent:
click.echo(u'Removing expired values for {}'.format(model.__name__))
if is_filtered(model):
if not silent:
click.echo(u'>> Skipping {}'.format(model.__name__))
else:
model.objects.filter(expires_at__lt=timezone.now()).delete()
project_id = None
if project:
click.echo(
"Bulk NodeStore deletion not available for project selection", err=True)
project_id = get_project(project)
if project_id is None:
click.echo('Error: Project not found', err=True)
raise click.Abort()
else:
if not silent:
click.echo("Removing old NodeStore values")
cutoff = timezone.now() - timedelta(days=days)
try:
nodestore.cleanup(cutoff)
except NotImplementedError:
click.echo(
"NodeStore backend does not support cleanup operation", err=True)
for bqd in BULK_QUERY_DELETES:
if len(bqd) == 4:
model, dtfield, order_by, chunk_size = bqd
else:
chunk_size = 10000
model, dtfield, order_by = bqd
if not silent:
click.echo(
u"Removing {model} for days={days} project={project}".format(
model=model.__name__,
days=days,
project=project or '*',
)
)
if is_filtered(model):
if not silent:
click.echo('>> Skipping %s' % model.__name__)
else:
BulkDeleteQuery(
model=model,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
).execute(chunk_size=chunk_size)
for model, dtfield, order_by in DELETES:
if not silent:
click.echo(
u"Removing {model} for days={days} project={project}".format(
model=model.__name__,
days=days,
project=project or '*',
)
)
if is_filtered(model):
if not silent:
click.echo('>> Skipping %s' % model.__name__)
else:
imp = '.'.join((model.__module__, model.__name__))
q = BulkDeleteQuery(
model=model,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
)
for chunk in q.iterator(chunk_size=100):
task_queue.put((imp, chunk))
task_queue.join()
# Clean up FileBlob instances which are no longer used and aren't super
# recent (as there could be a race between blob creation and reference)
if not silent:
click.echo("Cleaning up unused FileBlob references")
if is_filtered(models.FileBlob):
if not silent:
click.echo('>> Skipping FileBlob')
else:
cleanup_unused_files(silent)
# Shut down our pool
for _ in pool:
task_queue.put(_STOP_WORKER)
# And wait for it to drain
for p in pool:
p.join()
if timed:
duration = int(time.time() - start_time)
metrics.timing('cleanup.duration', duration, instance=router)
click.echo("Clean up took %s second(s)." % duration)
def cleanup_unused_files(quiet=False):
"""
Remove FileBlob's (and thus the actual files) if they are no longer
referenced by any File.
We set a minimum-age on the query to ensure that we don't try to remove
any blobs which are brand new and potentially in the process of being
referenced.
"""
from sentry.models import File, FileBlob, FileBlobIndex
if quiet:
from sentry.utils.query import RangeQuerySetWrapper
else:
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar as RangeQuerySetWrapper
cutoff = timezone.now() - timedelta(days=1)
queryset = FileBlob.objects.filter(
timestamp__lte=cutoff,
)
for blob in RangeQuerySetWrapper(queryset):
if FileBlobIndex.objects.filter(blob=blob).exists():
continue
if File.objects.filter(blob=blob).exists():
continue
blob.delete()
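# Example invocations (a sketch built only from the options defined above; the command
# is assumed to be exposed through the Sentry CLI as `sentry cleanup`):
#
#   sentry cleanup --days 30 --concurrency 4
#   sentry cleanup --days 7 --project my-org/my-project --model Event --timed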
|
spawn_n_terminate.py
|
import time, os, signal
from multiprocessing import Process
from main_proc_spoof import counter, counter_p
def proc_factory(fname,var):
return Process(target=fname, args=(var,))
def run_main(proc):
try:
proc.start()
except KeyboardInterrupt:
proc.terminate()
proc.join()
proc.close()
print('Running process with PID: ' + str(proc.pid))
return proc.pid
def term_main(pid):
#proc.terminate()
#proc.join()
os.kill(pid, signal.SIGKILL)
#os.kill(pid, signal.SIGINT)
time.sleep(2)
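    # Signal 0 does not actually deliver a signal; os.kill(pid, 0) only performs
    # error checking, so an OSError here means the process is already gone.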
try:
os.kill(pid, 0)
except OSError:
print('Terminating process with PID: ' + str(pid))
else:
print('Process ' + str(pid) + ' is still running.')
def delayed_print(var):
time.sleep(2)
print('Counter now at: ' + str(var))
def check_pid(pid):
""" Check For the existence of a unix pid. """
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
if __name__ == '__main__':
c = 0
#c2 = 0
#p2 = Process(target=counter_p, args=(c2,))
pid = run_main(proc_factory(counter,c))
#run_main(p2)
time.sleep(2)
term_main(pid)
#term_main(p2)
time.sleep(1)
#if check_pid(pid):
c = 0
print('Counter back to ' + str(c))
time.sleep(5)
pid = run_main(proc_factory(counter,c))
time.sleep(2)
term_main(pid)
delayed_print(c)
|
socket.py
|
import abc
import json
import logging
import os
import socket
from collections import namedtuple
from threading import Thread
from types import coroutine
from typing import List
from taro import paths
log = logging.getLogger(__name__)
class SocketServer(abc.ABC):
def __init__(self, socket_name):
self._socket_name = socket_name
self._server: socket = None
self._stopped = False
def start(self) -> bool:
if self._stopped:
return False
try:
socket_path = paths.socket_path(self._socket_name, create=True)
except FileNotFoundError as e:
log.error("event=[unable_create_socket_dir] socket_dir=[%s] message=[%s]", e.filename, e)
return False
self._server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
self._server.bind(str(socket_path))
Thread(target=self.serve, name='Thread-ApiServer').start()
return True
except PermissionError as e:
log.error("event=[unable_create_socket] socket=[%s] message=[%s]", socket_path, e)
return False
def serve(self):
log.debug('event=[server_started]')
while not self._stopped:
datagram, client_address = self._server.recvfrom(16384)
if not datagram:
break
req_body = json.loads(datagram)
resp_body = self.handle(req_body)
if resp_body:
if client_address:
self._server.sendto(json.dumps(resp_body).encode(), client_address)
else:
log.warning('event=[missing_client_address]')
log.debug('event=[server_stopped]')
@abc.abstractmethod
def handle(self, req_body):
"""
Handle request and optionally return response
:return: response body or None if no response
"""
def stop(self):
self._stopped = True
def close(self):
self.stop()
if self._server is None:
return
socket_name = self._server.getsockname()
try:
self._server.shutdown(socket.SHUT_RD)
self._server.close()
finally:
if os.path.exists(socket_name):
os.remove(socket_name)
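# A minimal sketch of how a concrete server might look (EchoServer is a
# hypothetical name, not part of taro): subclass SocketServer, pick a socket
# name, and implement handle() to return a response body (or None to send
# nothing back), e.g.:
#
#   class EchoServer(SocketServer):
#       def __init__(self):
#           super().__init__('echo')
#
#       def handle(self, req_body):
#           return {'echo': req_body}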
InstanceResponse = namedtuple('InstanceResponse', 'instance response')
class SocketClient:
def __init__(self, file_extension: str, bidirectional: bool):
self._file_extension = file_extension
self._bidirectional = bidirectional
self._client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
if bidirectional:
self._client.bind(self._client.getsockname())
self.dead_sockets = []
@coroutine
def servers(self, include=()):
req_body = '_' # Dummy initialization to remove warnings
resp = None
skip = False
for api_file in paths.socket_files(self._file_extension):
instance_id = api_file.stem
if (api_file in self.dead_sockets) or (include and instance_id not in include):
continue
while True:
if not skip:
req_body = yield resp
skip = False # reset
if not req_body:
break # next(this) called -> proceed to the next server
try:
self._client.sendto(json.dumps(req_body).encode(), str(api_file))
if self._bidirectional:
datagram = self._client.recv(16384)
resp = InstanceResponse(instance_id, json.loads(datagram.decode()))
except ConnectionRefusedError: # TODO what about other errors?
log.warning('event=[dead_socket] socket=[{}]'.format(api_file))
self.dead_sockets.append(api_file)
skip = True # Ignore this one and continue with another one
break
def communicate(self, req, include=()) -> List[InstanceResponse]:
server = self.servers(include=include)
responses = []
while True:
try:
next(server)
responses.append(server.send(req)) # StopIteration is raised from this function if last socket is dead
except StopIteration:
break
return responses
def close(self):
self._client.shutdown(socket.SHUT_RDWR)
self._client.close()
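# Rough usage sketch (assuming matching servers were created with the same file
# extension): client = SocketClient('api', bidirectional=True) opens a datagram
# socket, client.communicate({'req': 'ping'}) fans the request out to every live
# server socket and returns a list of InstanceResponse pairs, and client.close()
# shuts the socket down. Dead sockets found along the way are remembered in
# client.dead_sockets and skipped on later calls.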
|
setup.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
print '''
Free anyZipcrack-dictionary created by:
pyc0d3r: http://www.umarbrowser.co.vu/
'''
#imports
import zipfile
import optparse
from threading import Thread
# Try to extract the archive; if it succeeds we found the password
def extractFile(zFile, password):
try:
zFile.extractall(pwd=password)
print '[+] Found password ' + password + '\n'
except:
pass
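# Note: main() below spawns one thread per candidate password and shares a
# single ZipFile object between them; the bare except above simply swallows
# the failures caused by wrong passwords.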
# main that provides a command-line interface using optparse
def main():
parser = optparse.OptionParser("usage %prog "+\
"-f <zipfile> -d <dictionary>")
parser.add_option('-f', dest='zname', type='string',\
help='specify zip file')
parser.add_option('-d', dest='dname', type='string',\
help='specify dictionary file')
(options, args) = parser.parse_args()
    if options.zname is None or options.dname is None:
print parser.usage
exit(0)
else:
zname = options.zname
dname = options.dname
zFile = zipfile.ZipFile(zname)
passFile = open(dname)
for line in passFile.readlines():
password = line.strip('\n')
t = Thread(target=extractFile, args=(zFile, password))
t.start()
if __name__ == '__main__':
main()
|
run_manager.py
|
# -*- encoding: utf-8 -*-
import errno
import json
import logging
import os
import re
import signal
import socket
import stat
import subprocess
import sys
import time
from tempfile import NamedTemporaryFile
import threading
import yaml
import numbers
import inspect
import glob
import platform
import fnmatch
import click
from pkg_resources import parse_version
import six
from six.moves import queue
import requests
from watchdog.observers.polling import PollingObserver
from watchdog.events import PatternMatchingEventHandler
import webbrowser
import wandb
from wandb.apis import file_stream
from wandb import __version__
from wandb import env as wandb_env
from wandb import Error
from wandb import io_wrap
from wandb import jsonlfile
from wandb import file_pusher
from wandb import meta
from wandb.core import START_TIME
from wandb import sparkline
from wandb import stats
from wandb import streaming_log
from wandb import util
from wandb import wandb_config as config
from wandb import wandb_run
from wandb import wandb_socket
from wandb.compat import windows
from wandb.apis import InternalApi
from wandb.apis import CommError
logger = logging.getLogger(__name__)
class LaunchError(Error):
"""Raised when there's an error starting up."""
class FileTailer(object):
def __init__(self, path, on_read_fn, binary=False, seek_end=False):
self._path = path
mode = 'r'
if binary:
mode = 'rb'
self._file = open(path, mode)
if seek_end:
self._file.seek(0, 2) # seek to 0 bytes from end (2 means end)
self._on_read_fn = on_read_fn
self.running = True
self._thread = threading.Thread(target=self._thread_body)
self._thread.start()
def _thread_body(self):
while self.running:
where = self._file.tell()
data = self._file.read(1024)
if not data:
time.sleep(1)
                # required to get python 2 working (Issue #50)
self._file.seek(where)
else:
self._on_read_fn(data)
data = self._file.read()
if data:
self._on_read_fn(data)
def stop(self):
self.running = False
self._thread.join()
self._file.close()
class FileEventHandler(object):
def __init__(self, file_path, save_name, api, *args, **kwargs):
self.file_path = file_path
# Convert windows paths to unix paths
save_name = util.to_forward_slash_path(save_name)
self.save_name = save_name
self._api = api
def on_created(self):
pass
def on_modified(self):
pass
def on_renamed(self, new_path, new_name):
self.file_path = new_path
self.save_name = new_name
def finish(self):
pass
class FileEventHandlerOverwrite(FileEventHandler):
def __init__(self, file_path, save_name, api, file_pusher, *args, **kwargs):
super(FileEventHandlerOverwrite, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._file_pusher = file_pusher
def on_created(self):
self.on_modified()
def on_modified(self):
self._file_pusher.file_changed(self.save_name, self.file_path)
class FileEventHandlerOverwriteOnce(FileEventHandler):
"""This file handler is meant for files like metadata which may update during the run but should be uploaded upon creation"""
def __init__(self, file_path, save_name, api, file_pusher, *args, **kwargs):
super(FileEventHandlerOverwriteOnce, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._file_pusher = file_pusher
def on_created(self):
self._file_pusher.file_changed(self.save_name, self.file_path)
def finish(self):
self._file_pusher.file_changed(self.save_name, self.file_path)
class FileEventHandlerThrottledOverwrite(FileEventHandler):
"""This file handler uploads the file atmost every 15 seconds and only if it's size has increased by 20%"""
# Don't upload
RATE_LIMIT_SECONDS = 15
# Wait to upload until size has increased 20% from last upload
RATE_LIMIT_SIZE_INCREASE = 1.2
def __init__(self, file_path, save_name, api, file_pusher, *args, **kwargs):
super(FileEventHandlerThrottledOverwrite, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._file_pusher = file_pusher
self._last_uploaded_time = None
self._last_uploaded_size = 0
def on_created(self):
self.on_modified()
@property
def current_size(self):
return os.path.getsize(self.file_path)
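    # on_modified() below returns the seconds elapsed since the last upload
    # (0 when an upload just happened or the file is empty) so that subclasses
    # can layer additional time-based policies on top of this throttling.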
def on_modified(self):
# Don't upload anything if it's zero size.
if self.current_size == 0:
return 0
if self._last_uploaded_time:
# Check rate limit by time elapsed
time_elapsed = time.time() - self._last_uploaded_time
if time_elapsed < self.RATE_LIMIT_SECONDS:
return time_elapsed
# Check rate limit by size increase
size_increase = self.current_size / float(self._last_uploaded_size)
if size_increase < self.RATE_LIMIT_SIZE_INCREASE:
return time_elapsed
self.save_file()
return 0
def finish(self):
self._file_pusher.file_changed(self.save_name, self.file_path)
def save_file(self):
self._last_uploaded_time = time.time()
self._last_uploaded_size = self.current_size
self._file_pusher.file_changed(self.save_name, self.file_path)
class FileEventHandlerThrottledOverwriteMinWait(FileEventHandlerThrottledOverwrite):
"""This event handler will upload files every N seconds as it changes throttling as the size increases"""
TEN_MB = 10000000
HUNDRED_MB = 100000000
ONE_GB = 1000000000
def min_wait_for_size(self, size):
if self.current_size < self.TEN_MB:
return 60
elif self.current_size < self.HUNDRED_MB:
return 5 * 60
elif self.current_size < self.ONE_GB:
return 10 * 60
else:
return 20 * 60
def on_modified(self):
time_elapsed = super(FileEventHandlerThrottledOverwriteMinWait, self).on_modified()
# Check max elapsed time
if time_elapsed > self.min_wait_for_size(self.current_size):
self.save_file()
class FileEventHandlerOverwriteDeferred(FileEventHandler):
"""This file handler only updates at the end of the run"""
def __init__(self, file_path, save_name, api, file_pusher, *args, **kwargs):
super(FileEventHandlerOverwriteDeferred, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._file_pusher = file_pusher
def finish(self):
# We use copy=False to avoid possibly expensive copies, and because
# user files shouldn't still be changing at the end of the run.
self._file_pusher.file_changed(self.save_name, self.file_path, copy=False)
class FileEventHandlerConfig(FileEventHandler):
"""Set the config instead of uploading the file"""
RATE_LIMIT_SECONDS = 30
def __init__(self, file_path, save_name, api, file_pusher, run, *args, **kwargs):
self._api = api
super(FileEventHandlerConfig, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._last_sent = time.time() - self.RATE_LIMIT_SECONDS
self._file_pusher = file_pusher
self._run = run
self._thread = None
def on_created(self):
self._eventually_update()
def on_modified(self):
self._eventually_update()
def _eventually_update(self):
if self._thread:
# assume the existing thread will catch this update
return
if time.time() - self._last_sent >= self.RATE_LIMIT_SECONDS:
self._update()
else:
self._thread = threading.Timer(
self.RATE_LIMIT_SECONDS, self._thread_update)
self._thread.start()
def _thread_update(self):
try:
self._update()
finally:
self._thread = None
def _update(self):
try:
with open(self.file_path) as f:
config_dict = util.load_yaml(f)
except yaml.parser.ParserError:
wandb.termlog(
"Unable to parse config file; probably being modified by user process?")
return
# TODO(adrian): ensure the file content will exactly match Bucket.config
# ie. push the file content as a string
self._api.upsert_run(id=self._run.storage_id, config=config_dict)
self._file_pusher.file_changed(self.save_name, self.file_path)
self._last_sent = time.time()
def finish(self):
if self._thread:
# Cancel the current thread to keep moving
self._thread.cancel()
self._thread = None
self._update()
class FileEventHandlerSummary(FileEventHandler):
"""Read the file and add to the file push api"""
def __init__(self, file_path, save_name, api, file_pusher, run, *args, **kwargs):
super(FileEventHandlerSummary, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._api = api
self._file_pusher = file_pusher
def on_created(self):
self.on_modified()
def on_modified(self):
with open(self.file_path) as f:
self._api.get_file_stream_api().push(self.save_name, f.read())
def finish(self):
with open(self.file_path) as f:
self._api.get_file_stream_api().push(self.save_name, f.read())
self._file_pusher.file_changed(self.save_name, self.file_path)
class FileEventHandlerTextStream(FileEventHandler):
def __init__(self, *args, **kwargs):
self._seek_end = kwargs.pop('seek_end', None)
super(FileEventHandlerTextStream, self).__init__(*args, **kwargs)
self._tailer = None
if self._seek_end:
            # We need to call _setup in the case of resumed runs
            # because we will start logging immediately, so on_modified
# would seek the FileTailer to after the most recent log
self._setup()
def on_created(self):
if self._tailer:
logger.error(
'Streaming file created twice in same run: %s', self.file_path)
return
self._setup()
def on_modified(self):
if self._tailer:
return
self._setup()
def _setup(self):
fsapi = self._api.get_file_stream_api()
pusher = streaming_log.TextStreamPusher(fsapi, self.save_name)
def on_read(data):
pusher.write_string(data)
self._tailer = FileTailer(
self.file_path, on_read, seek_end=self._seek_end)
def finish(self):
if self._tailer:
self._tailer.stop()
self._tailer = None
class FileEventHandlerBinaryStream(FileEventHandler):
def __init__(self, *args, **kwargs):
super(FileEventHandlerBinaryStream, self).__init__(*args, **kwargs)
self._tailer = None
def on_created(self):
if self._tailer:
logger.error(
'Streaming file created twice in same run: %s', self.file_path)
return
self._setup()
def on_modified(self):
if self._tailer:
return
self._setup()
def _setup(self):
fsapi = self._api.get_file_stream_api()
def on_read(data):
fsapi.push(self.save_name, data)
self._tailer = FileTailer(self.file_path, on_read, binary=True)
class WriteSerializingFile(object):
"""Wrapper for a file object that serializes writes.
"""
def __init__(self, f):
self.lock = threading.Lock()
self.f = f
def write(self, *args, **kargs):
self.lock.acquire()
try:
self.f.write(*args, **kargs)
self.f.flush()
finally:
self.lock.release()
class Process(object):
"""Represents a running process with an interface that
mimics Popen's.
Only works on Unix-y systems.
TODO(adrian): probably rewrite using psutil.Process
"""
def __init__(self, pid):
self.returncode = None
self.pid = pid
def poll(self):
if self.returncode is None:
try:
if platform.system() == "Windows":
if windows.pid_running(self.pid) == False:
raise OSError(0, "Process isn't running")
else:
os.kill(self.pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
# we have no way of getting the real return code, so just set it to 0
self.returncode = 0
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
pass
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
return self.returncode
def wait(self):
while self.poll() is None:
time.sleep(1)
def interrupt(self):
os.kill(self.pid, signal.SIGINT)
def terminate(self):
os.kill(self.pid, signal.SIGTERM)
def kill(self):
os.kill(self.pid, signal.SIGKILL)
def format_run_name(run):
"Simple helper to not show display name if its the same as id"
return " "+run.name+":" if run.name and run.name != run.id else ":"
class RunStatusChecker(object):
"""Polls the backend periodically to check on this run's status.
For now, we just use this to figure out if the user has requested a stop.
TODO(adrnswanberg): Use this as more of a general heartbeat check.
"""
def __init__(self, run, api, stop_requested_handler, polling_interval=15):
self._run = run
self._api = api
self._polling_interval = polling_interval
self._stop_requested_handler = stop_requested_handler
self._shutdown_event = threading.Event()
self._thread = threading.Thread(target=self.check_status)
self._thread.start()
def check_status(self):
shutdown_requested = False
while not shutdown_requested:
try:
should_exit = self._api.check_stop_requested(
project_name=self._run.project_name(),
entity_name=self._run.entity,
run_id=self._run.id)
except CommError as e:
logger.exception("Failed to check stop requested status: %s" % e.exc)
should_exit = False
except:
logger.exception("An unknown error occurred while checking stop requested status. Continuing anyway..")
should_exit = False
if should_exit:
self._stop_requested_handler()
return
else:
shutdown_requested = self._shutdown_event.wait(self._polling_interval)
def shutdown(self):
self._shutdown_event.set()
self._thread.join()
class RunManager(object):
"""Manages a run's process, wraps its I/O, and synchronizes its files.
"""
CRASH_NOSYNC_TIME = 30
def __init__(self, run, project=None, tags=[], cloud=True, output=True, port=None):
self._run = run
self._tags = tags
self._cloud = cloud
self._output = output
self._port = port
# Connect to the server early to let it know we are starting up
self._socket = wandb_socket.Client(self._port)
self._api = run.api
self._project = self._resolve_project_name(project)
self._config = run.config
self._file_count = 0
self._init_file_observer()
# Calling .start() on _meta and _system_stats will spin a thread that reports system stats every 30 seconds
self._system_stats = stats.SystemStats(run, self._api)
self._meta = meta.Meta(self._api, self._run.dir)
self._meta.data["jobType"] = self._run.job_type
self._meta.data["mode"] = self._run.mode
if self._run.name:
self._meta.data["name"] = self._run.name
if self._run.notes:
self._meta.data["notes"] = self._run.notes
if self._project:
self._meta.data["project"] = self._project
if self._run.program:
self._meta.data["program"] = self._run.program
self._meta.data["args"] = self._run.args
# Set code path in config
if self._meta.data.get("codePath"):
self._config._set_wandb("code_path", util.to_forward_slash_path(
os.path.join("code", self._meta.data["codePath"])))
self._config.persist()
# Write our initial metadata after overriding the defaults
self._meta.write()
self._tensorboard_watchers = []
self._tensorboard_consumer = None
self._tensorboard_lock = threading.Lock()
self._watcher_queue = queue.PriorityQueue()
# We'll conditionally create one of these when running in headless mode.
self._run_status_checker = None
# This allows users to specify files they want uploaded during the run
self._user_file_policies = {
"end": [],
"live": []
}
self._file_policy_lock = threading.Lock()
logger.debug("Initialized sync for %s/%s", self._project, self._run.id)
def _resolve_project_name(self, project_name=None):
if project_name is not None:
return project_name
project_name = self._api.settings('project')
if project_name is not None:
return project_name
project_name = self._run.auto_project_name(self._api)
if project_name is not None:
return project_name
""" FILE SYNCING / UPLOADING STUFF """
def _init_file_observer(self):
self._file_pusher = file_pusher.FilePusher(self._api)
# FileEventHandlers (any of the classes at the top) indexed by "save_name," which is the file's path relative to the run directory
self._file_event_handlers = {}
# We use the polling observer because inotify was flaky and could require changes to sysctl.conf
self._file_observer = PollingObserver()
self._file_observer.schedule(self._per_file_event_handler(), self._run.dir, recursive=True)
# We lock this when the back end is down so Watchdog will keep track of all
# the file events that happen. Then, when the back end comes back up, we unlock
# it so all the outstanding events will get handled properly. Watchdog's queue
# only keeps at most one event per file.
self._file_observer_lock = threading.Lock()
# It starts acquired. We release it when we want to allow the events to happen.
# (ie. after the Run is successfully created)
self._block_file_observer()
# Start watching for file changes right away so we can be sure we don't miss anything.
# We don't have to worry about handlers actually being called because of the lock.
self._file_observer.start()
@property
def emitter(self):
try:
return next(iter(self._file_observer.emitters))
except StopIteration:
return None
@property
def run(self):
return self._run
def _per_file_event_handler(self):
"""Create a Watchdog file event handler that does different things for every file
"""
file_event_handler = PatternMatchingEventHandler()
file_event_handler.on_created = self._on_file_created
file_event_handler.on_modified = self._on_file_modified
file_event_handler.on_moved = self._on_file_moved
file_event_handler._patterns = [
os.path.join(self._run.dir, os.path.normpath('*'))]
# Ignore hidden files/folders
file_event_handler._ignore_patterns = [
'*.tmp',
os.path.join(self._run.dir, ".*"),
os.path.join(self._run.dir, "*/.*"),
]
for glob in self._api.settings("ignore_globs"):
file_event_handler._ignore_patterns.append(
os.path.join(self._run.dir, glob))
return file_event_handler
def _block_file_observer(self):
self._file_observer_lock.acquire()
def _unblock_file_observer(self):
self._file_observer_lock.release()
def _ensure_file_observer_is_unblocked(self):
self._block_file_observer()
self._unblock_file_observer()
def _end_file_syncing(self, exitcode):
try:
# avoid hanging if we crashed before the observer was started
if self._file_observer.is_alive():
                # rather unfortunately we need to manually do a final scan of the dir
# with `queue_events`, then iterate through all events before stopping
# the observer to catch all files written. First we need to prevent the
# existing thread from consuming our final events, then we process each one.
self._file_observer._timeout = 0
self._file_observer._stopped_event.set()
self._file_observer.join()
self.emitter.queue_events(0)
while True:
try:
self._file_observer.dispatch_events(self._file_observer.event_queue, 0)
except queue.Empty:
break
# Calling stop unschedules any inflight events so we manually handled them above
self._file_observer.stop()
# TODO: py2 TypeError: PyCObject_AsVoidPtr called with null pointer
except TypeError:
pass
# TODO: py3 SystemError: <built-in function stop> returned a result with an error set
except SystemError:
pass
# Ensure we've at least noticed every file in the run directory. Sometimes
# we miss things because asynchronously watching filesystems isn't reliable.
ignore_globs = self._api.settings("ignore_globs")
for dirpath, _, filenames in os.walk(self._run.dir):
for fname in filenames:
file_path = os.path.join(dirpath, fname)
save_name = os.path.relpath(file_path, self._run.dir)
if any([fnmatch.fnmatch(save_name, glob) for glob in ignore_globs]):
continue
if save_name not in self._file_event_handlers:
self._get_file_event_handler(file_path, save_name).on_created()
"""Stops file syncing/streaming but doesn't actually wait for everything to
finish. We print progress info later.
"""
# TODO: there was a case where _file_event_handlers was getting modified in the loop.
for handler in list(self._file_event_handlers.values()):
handler.finish()
self._file_pusher.finish()
self._api.get_file_stream_api().finish(exitcode)
# In Jupyter notebooks, wandb.init can be called multiple times in the same
# process, creating new runs each time. This ensures we get a new file stream
# thread
self._api._file_stream_api = None
# TODO: limit / throttle the number of adds / pushes
def _on_file_created(self, event):
logger.info('file/dir created: %s', event.src_path)
if os.path.isdir(event.src_path):
return None
self._file_count += 1
if self._file_count % 100 == 0:
self.emitter._timeout = int(self._file_count / 100) + 1
save_name = os.path.relpath(event.src_path, self._run.dir)
self._ensure_file_observer_is_unblocked()
self._get_file_event_handler(event.src_path, save_name).on_created()
def _on_file_modified(self, event):
logger.info('file/dir modified: %s', event.src_path)
if os.path.isdir(event.src_path):
return None
save_name = os.path.relpath(event.src_path, self._run.dir)
self._ensure_file_observer_is_unblocked()
self._get_file_event_handler(event.src_path, save_name).on_modified()
def _on_file_moved(self, event):
logger.info('file/dir moved: %s -> %s',
event.src_path, event.dest_path)
if os.path.isdir(event.dest_path):
return None
old_save_name = os.path.relpath(event.src_path, self._run.dir)
new_save_name = os.path.relpath(event.dest_path, self._run.dir)
self._ensure_file_observer_is_unblocked()
# We have to move the existing file handler to the new name, and update the stats
handler = self._get_file_event_handler(event.src_path, old_save_name)
self._file_event_handlers[new_save_name] = handler
del self._file_event_handlers[old_save_name]
self._file_pusher.rename_file(old_save_name, new_save_name, event.dest_path)
handler.on_renamed(event.dest_path, new_save_name)
def _get_file_event_handler(self, file_path, save_name):
"""Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory)
"""
self._file_pusher.update_file(save_name, file_path) # track upload progress
if save_name not in self._file_event_handlers:
if save_name == 'wandb-history.jsonl':
self._api.get_file_stream_api().set_file_policy(save_name, file_stream.JsonlFilePolicy())
self._file_event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-history.jsonl', self._api)
elif save_name == 'wandb-events.jsonl':
self._api.get_file_stream_api().set_file_policy(save_name, file_stream.JsonlFilePolicy())
self._file_event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-events.jsonl', self._api)
elif 'tfevents' in save_name or 'graph.pbtxt' in save_name:
# overwrite the tensorboard but not every reload -- just
# frequently enough to resemble realtime
self._file_event_handlers[save_name] = FileEventHandlerThrottledOverwrite(
file_path, save_name, self._api, self._file_pusher)
# Don't try to stream tensorboard files for now.
# elif 'tfevents' in save_name:
# # TODO: This is hard-coded, but we want to give users control
# # over streaming files (or detect them).
# self._api.get_file_stream_api().set_file_policy(save_name,
# file_stream.BinaryFilePolicy())
# self._file_event_handlers[save_name] = FileEventHandlerBinaryStream(
# file_path, save_name, self._api)
# Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
# during upload, the request to Google hangs (at least, this is my working
# theory). So for now we defer uploading everything til the end of the run.
# TODO: send wandb-summary during run. One option is to copy to a temporary
# file before uploading.
elif save_name == config.FNAME:
self._file_event_handlers[save_name] = FileEventHandlerConfig(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name == 'wandb-summary.json':
# Load the summary into the syncer process for meta etc to work
self._run.summary.load()
self._api.get_file_stream_api().set_file_policy(save_name, file_stream.SummaryFilePolicy())
self._file_event_handlers[save_name] = FileEventHandlerSummary(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name.startswith('media/') or save_name.startswith('code/') or save_name in ["requirements.txt", "diff.patch"]:
# Save media files and special wandb files immediately
self._file_event_handlers[save_name] = FileEventHandlerOverwrite(
file_path, save_name, self._api, self._file_pusher)
elif save_name == meta.METADATA_FNAME:
self._file_event_handlers[save_name] = FileEventHandlerOverwriteOnce(
file_path, save_name, self._api, self._file_pusher)
else:
Handler = FileEventHandlerOverwriteDeferred
for policy, globs in six.iteritems(self._user_file_policies):
if policy == "end":
continue
for g in globs:
if any(save_name in p for p in glob.glob(os.path.join(self._run.dir, g))):
if policy == "live":
Handler = FileEventHandlerThrottledOverwriteMinWait
self._file_event_handlers[save_name] = Handler(
file_path, save_name, self._api, self._file_pusher)
return self._file_event_handlers[save_name]
""" RUN MANAGEMENT STUFF """
def mirror_stdout_stderr(self):
"""Simple STDOUT and STDERR mirroring used by _init_jupyter"""
# TODO: Ideally we could start collecting logs without pushing
fs_api = self._api.get_file_stream_api()
io_wrap.SimpleTee(sys.stdout, streaming_log.TextStreamPusher(
fs_api, util.OUTPUT_FNAME, prepend_timestamp=True))
io_wrap.SimpleTee(sys.stderr, streaming_log.TextStreamPusher(
fs_api, util.OUTPUT_FNAME, prepend_timestamp=True, line_prepend='ERROR'))
def unmirror_stdout_stderr(self):
# Python 2 tests were failing...
if hasattr(sys.stdout, "orig_write"):
sys.stdout.write = sys.stdout.orig_write
sys.stderr.write = sys.stderr.orig_write
def _get_stdout_stderr_streams(self):
"""Sets up STDOUT and STDERR streams. Only call this once."""
if six.PY2 or not hasattr(sys.stdout, "buffer"):
if hasattr(sys.stdout, "fileno") and sys.stdout.isatty():
try:
stdout = os.fdopen(sys.stdout.fileno(), "w+", 0)
stderr = os.fdopen(sys.stderr.fileno(), "w+", 0)
# OSError [Errno 22] Invalid argument wandb
except OSError:
stdout = sys.stdout
stderr = sys.stderr
else:
stdout = sys.stdout
stderr = sys.stderr
else: # we write binary so grab the raw I/O objects in python 3
try:
stdout = sys.stdout.buffer.raw
stderr = sys.stderr.buffer.raw
except AttributeError:
# The testing environment and potentially others may have screwed with their
# io so we fallback to raw stdout / err
stdout = sys.stdout.buffer
stderr = sys.stderr.buffer
output_log_path = os.path.join(self._run.dir, util.OUTPUT_FNAME)
self._output_log = WriteSerializingFile(open(output_log_path, 'wb'))
stdout_streams = [stdout, self._output_log]
stderr_streams = [stderr, self._output_log]
if self._cloud:
# Tee stdout/stderr into our TextOutputStream, which will push lines to the cloud.
fs_api = self._api.get_file_stream_api()
self._stdout_stream = streaming_log.TextStreamPusher(
fs_api, util.OUTPUT_FNAME, prepend_timestamp=True)
self._stderr_stream = streaming_log.TextStreamPusher(
fs_api, util.OUTPUT_FNAME, line_prepend='ERROR',
prepend_timestamp=True)
stdout_streams.append(self._stdout_stream)
stderr_streams.append(self._stderr_stream)
return stdout_streams, stderr_streams
def _close_stdout_stderr_streams(self):
"""Close output-capturing stuff. This also flushes anything left in
the buffers.
"""
# we don't have tee_file's in headless mode
if self._stdout_tee.tee_file is not None:
self._stdout_tee.tee_file.close()
if self._stderr_tee.tee_file is not None:
self._stderr_tee.tee_file.close()
# TODO(adrian): we should close these even in headless mode
# but in python 2 the read thread doesn't stop on its own
# for some reason
self._stdout_tee.close_join()
self._stderr_tee.close_join()
if self._cloud:
# not set in dry run mode
self._stdout_stream.close()
self._stderr_stream.close()
self._output_log.f.close()
self._output_log = None
def _setup_resume(self, resume_status):
# write the tail of the history file
try:
history_tail = json.loads(resume_status['historyTail'])
jsonlfile.write_jsonl_file(os.path.join(self._run.dir, wandb_run.HISTORY_FNAME),
history_tail)
except ValueError:
logger.error("Couldn't parse history")
wandb.termwarn("Couldn't load recent history, resuming may not function properly")
# write the tail of the events file
try:
events_tail = json.loads(resume_status['eventsTail'])
jsonlfile.write_jsonl_file(os.path.join(self._run.dir, wandb_run.EVENTS_FNAME),
events_tail)
except ValueError:
logger.error("Couldn't parse system metrics / events")
# load the previous runs summary to avoid losing it, the user process will need to load it
self._run.summary.update(json.loads(resume_status['summaryMetrics'] or "{}"))
# load the previous runs config
self._run.config.load_json(json.loads(resume_status.get('config') or "{}"))
self._run.config.persist()
# Note: these calls need to happen after writing the files above. Because the access
# to self._run.events below triggers events to initialize, but we need the previous
# events to be written before that happens.
# output.log
self._api.get_file_stream_api().set_file_policy(
util.OUTPUT_FNAME, file_stream.CRDedupeFilePolicy(resume_status['logLineCount']))
# history
self._api.get_file_stream_api().set_file_policy(
wandb_run.HISTORY_FNAME, file_stream.JsonlFilePolicy(
start_chunk_id=resume_status['historyLineCount']))
self._file_event_handlers[wandb_run.HISTORY_FNAME] = FileEventHandlerTextStream(
self._run.history.fname, wandb_run.HISTORY_FNAME, self._api, seek_end=resume_status['historyLineCount'] > 0)
# events
self._api.get_file_stream_api().set_file_policy(
wandb_run.EVENTS_FNAME, file_stream.JsonlFilePolicy(
start_chunk_id=resume_status['eventsLineCount']))
self._file_event_handlers[wandb_run.EVENTS_FNAME] = FileEventHandlerTextStream(
self._run.events.fname, wandb_run.EVENTS_FNAME, self._api, seek_end=resume_status['eventsLineCount'] > 0)
def init_run(self, env=None):
"""Ensure we create a Run (Bucket) object
We either create it now or, if the API call fails for some reason (eg.
the network is down), we do it from a thread that we start. We hold
off file syncing and streaming until it succeeds.
Returns the initial step of the run, or None if we didn't create a run
"""
io_wrap.init_sigwinch_handler()
self._check_update_available(__version__)
if self._output:
wandb.termlog("Run data is saved locally in %s" % os.path.relpath(self._run.dir))
self._system_stats.start()
self._meta.start()
logger.info("system metrics and metadata threads started")
new_step = None
if self._cloud:
storage_id = None
if self._run.resume != 'never':
# DNS can hang for 60 seconds, we check for resume status in a thread
# TODO: Ideally this thread would continue retrying in case of failure.
# Currently we assume we're not resuming in the case of resume = auto,
# and we throw an error in the case of resume = must.
logger.info("checking resume status, waiting at most %d seconds" % InternalApi.HTTP_TIMEOUT)
if not self._project:
raise LaunchError(
"resume='must' but no project is specified. Pass project to init: wandb.init(project=\"...\")")
async_resume_status = util.async_call(self._api.run_resume_status, InternalApi.HTTP_TIMEOUT)
resume_status, thread = async_resume_status(self._api.settings("entity"), self._project, self._run.id)
if resume_status == None and self._run.resume == 'must':
if thread.is_alive():
raise LaunchError(
"resume='must' but we were unable to connect to the W&B service after %i seconds" % InternalApi.HTTP_TIMEOUT)
else:
raise LaunchError(
"resume='must' but run (%s) doesn't exist" % self._run.id)
if resume_status:
storage_id = resume_status['id']
logger.info("resuming run from id: %s" % storage_id)
self._project = self._resolve_project_name(self._project)
self._setup_resume(resume_status)
try:
history = json.loads(json.loads(resume_status['historyTail'])[-1])
except (IndexError,ValueError):
history = {}
new_step = history.get("_step", 0)
else:
new_step = 0
            # DNS lookups can hang for up to 60 seconds; we wait for HTTP_TIMEOUT (10s)
logger.info("upserting run before process can begin, waiting at most %d seconds" % InternalApi.HTTP_TIMEOUT)
async_upsert = util.async_call(self._upsert_run, timeout=InternalApi.HTTP_TIMEOUT)
_, self._upsert_run_thread = async_upsert(True, storage_id, env)
if self._upsert_run_thread.is_alive():
logger.error("Failed to connect to W&B servers after %i seconds.\
Letting user process proceed while attempting to reconnect." % InternalApi.HTTP_TIMEOUT)
return new_step
def _upsert_run(self, retry, storage_id, env):
"""Upsert the Run (ie. for the first time with all its attributes)
Arguments:
retry: (bool) Whether to retry if the connection fails (ie. if the backend is down).
False is useful so we can start running the user process even when the W&B backend
is down, and let syncing finish later.
Returns:
True if the upsert succeeded, False if it failed because the backend is down.
Throws:
LaunchError on other failures
"""
if retry:
num_retries = None
else:
num_retries = 0 # no retries because we want to let the user process run even if the backend is down
try:
self._run.save(
id=storage_id, num_retries=num_retries, api=self._api)
except CommError as e:
logger.exception("communication error with wandb %s" % e.exc)
# TODO: Get rid of str contains check
if self._run.resume == 'never' and 'exists' in str(e):
raise LaunchError(
"resume='never' but run (%s) exists" % self._run.id)
else:
# Detect bad request code -- this is usually trying to
# create a run that has been already deleted
if (isinstance(e.exc, requests.exceptions.HTTPError) and
e.exc.response.status_code == 400):
raise LaunchError(
'Failed to connect to W&B. See {} for details.'.format(
util.get_log_file_path()))
if isinstance(e.exc, (requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.ConnectionError)):
wandb.termerror(
'Failed to connect to W&B. Retrying in the background.')
return False
launch_error_s = 'Launch exception: {}\nTo disable wandb syncing set WANDB_MODE=dryrun'.format(e)
raise LaunchError(launch_error_s)
if self._output:
if self._run.resumed:
run_state_str = "Resuming run"
else:
run_state_str = "Syncing run"
wandb.termlog("{} {}".format(run_state_str, click.style(self._run.name, fg="yellow")))
try:
url = self._run.get_url(self._api)
emojis = {}
if platform.system() != "Windows":
emojis = dict(star="⭐️", broom="🧹", rocket="🚀")
project_url = self._run.get_project_url(self._api)
wandb.termlog("{} View project at {}".format(
emojis.get("star", ""),
click.style(project_url, underline=True, fg='blue')))
sweep_url = self._run.get_sweep_url(self._api)
if sweep_url:
wandb.termlog("{} View sweep at {}".format(
emojis.get("broom", ""),
click.style(sweep_url, underline=True, fg='blue')))
wandb.termlog("{} View run at {}".format(
emojis.get("rocket", ""),
click.style(url, underline=True, fg='blue')))
except CommError as e:
wandb.termwarn(e.message)
wandb.termlog("Run `wandb off` to turn off syncing.")
env = self._run.set_environment(environment=env)
if wandb_env.should_save_code():
logger.info("saving patches")
self._api.save_patches(self._run.dir)
if env.get("SPELL_RUN_URL"):
self._api.sync_spell(self._run, env)
logger.info("saving pip packages")
self._api.save_pip(self._run.dir)
logger.info("initializing streaming files api")
self._api.get_file_stream_api().set_default_file_policy(
util.OUTPUT_FNAME, file_stream.CRDedupeFilePolicy())
self._api.get_file_stream_api().start()
self._project = self._api.settings("project")
# unblock file syncing and console streaming, which need the Run to have a .storage_id
logger.info("unblocking file change observer, beginning sync with W&B servers")
self._unblock_file_observer()
return True
def shutdown(self, exitcode=0):
"""Stops system stats, streaming handlers, and uploads files without output, used by wandb.monitor"""
logger.info("shutting down system stats and metadata service")
self._system_stats.shutdown()
self._meta.shutdown()
for watcher in self._tensorboard_watchers:
watcher.shutdown()
if self._tensorboard_consumer:
self._tensorboard_consumer.shutdown()
if self._run_status_checker:
self._run_status_checker.shutdown()
self._run.history.close()
if self._cloud:
logger.info("stopping streaming files and file change observer")
self._end_file_syncing(exitcode)
def run_user_process(self, program, args, env):
"""Launch a user process, capture its output, and sync its files to the backend.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc.
"""
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
if platform.system() == "Windows":
# PTYs don't work in windows so we use pipes.
self._stdout_tee = io_wrap.Tee.pipe(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pipe(*stderr_streams)
# Seems like the following actually isn't necessary on Windows
# TODO(adrian): we may need to do the following if we use pipes instead of PTYs
# because Python on Unix doesn't like writing UTF-8 to files
# tell child python interpreters we accept utf-8
# env['PYTHONIOENCODING'] = 'UTF-8'
else:
self._stdout_tee = io_wrap.Tee.pty(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pty(*stderr_streams)
command = [program] + list(args)
runner = util.find_runner(program)
if runner:
command = runner + command
if platform.system() == "Windows":
command = ' '.join(windows.quote_arg(arg) for arg in command)
else:
command = ' '.join(six.moves.shlex_quote(arg) for arg in command)
self._stdout_stream.write_string(command + "\n\n")
try:
self.proc = subprocess.Popen(
command,
env=env,
stdout=self._stdout_tee.tee_file,
stderr=self._stderr_tee.tee_file,
shell=True,
)
self._run.pid = self.proc.pid
except (OSError, IOError):
raise Exception('Could not find program: %s' % command)
self._sync_etc()
def wrap_existing_process(self, pid, stdout_read_fd, stderr_read_fd, port=None):
"""Do syncing, etc. for an already-running process.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc.
"""
stdout_read_file = os.fdopen(stdout_read_fd, 'rb')
stderr_read_file = os.fdopen(stderr_read_fd, 'rb')
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
self._stdout_tee = io_wrap.Tee(stdout_read_file, *stdout_streams)
self._stderr_tee = io_wrap.Tee(stderr_read_file, *stderr_streams)
self.proc = Process(pid)
self._run.pid = pid
logger.info("wrapping existing process %i" % pid)
try:
self.init_run()
except LaunchError as e:
logger.exception("catostrophic launch error")
wandb.termerror(str(e))
util.sentry_exc(e)
self._socket.launch_error()
return
if io_wrap.SIGWINCH_HANDLER is not None:
# SIGWINCH_HANDLER (maybe) gets set in self.init_run()
io_wrap.SIGWINCH_HANDLER.add_fd(stdout_read_fd)
io_wrap.SIGWINCH_HANDLER.add_fd(stderr_read_fd)
# Signal the main process that we're all hooked up
logger.info("informing user process we are ready to proceed")
self._socket.ready()
self._sync_etc(headless=True)
def _check_update_available(self, current_version):
timeout = 2 # Two seconds.
pypi_url = 'https://pypi.org/pypi/wandb/json'
try:
data = requests.get(pypi_url, timeout=timeout).json()
latest_version = data['info']['version']
except:
# Any issues whatsoever, just skip the latest version check.
return
# Return if no update is available
if parse_version(latest_version) <= parse_version(current_version):
return
# A new version is available!
wandb.termlog(
"Wandb version %s is available! To upgrade, please run:\n $ pip install wandb --upgrade" % latest_version)
def update_user_file_policy(self, policy):
with self._file_policy_lock:
for path in glob.glob(policy["glob"]):
save_name = os.path.relpath(path, self._run.dir)
# Remove the existing handler if we haven't already made it live
current = self._file_event_handlers.get(save_name)
is_live = isinstance(current, FileEventHandlerThrottledOverwriteMinWait)
if current and policy["policy"] == "live" and not is_live:
del self._file_event_handlers[save_name]
self._user_file_policies[policy["policy"]].append(policy["glob"])
def start_tensorboard_watcher(self, logdir, save=True):
try:
from wandb.tensorboard.watcher import Watcher, Consumer
dirs = [logdir] + [w.logdir for w in self._tensorboard_watchers]
rootdir = os.path.dirname(os.path.commonprefix(dirs))
if os.path.isfile(logdir):
filename = os.path.basename(logdir)
else:
filename = ""
# Tensorboard loads all tfevents files in a directory and prepends
# their values with the path. Passing namespace to log allows us
# to nest the values in wandb
namespace = logdir.replace(filename, "").replace(
rootdir, "").strip(os.sep)
# TODO: revisit this heuristic, it exists because we don't know the
# root log directory until more than one tfevents file is written to
if len(dirs) == 1 and namespace not in ["train", "validation"]:
namespace = None
with self._tensorboard_lock:
self._tensorboard_watchers.append(Watcher(logdir, self._watcher_queue, namespace=namespace, save=save))
if self._tensorboard_consumer is None:
self._tensorboard_consumer = Consumer(self._watcher_queue)
self._tensorboard_consumer.start()
self._tensorboard_watchers[-1].start()
return self._tensorboard_watchers
except ImportError:
wandb.termerror("Couldn't import tensorboard, not streaming events. Run `pip install tensorboard`")
def _sync_etc(self, headless=False):
# Ignore SIGQUIT (ctrl-\). The child process will handle it, and we'll
# exit when the child process does.
#
# We disable these signals after running the process so the child doesn't
# inherit this behaviour.
try:
signal.signal(signal.SIGQUIT, signal.SIG_IGN)
except (AttributeError, ValueError): # SIGQUIT doesn't exist on windows, we can't use signal.signal in threads for tests
pass
# When not running in agent mode, start a status checker.
# TODO(adrnswanberg): Remove 'stop' command checking in agent code,
# and unconditionally start the status checker.
if self._run.sweep_id is None:
def stop_handler():
if isinstance(self.proc, Process):
# self.proc is a `Process` whenever we're the child process.
self.proc.interrupt()
else:
sig = signal.SIGINT
# We only check for windows in this block because on windows we
# always use `wandb run` (meaning we're the parent process).
if platform.system() == "Windows":
sig = signal.CTRL_C_EVENT # pylint: disable=no-member
self.proc.send_signal(sig)
if self._cloud:
self._run_status_checker = RunStatusChecker(
self._run, self._api, stop_requested_handler=stop_handler)
# Add a space before user output
wandb.termlog()
if wandb_env.get_show_run():
try:
webbrowser.open_new_tab(self._run.get_url(self._api))
except CommError:
pass
exitcode = None
try:
payload = b''
parse = False
logger.info("entering loop for messages from user process")
while True:
res = bytearray()
# We received multiple messages from the last socket read
if payload.find(b'\0') != -1:
res = payload
payload = b''
else:
try:
res = self._socket.recv(1024)
except socket.error as e:
# https://stackoverflow.com/questions/16094618/python-socket-recv-and-signals
if e.errno == errno.EINTR or isinstance(e, socket.timeout):
pass
else:
raise e
term = res.find(b'\0')
if term != -1:
payload += res[:term]
parse = True
else:
payload += res
if parse:
logger.info("received message from user process: %s" % payload.decode('utf8'))
try:
parsed = json.loads(payload.decode('utf8'))
except ValueError:
parsed = {}
if parsed.get("exitcode") is not None:
exitcode = parsed["exitcode"]
break
elif parsed.get("save_policy"):
self.update_user_file_policy(parsed["save_policy"])
payload = b''
parse = False
elif parsed.get("tensorboard"):
if parsed["tensorboard"].get("logdir"):
self.start_tensorboard_watcher(parsed["tensorboard"]["logdir"], parsed["tensorboard"]["save"])
payload = b''
parse = False
else:
message = "Invalid message received from child process: %s" % str(
payload)
wandb.termerror(message)
util.sentry_exc(message)
break
new_start = term + 1
# There's more to parse, add the remaining bytes
if len(res) > new_start:
payload = res[new_start:]
else:
exitcode = self.proc.poll()
if exitcode is not None:
break
time.sleep(1)
except KeyboardInterrupt:
logger.info("process received interrupt signal, shutting down")
exitcode = 255
if headless:
wandb.termlog('Ctrl-c pressed.')
else:
wandb.termlog(
'Ctrl-c pressed; waiting for program to end. Press ctrl-c again to kill it.')
try:
logger.info("waiting for process to finish")
while self.proc.poll() is None:
time.sleep(0.1)
except KeyboardInterrupt:
pass
if self.proc.poll() is None:
logger.info("killing user process")
wandb.termlog('Program still alive. Killing it.')
try:
self.proc.kill()
except OSError:
pass
"""TODO(adrian): garbage that appears in the logs sometimes
Exception ignored in: <bound method Popen.__del__ of <subprocess.Popen object at 0x111adce48>>
Traceback (most recent call last):
File "/Users/adrian/.pyenv/versions/3.6.0/Python.framework/Versions/3.6/lib/python3.6/subprocess.py", line 760, in __del__
AttributeError: 'NoneType' object has no attribute 'warn'
"""
if exitcode is None:
exitcode = 254
wandb.termlog(
'Killing program failed; syncing files anyway. Press ctrl-c to abort syncing.')
else:
if exitcode == 0:
wandb.termlog('Program ended successfully.')
resume_path = os.path.join(wandb.wandb_dir(), wandb_run.RESUME_FNAME)
if os.path.exists(resume_path):
os.remove(resume_path)
else:
wandb.termlog(
'Program failed with code %d. Press ctrl-c to abort syncing.' % exitcode)
self._meta.data["exitcode"] = exitcode
if exitcode == 0:
self._meta.data["state"] = "finished"
elif exitcode == 255:
self._meta.data["state"] = "killed"
else:
self._meta.data["state"] = "failed"
# TODO(adrian): these can be slow to complete (due to joining?)
logger.info("closing log streams and sending exitcode to W&B")
self._close_stdout_stderr_streams()
self.shutdown(exitcode)
crash_nosync_time = wandb_env.get_crash_nosync_time(self.CRASH_NOSYNC_TIME)
# If we're not syncing to the cloud, we're done
if not self._cloud:
wandb.termlog("You can sync this run to the cloud by running: ")
wandb.termlog("wandb sync %s" % os.path.relpath(self._run.dir))
sys.exit(exitcode)
elif exitcode != 0 and crash_nosync_time and time.time() - START_TIME < crash_nosync_time:
wandb.termlog("Process crashed early, not syncing files")
logger.info("process only ran for %d seconds, not syncing files" % (time.time() - START_TIME))
sys.exit(exitcode)
# Show run summary/history
self._run.summary.load()
summary = self._run.summary._json_dict
if len(summary):
logger.info("rendering summary")
wandb.termlog('Run summary:')
max_len = max([len(k) for k in summary.keys()])
format_str = ' {:>%s} {}' % max_len
for k, v in summary.items():
# arrays etc. might be too large. for now we just don't print them
if isinstance(v, six.string_types):
if len(v) >= 20:
v = v[:20] + '...'
wandb.termlog(format_str.format(k, v))
elif isinstance(v, numbers.Number):
wandb.termlog(format_str.format(k, v))
self._run.history.load()
history_keys = self._run.history.keys()
# Only print sparklines if the terminal is utf-8
# In some python 2.7 tests sys.stdout is a 'cStringIO.StringO' object
# which doesn't have the attribute 'encoding'
        if len(history_keys) and hasattr(sys.stdout, 'encoding') and (sys.stdout.encoding or '').replace('-', '_').upper() == 'UTF_8':  # accept 'UTF-8' / 'utf-8' spellings
logger.info("rendering history")
wandb.termlog('Run history:')
max_len = max([len(k) for k in history_keys])
for key in history_keys:
vals = util.downsample(self._run.history.column(key), 40)
if any((not isinstance(v, numbers.Number) for v in vals)):
continue
line = sparkline.sparkify(vals)
format_str = u' {:>%s} {}' % max_len
wandb.termlog(format_str.format(key, line))
wandb_files = set([save_name for save_name in self._file_pusher.files() if util.is_wandb_file(save_name)])
media_files = set([save_name for save_name in self._file_pusher.files() if save_name.startswith('media')])
other_files = set(self._file_pusher.files()) - wandb_files - media_files
logger.info("syncing files to cloud storage")
if other_files:
wandb.termlog('Syncing files in %s:' % os.path.relpath(self._run.dir))
for save_name in sorted(other_files):
wandb.termlog(' %s' % save_name)
wandb.termlog('plus {} W&B file(s) and {} media file(s)'.format(len(wandb_files), len(media_files)))
else:
wandb.termlog('Syncing {} W&B file(s) and {} media file(s)'.format(len(wandb_files), len(media_files)))
self._file_pusher.update_all_files()
self._file_pusher.print_status()
try:
url = self._run.get_url(self._api)
wandb.termlog('Synced{} {}'.format(format_run_name(self._run), url))
logger.info("syncing complete: %s" % url)
except CommError as e:
wandb.termwarn(e.message)
sys.exit(exitcode)
|
segment_coco.py
|
import argparse
import json
import logging
import os
import threading
from os.path import exists, join, split, dirname
import time
import numpy as np
import shutil
import sys
from PIL import Image
import torch
import torch.utils.data
from torch import nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import dla_up
import data_transforms as transforms
import dataset
from torch.utils.tensorboard import SummaryWriter
from ptc_dataset import BasicDataset
from dataset_trans import BasicDataset #TODO
from dataset_coco import COCOSeg
import copy
from torch.utils.data import DataLoader, random_split
import sys
sys.path.append('lib/')
try:
from modules import batchnormsync
HAS_BN_SYNC = True
except ImportError:
HAS_BN_SYNC = False
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
global_step = 0 #TODO
CITYSCAPE_PALLETE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
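# Presumably the standard 19-class Cityscapes colour palette (road, sidewalk,
# building, ..., bicycle) with an extra black entry for ignored pixels.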
def f_print(*obj):
fn='./output.log'
print(obj)
with open(fn, 'a+') as f:
print(obj, file=f)
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, list_dir=None,
out_name=False, out_size=False, binary=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.out_size = out_size
self.binary = binary
self.read_lists()
def __getitem__(self, index):
image = Image.open(join(self.data_dir, self.image_list[index]))
data = [image]
if self.label_list is not None:
label_map = Image.open(join(self.data_dir, self.label_list[index]))
if self.binary:
label_map = Image.fromarray(
(np.array(label_map) > 0).astype(np.uint8))
data.append(label_map)
if self.bbox_list is not None:
data.append(Image.open(join(self.data_dir, self.bbox_list[index])))
data = list(self.transforms(*data))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
if self.out_size:
data.append(torch.from_numpy(np.array(image.size, dtype=int)))
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
bbox_path = join(self.list_dir, self.phase + '_bboxes.txt')
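        # Each <phase>_{images,labels,bboxes}.txt file is expected to contain
        # one path per line, relative to data_dir; labels and bboxes are
        # optional, but when present they must line up with the image list.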
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
if exists(bbox_path):
self.bbox_list = [line.strip() for line in open(bbox_path, 'r')]
assert len(self.image_list) == len(self.bbox_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(join(self.data_dir,
self.label_list[index])))
# data = list(self.transforms(*data))
if len(data) > 1:
out_data = list(self.transforms(*data))
else:
out_data = [self.transforms(*data)]
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, eval_score=None, print_freq=10, cityscape=False):
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
pos_scores = AverageMeter()
neg_scores = AverageMeter()
recall_scores = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():#TODO
for i, (input, target) in enumerate(val_loader):
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda().float()
# target = target.cuda(async=True)
target = target.cuda().long()
output = model(input)[0]
# print("model done")
# print(output)
loss = criterion(output, target)
# print("loss done")
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
# losses.update(loss.data[0], input.size(0))
losses.update(loss.item(), input.size(0))
# print("model done")
if eval_score is not None:
                # TODO changed
score.update(eval_score(output, target), input.size(0))
recall_scores.update(recall(output, target), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.avg:.3f})\t'
'recall_score {recall_score.val:.3f} ({recall_score.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score, recall_score=recall_scores)) #TODO , flush=True
    print(' * Score {top1.avg:.3f}\trecall_score {recall_score.avg:.3f}'.format(top1=score, recall_score=recall_scores))
return score.avg, losses.avg, recall_scores.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
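# Minimal usage sketch for AverageMeter (illustration only): `avg` is the running mean
# weighted by the sample count passed as `n`, which is how the training and validation
# loops above aggregate per-batch losses and scores.
def _average_meter_example():
    meter = AverageMeter()
    meter.update(2.0, n=4)   # a batch of 4 samples with mean value 2.0
    meter.update(4.0, n=1)   # a single sample with value 4.0
    # sum = 2.0*4 + 4.0*1 = 12.0, count = 5, so avg = 2.4
    assert abs(meter.avg - 2.4) < 1e-9
    return meter.avg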
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
# correct = correct[target != 255]
correct = correct[target != 255]
correct = correct.view(-1)
if correct.size(0)!=0:
score = correct.float().sum(0).mul(100.0 / correct.size(0))
else:
return 100.
# return score.data[0]
return score.item()
def recall(output, t_target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = copy.deepcopy(t_target)
target[torch.where(target==0)] = 255 #Ignore background
target = target.view(1, -1)
correct = pred.eq(target)
# correct = correct[target != 255]
correct = correct[target != 255]
correct = correct.view(-1)
if correct.size(0)!=0:
score = correct.float().sum(0).mul(100.0 / correct.size(0))
else:
return 100.
# return score.data[0]
return score.item()
def mIOU(output, target, cityscape=False):
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
if cityscape:
pred[torch.where(pred==13)] = 1
pred[torch.where(pred==14)] = 1
pred[torch.where(pred==15)] = 1
pred[torch.where(pred!=1)] = 0
positive_target = target[target == 1]
positive_pred = pred[target == 1]
negtive_target = target[target == 0]
negtive_pred = pred[target == 0]
positive_union = positive_pred.eq(positive_target)
positive_union = positive_union.view(-1).float().sum(0)
positive_target = target[target != -1]
positive_pred = pred[target != -1]
pos_section_pred = positive_pred.eq(1).view(-1).float().sum(0)
pos_section_target = positive_target.eq(1).view(-1).float().sum(0)
pos_intersection = pos_section_pred + pos_section_target - positive_union
if pos_intersection>0:
pos_score = positive_union.mul(100.0 / pos_intersection).item()
else:
pos_score = 100.0
negtive_union = negtive_pred.eq(negtive_target)
negtive_union = negtive_union.view(-1).float().sum(0)
negtive_target = target[target != -1]
negtive_pred = pred[target != -1]
neg_section_pred = negtive_pred.eq(0).view(-1).float().sum(0)
neg_section_target = negtive_target.eq(0).view(-1).float().sum(0)
neg_intersection = neg_section_pred + neg_section_target - negtive_union
if neg_intersection>0:
neg_score = negtive_union.mul(100.0 / neg_intersection).item()
else:
neg_score = 100.0
#print("pos", pos_score, "neg", neg_score)
return (pos_score + neg_score) / 2
def posIOU(output, target, cityscape=False):
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
if cityscape:
pred[torch.where(pred==13)] = 1
pred[torch.where(pred==14)] = 1
pred[torch.where(pred==15)] = 1
pred[torch.where(pred!=1)] = 0
positive_target = target[target == 1]
positive_pred = pred[target == 1]
# negtive_target = target[target == 0]
# negtive_pred = pred[target == 0]
positive_union = positive_pred.eq(positive_target)
positive_union = positive_union.view(-1).float().sum(0)
positive_target = target[target != -1]
positive_pred = pred[target != -1]
pos_section_pred = positive_pred.eq(1).view(-1).float().sum(0)
pos_section_target = positive_target.eq(1).view(-1).float().sum(0)
pos_intersection = pos_section_pred + pos_section_target - positive_union
if pos_intersection>0:
pos_score = positive_union.mul(100.0 / pos_intersection).item()
else:
pos_score = 100.0
# negtive_union = negtive_pred.eq(negtive_target)
# negtive_union = negtive_union.view(-1).float().sum(0)
# negtive_target = target[target != -1]
# negtive_pred = pred[target != -1]
# neg_section_pred = negtive_pred.eq(0).view(-1).float().sum(0)
# neg_section_target = negtive_target.eq(0).view(-1).float().sum(0)
# neg_intersection = neg_section_pred + neg_section_target - negtive_union
# if neg_intersection>0:
# neg_score = negtive_union.mul(100.0 / neg_intersection).item()
# else:
# neg_score = 100.0
#print("pos", pos_score, "neg", neg_score)
return pos_score
def negIOU(output, target, cityscape=False):
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
if cityscape:
pred[torch.where(pred==13)] = 1
pred[torch.where(pred==14)] = 1
pred[torch.where(pred==15)] = 1
pred[torch.where(pred!=1)] = 0
# positive_target = target[target == 1]
# positive_pred = pred[target == 1]
negtive_target = target[target == 0]
negtive_pred = pred[target == 0]
# positive_union = positive_pred.eq(positive_target)
# positive_union = positive_union.view(-1).float().sum(0)
# positive_target = target[target != -1]
# positive_pred = pred[target != -1]
# pos_section_pred = positive_pred.eq(1).view(-1).float().sum(0)
# pos_section_target = positive_target.eq(1).view(-1).float().sum(0)
# pos_intersection = pos_section_pred + pos_section_target - positive_union
# if pos_intersection>0:
# pos_score = positive_union.mul(100.0 / pos_intersection).item()
# else:
# pos_score = 100.0
negtive_union = negtive_pred.eq(negtive_target)
negtive_union = negtive_union.view(-1).float().sum(0)
negtive_target = target[target != -1]
negtive_pred = pred[target != -1]
neg_section_pred = negtive_pred.eq(0).view(-1).float().sum(0)
neg_section_target = negtive_target.eq(0).view(-1).float().sum(0)
neg_intersection = neg_section_pred + neg_section_target - negtive_union
if neg_intersection>0:
neg_score = negtive_union.mul(100.0 / neg_intersection).item()
else:
neg_score = 100.0
#print("pos", pos_score, "neg", neg_score)
return neg_score
def train(train_loader, model, criterion, optimizer, epoch, lr_scheduler, writer,
eval_score=None, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
pos_scores = AverageMeter()
neg_scores = AverageMeter()
scores = AverageMeter()
recall_score = AverageMeter()
global global_step
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# pdb.set_trace()
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda().float()
# target = target.cuda(async=True)
target = target.cuda().long()
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
writer.add_scalar('Loss/train', loss.item(), global_step)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
# losses.update(loss.data[0], input.size(0))
losses.update(loss.item(), input.size(0))
if eval_score is not None:
scores.update(eval_score(output, target_var), input.size(0))
recall_score.update(recall(output, target_var), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step() #TODO
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
global_step += 1
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})\t'
'recall_Score {recall_score.val:.3f} ({recall_score.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores, recall_score=recall_score))
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
args = parse_args()
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, join(args.checkpoint_dir, 'model_best.pth.tar'))
def train_seg(args):
writer = SummaryWriter(comment = args.log)
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
checkpoint_dir = args.checkpoint_dir
print(' '.join(sys.argv))
# logger.info(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
pretrained_base = args.pretrained_base
# print(dla_up.__dict__.get(args.arch))
single_model = dla_up.__dict__.get(args.arch)(
classes=args.classes, down_ratio=args.down)
model = torch.nn.DataParallel(single_model).cuda()
print('model_created')
if args.bg_weight > 0:
weight_array = np.ones(args.classes, dtype=np.float32)
weight_array[0] = args.bg_weight
weight = torch.from_numpy(
weight_array)
# criterion = nn.NLLLoss2d(ignore_index=255, weight=weight)
criterion = nn.NLLLoss2d(ignore_index=255, weight=weight)
else:
# criterion = nn.NLLLoss2d(ignore_index=255)
criterion = nn.NLLLoss2d(ignore_index=255)
criterion.cuda()
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.append(transforms.RandomCrop(crop_size)) #TODO
if args.random_color:
t.append(transforms.RandomJitter(0.4, 0.4, 0.4))
t.extend([transforms.RandomHorizontalFlip()]) #TODO
t_val = []
t_val.append(transforms.RandomCrop(crop_size))
train_json = '/shared/xudongliu/COCO/annotation2017/annotations/instances_train2017.json'
train_root = '/shared/xudongliu/COCO/train2017/train2017'
my_train = COCOSeg(train_root, train_json, transforms.Compose(t), is_train=True)
val_json = '/shared/xudongliu/COCO/annotation2017/annotations/instances_val2017.json'
val_root = '/shared/xudongliu/COCO/2017val/val2017'
my_val = COCOSeg(val_root, val_json, transforms.Compose(t_val), is_train=True)
train_loader = torch.utils.data.DataLoader(
my_train, batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
my_val, batch_size=20, shuffle=False, num_workers=num_workers,pin_memory=True) #TODO batch_size
print("loader created")
# optimizer = torch.optim.Adam(single_model.optim_parameters(),
# args.lr,
# weight_decay=args.weight_decay) #TODO adam optimizer
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,momentum=args.momentum,
weight_decay=args.weight_decay) #TODO adam optimizer
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=32) #TODO
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion, eval_score=accuracy)
return
# TODO test val
# print("test val")
# prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
print('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, lr_scheduler,
eval_score=accuracy, writer=writer)
checkpoint_path = os.path.join(checkpoint_dir,'checkpoint_{}.pth.tar'.format(epoch))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict()
}, is_best=False, filename=checkpoint_path)
# evaluate on validation set
prec1, loss_val, recall_val = validate(val_loader, model, criterion, eval_score=accuracy)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
writer.add_scalar('accuracy/epoch', prec1, epoch+1)
writer.add_scalar('loss/epoch', loss_val, epoch+1)
writer.add_scalar('recall/epoch', recall_val, epoch+1)
checkpoint_path = os.path.join(checkpoint_dir,'checkpoint_{}.pth.tar'.format(epoch))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % args.save_freq == 0:
history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
shutil.copyfile(checkpoint_path, history_path)
writer.close()
def adjust_learning_rate(args, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10
every 30 epochs"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
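# Sketch of the two schedules implemented by adjust_learning_rate (illustration only; the
# args namespace and the dummy optimizer below are made up for the example).
def _lr_schedule_example():
    from types import SimpleNamespace
    dummy_optimizer = SimpleNamespace(param_groups=[{'lr': 0.0}])
    # 'step' mode: lr is divided by 10 every args.step epochs
    step_args = SimpleNamespace(lr=0.01, lr_mode='step', step=200, epochs=600)
    assert adjust_learning_rate(step_args, dummy_optimizer, epoch=0) == 0.01
    assert abs(adjust_learning_rate(step_args, dummy_optimizer, epoch=200) - 0.001) < 1e-12
    # 'poly' mode: lr decays polynomially towards 0 at args.epochs
    poly_args = SimpleNamespace(lr=0.01, lr_mode='poly', step=200, epochs=600)
    lr_mid = adjust_learning_rate(poly_args, dummy_optimizer, epoch=300)
    assert 0 < lr_mid < 0.01
    return lr_mid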
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
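# Small worked example (illustration only) of how fast_hist / per_class_iu are used in
# test() above: a confusion matrix is accumulated over flattened predictions and labels,
# and the per-class IoU is read off its diagonal.
def _iou_example():
    import numpy as np
    pred = np.array([0, 0, 1, 1, 1, 0])
    label = np.array([0, 0, 1, 1, 0, 0])
    hist = fast_hist(pred, label, n=2)
    # hist[i, j] counts pixels with true class i predicted as class j:
    # class 0: TP=3, FN=1, FP=0 -> IoU = 3 / 4 = 0.75
    # class 1: TP=2, FN=0, FP=1 -> IoU = 2 / 3
    ious = per_class_iu(hist)
    return ious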
def crop_image(image, size):
left = (image.size[0] - size[0]) // 2
upper = (image.size[1] - size[1]) // 2
right = left + size[0]
lower = upper + size[1]
return image.crop((left, upper, right, lower))
def save_output_images(predictions, filenames, output_dir, sizes=None):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_prob_images(prob, filenames, output_dir, sizes=None):
for ind in range(len(filenames)):
im = Image.fromarray(
(prob[ind][1].squeeze().data.cpu().numpy() * 255).astype(np.uint8))
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name, size) in enumerate(eval_data_loader):
data_time.update(time.time() - end)
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
prob = torch.exp(final)
if save_vis:
save_output_images(pred, name, output_dir, size)
if prob.size(1) == 2:
save_prob_images(prob, name, output_dir + '_prob', size)
else:
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALLETE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
print('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
print('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
ious = per_class_iu(hist) * 100
print(' '.join('{:.03f}'.format(i) for i in ious))
if has_gt: # val
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
return out
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALLETE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: # val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = dla_up.__dict__.get(args.arch)(
args.classes, down_ratio=args.down)
model = torch.nn.DataParallel(single_model).cuda()
    data_dir = args.data_dir
    info = dataset.load_dataset_info(data_dir)
    normalize = transforms.Normalize(mean=info.mean, std=info.std)
    scales = [0.5, 0.75, 1.25, 1.5, 1.75]
    # scales = [0.5, 0.75, 1.25, 1.5]
    t = []
    if args.crop_size > 0:
        t.append(transforms.PadToSize(args.crop_size))
    t.extend([transforms.ToTensor(), normalize])
    if args.ms:
        data = SegListMS(data_dir, phase, transforms.Compose(t), scales)
    else:
        data = SegList(data_dir, phase, transforms.Compose(t),
                       out_name=True, out_size=True,
                       binary=args.classes == 2)
test_loader = torch.utils.data.DataLoader(
data,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
# print("=> loaded checkpoint '{}' (epoch {})"
# .format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.ms:
mAP = test_ms(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt,
output_dir=out_dir,
scales=scales)
else:
mAP = test(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
print('mAP: ', mAP)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(
description='DLA Segmentation and Boundary Prediction')
parser.add_argument('cmd', choices=['train', 'test'])
    parser.add_argument('-d', '--data-dir', default=None)
parser.add_argument('-c', '--classes', default=0, type=int)
parser.add_argument('-s', '--crop-size', default=0, type=int)
parser.add_argument('--step', type=int, default=200)
parser.add_argument('--arch')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
# parser.add_argument('--train-samples', default=16000, type=int)
parser.add_argument('--loss', default='l1', type=str)
# parser.add_argument('--test-batch-size', type=int, default=1000,
# metavar='N',
# help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='how many batches to wait before logging '
'training status')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained-base', default=None,
help='use pre-trained model')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--down', default=2, type=int, choices=[2, 4, 8, 16],
help='Downsampling ratio of IDA network output, which '
'is then upsampled to the original resolution '
'with bilinear interpolation.')
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--lr-mode', default='step')
parser.add_argument('--bn-sync', action='store_true', default=False)
parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--random-color', action='store_true', default=False)
parser.add_argument('--save-freq', default=10, type=int)
parser.add_argument('--ms', action='store_true', default=False)
parser.add_argument('--bg-weight', type=float, default=-1)
parser.add_argument('--test-suffix', default='')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('-v', '--validation', dest='val', type=float, default=10.0,
help='Percent of the data that is used as validation (0-100)')
parser.add_argument('-i', '--checkpoint-dir')
parser.add_argument('--log')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# assert args.data_dir is not None
assert args.classes > 0
print(' '.join(sys.argv))
print(args)
return args
def main():
args = parse_args()
if not exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if args.bn_sync:
if HAS_BN_SYNC:
dla_up.set_bn(batchnormsync.BatchNormSync)
else:
print('batch normalization synchronization across GPUs '
'is not imported.')
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
# validate(val_loader, model, criterion, eval_score=None, print_freq=10)
def my_val():
args = parse_args()
val_dir_img = '/shared/xudongliu/data/argoverse-tracking/argo_track/val/image_02/'
val_dir_mask = '/shared/xudongliu/data/argoverse-tracking/argo_track/val/npy_mask/'
my_val = BasicDataset(val_dir_img, val_dir_mask, None, is_train=False)
val_loader = torch.utils.data.DataLoader(
# SegList(data_dir, 'val', transforms.Compose([
# transforms.RandomCrop(crop_size),
# # transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# normalize,
# ]),
# binary=(args.classes == 2)),
my_val, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,pin_memory=True)
single_model = dla_up.__dict__.get(args.arch)(
args.classes, down_ratio=args.down)
checkpoint = torch.load(args.resume)
# print(checkpoint['epoch'])
# model.load_state_dict(checkpoint['state_dict'])
single_model.load_state_dict(checkpoint)
model = torch.nn.DataParallel(single_model).cuda()
criterion = nn.NLLLoss2d(ignore_index=255)
score = validate(val_loader, model, criterion, eval_score=accuracy, print_freq=10, cityscape=True)
print(score)
if __name__ == '__main__':
main()
# my_val()
|
udpClient.py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
'''
@Time : 2021/03/16 10:35:28
@Author : Camille
@Version : 0.7beta
'''
from concurrent import futures
import socket
from sys import argv
import time
import threading
import struct
import uuid
import json
import os, subprocess
from concurrent.futures import thread, ThreadPoolExecutor
import queue
from auxiliaryTools import PrettyCode, ChangeRedis, BasicLogs
class Client():
def __init__(self) -> None:
socket.setdefaulttimeout(5)
self.config = Client._loadingConfig()
self.maternalIpInfo = None
self.tcpClientSocket = None
        # Threading configuration
        self.event = threading.Event()
        self.lock = threading.Lock()
        self.tcpOnline = queue.Queue(1)
        # Task pool
        self.taskPool = ThreadPoolExecutor(max_workers=10)
        # Message template (task report)
self.initializationTaskInfo = {
'flag': None,
'code': None,
'working': None,
'complete': [],
'oncall': []
}
        # Instantiate the logger
        logName = 'ant_{}.log'.format(time.strftime('%S_%M_%H_%d_%m_%Y'))
        self.logObj = BasicLogs.handler(logName=logName)
        # Instantiate the redis client
        self.redisObj = ChangeRedis(self.config.get('redisConfig'))
        PrettyCode.prettyPrint('redis server 连接成功。')
        # Pre-start checks
        self.checkBeforeStarting()
        # Log encryption switch
self.logEncrypt = True
def checkBeforeStarting(self):
        # Sanity checks before running, to avoid a bad start
        # Port check
pid = self._checkPort(6655)
if pid:
process = self._findProcess(pid)
self._killProcess(process)
def recvMsg(self) -> None:
# udp
self.udpClientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while 1:
if self.maternalIpInfo:
                # The local address is only known once the TCP connection is up
try:
self.udpClientSocket.bind(self.maternalIpInfo)
except Exception as e:
                    # Troubleshoot the UDP bind failure
self.logObj.logHandler().error(e)
self.checkBeforeStarting()
time.sleep(5)
self.udpClientSocket.bind(self.maternalIpInfo)
break
continue
PrettyCode.prettyPrint('UDP对象创建成功。')
        # Wait indefinitely for messages pushed by the server
self.udpClientSocket.settimeout(None)
while 1:
try:
data = self.udpClientSocket.recvfrom(1024)
recvMsg = data[0].decode('utf-8')
            except Exception as e:
                # Unexpected receive error
                self.logObj.logHandler().error(e)
                # Ask the server to resend and skip this iteration
                self.sendMsg(self.makeInfoMsg(self._structureADH33Msg(3, None)))
                continue
if recvMsg:
msg = '数据已接收:{}\n'.format(recvMsg)
logMsg = 'Data received - {}'.format(recvMsg)
self.logObj.logHandler().info(logMsg)
PrettyCode.prettyPrint(msg)
                # Normal acknowledgement
                self.sendMsg(self.makeInfoMsg(self._structureADH33Msg(1, recvMsg)))
                # Determine the message type
                if recvMsg.startswith('AC'):
                    # Redis task id message
                    tips = '开始执行任务,任务编号: {}'.format(msg)
                    PrettyCode.prettyPrint(tips)
                    # Execute the task
                    self._performOrderRedis(recvMsg)
                else:
                    # Plain CMD command
                    self._performOrderCMD(recvMsg)
continue
self.udpClientSocket.close()
def sendMsg(self, msg) -> None:
"""构建报头
Args:
msg (str): 发送的信息。
Raises:
e: 预料之外的错误。
"""
while 1:
msg = str(msg)
try:
if not self.tcpClientSocket:
break
                # Acquire the lock
self.lock.acquire()
msgPack = struct.pack('i', len(msg))
self.tcpClientSocket.send(msgPack)
self.tcpClientSocket.send(msg.encode('utf-8'))
PrettyCode.prettyPrint('发送成功。')
                # Release the lock
                self.lock.release()
                if 'keepAlive' not in msg:
                    # Only plain heartbeat packets keep looping; anything else is sent once
                    break
                # Send interval
time.sleep(5)
except socket.timeout as timeoutE:
                # Release the lock
                self.lock.release()
                PrettyCode.prettyPrint('发送超时,正在尝试重新发送。', 'ERROR')
                continue
            except Exception as e:
                # Release the lock
                self.lock.release()
                errorMsg = '{}{}'.format(self._errorCheck(e), ',现在重启TCP。')
                PrettyCode.prettyPrint(errorMsg, 'ERROR')
                # Drop the TCP client connection
self.tcpClientSocket = None
raise e
def makeInfoMsg(self, taskStatus: dict = {}) -> str:
        # Build the report message; defaults to 'keepAlive'; the 'flag' field is mandatory
if not taskStatus:
taskStatus = {
'flag': 'ADH18',
'phase': 1,
'ACK': 'keepAlive',
}
if 'flag' not in taskStatus.keys():
self.logObj.logHandler().error('msg need flag.')
raise ValueError('缺少flag值')
msg = json.dumps(taskStatus)
return msg
def TCPConnect(self) -> None:
while 1:
# tcp
if self.tcpOnline.empty():
                # Offline: create a new TCP socket
                tcpClientSocket = socket.socket()
                PrettyCode.prettyPrint('TCP对象创建成功。')
                # Reconnection attempt counter
                nOfRec = 0
                # Handle errors while connecting to the server
while 1:
recingMsg = '正在连接服务器中 {}'.format(nOfRec)
PrettyCode.prettyPrint(recingMsg)
try:
hostIP = self.config.get('serverConfig').get('host')
tcpClientSocket.connect((hostIP, 11451))
                        # Local protocol address associated with the socket
self.maternalIpInfo = (tcpClientSocket.getsockname()[0], 6655)
break
                    except Exception:
                        nOfRec += 1
                        continue
self.tcpOnline.put('ONLINE')
            # Connected: set the event to True
self.event.set()
PrettyCode.prettyPrint('服务器连接成功。')
self.tcpClientSocket = tcpClientSocket
time.sleep(10)
def heartbeat(self) -> None:
while 1:
            # Loop so that connection failures can be detected
if not self.tcpClientSocket:
break
            # Plain heartbeat packet
msg = self.makeInfoMsg()
try:
                # sendMsg loops internally,
                # so plain heartbeat packets keep being sent
self.sendMsg(msg)
except Exception as e:
                # Error in the heartbeat logic
errorMsg = '[hb Error]意料之外的错误,将关闭本次TCP连接。错误信息:{} - {}'.format(e, e.__traceback__.tb_lineno)
PrettyCode.prettyPrint(errorMsg, 'ERROR')
break
        # The heartbeat thread is ending
if self.tcpClientSocket:
self.tcpClientSocket.close()
@staticmethod
def performOrderResult(worker):
"""任务执行结果
Args:
worker (obj): sub对象。
Returns:
str: 任务结果信息。
"""
worker.add_done_callback(worker.result)
while 1:
if worker.done():
result = worker.result()
return result
time.sleep(1)
def _performOrderCMD(self, order: str) -> None:
"""执行CMD命令函数
Args:
order (str): CMD命令
"""
self.lock.acquire()
logMsg = 'Task started - {}'.format(order)
self.logObj.logHandler().info(logMsg)
worker = self.taskPool.submit(self.taskExecuteCMD, order, )
self.lock.release()
result = Client.performOrderResult(worker)
msg = '{} - 任务完成。'.format(order)
PrettyCode.prettyPrint(msg)
def _performOrderRedis(self, taskId: str, standardEnd=True) -> None:
"""执行Redis命令函数
Args:
taskId (str): 任务编号
standardEnd (bool, optional): 执行模式. Defaults to True.
"""
# 获取任务列表,从优先级最高到最低(zrange value 低的值优先级高) -> (任务,优先级)
try:
taskBook = self.redisObj.redisPointer().zrange(taskId, 0, -1, withscores=True, desc=True)
if taskBook:
                # Fetched successfully
                PrettyCode.prettyPrint('任务获取成功。')
                # Build ADH27 -> "task received" report
                initializationTaskInfo = {
                    'flag': 'ADH27',
                    'code': taskId,
                    'phase': 1,
                    'working': None,
                    'complete': [],
                    # Put every task on the pending list and report it
                    'oncall': [i[0] for i in taskBook],
                }
                # Tell the server the tasks were received and execution is about to start
                taskInfo = self.makeInfoMsg(initializationTaskInfo)
                # print('received report', taskInfo)
self.sendMsg(taskInfo)
else:
                # The task book is empty; notify the SERVER
raise ValueError('taskbook is null!')
except Exception as e:
            # Send an ADH33 report with RCC = 2
errorMsg = '{} - {}'.format(e, e.__traceback__.tb_lineno)
self.sendMsg(self.makeInfoMsg(self._structureADH33Msg(2, taskId, (errorMsg, ))))
PrettyCode.prettyPrint('任务获取失败。')
raise ValueError('任务获取失败。')
        # Start executing the tasks
        for task in taskBook:
            # Acquire the lock
self.lock.acquire()
msg = '开始执行 - {}'.format(task[0])
PrettyCode.prettyPrint(msg)
            # Submit the task to the thread pool -> (task, priority)
worker = self.taskPool.submit(self.taskExecuteCMD, task[0], )
self.lock.release()
            # Send the "executing" report
            initializationTaskInfo['phase'] = 2
            initializationTaskInfo['working'] = task[0]
            taskInfo = self.makeInfoMsg(initializationTaskInfo)
            self.sendMsg(taskInfo)
            # print('executing report', taskInfo)
            worker.add_done_callback(worker.result)
            result = Client.performOrderResult(worker)
            # Send the "task complete" report
            initializationTaskInfo['phase'] = 3
            taskStatusDict = self._taskReportMsgComplete(initializationTaskInfo, task[0])
            taskInfo = self.makeInfoMsg(taskStatusDict)
            # print('complete report', taskInfo)
self.sendMsg(taskInfo)
msg = '{} - 任务完成。'.format(task[0])
PrettyCode.prettyPrint(msg)
            # Pause between tasks
time.sleep(5)
return True
def taskExecuteCMD(self, task):
"""任务执行函数
Args:
task (str): 任务执行命令
"""
try:
self.lock.acquire()
msg = '正在执行 - {}'.format(task)
PrettyCode.prettyPrint(msg)
executor = subprocess.Popen(task, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
result = executor.stdout.read().decode('gbk')
self.lock.release()
return result
except Exception as e:
errorMsg = '{} - {}'.format(e, e.__traceback__.tb_lineno)
self.logObj.logHandler().error(errorMsg)
self.lock.release()
def daemonlogic(self, existsInfo: dict):
        # Daemon watchdog loop
while 1:
for tName, tFunc in existsInfo.items():
if tName not in str(threading.enumerate()):
                    # Detected that the thread went offline
                    if tName == 'heartbeta':
                        # tcpOnline is emptied here, i.e. the TCP connection is offline
                        self.tcpOnline.get()
                    # If the connection is up, event is True and wait() returns immediately;
                    # if the server went offline, event is cleared and wait() blocks until it is set again.
                    # Reaching this branch means TCP is offline, so clear the event first.
                    self.event.clear()
self.event.wait()
tFunc().start()
time.sleep(10)
def _taskReportMsgComplete(self, info: dict, task: str):
        # Update the report after a task has finished
info['working'] = None
info.get('complete').append(task)
if task == info.get('oncall')[0]:
info.get('oncall').pop(0)
else:
info.get('oncall').remove(task)
return info
def _taskReport(self, code, func):
        # Report results (channel used to collect client information)
report = {
'identifier': code,
'kp': 'keepRogerThat',
'systemInfoTask': func()
}
msg = self.makeInfoMsg(report)
self.sendMsg(msg)
@staticmethod
def _loadingConfig():
        # Configuration file
return PrettyCode.loadingConfigJson(r'config.json')
@staticmethod
def _errorCheck(errorInfo):
        # Analyse the exception and return a sensible description
        if str(errorInfo).startswith('[WinError 10054]'):
            # The remote host forcibly closed an existing connection
return '服务器离线'
else:
return '意料之外的错误'
@staticmethod
def _getClientSystemInfo():
        # Collect system information
hostname = socket.gethostname()
localAddrs = socket.getaddrinfo(hostname, None)
localAddrsIPV4 = [ip[4][0] for ip in localAddrs if ':' not in ip[4][0]]
        # MAC address
macUUID = uuid.UUID(int=uuid.getnode()).hex[-12:]
macAddress = '-'.join(macUUID[i: i + 2] for i in range(0, 11, 2))
localInfo = {
'hostname': hostname,
'localAddrsIPV4': localAddrsIPV4,
'MACAddress': macAddress,
}
return localInfo
def _checkPort(self, port: int) -> bool:
        # Check whether the port is already in use
order = 'netstat -ano|findstr {}'.format(port)
# result = subprocess.Popen(order, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = self.taskExecuteCMD(order)
if result:
            # The port is occupied
            pid = result.split()[-1]
            return pid
        else:
            # The port is free
return False
def _findProcess(self, pid):
        # Look up the process name by pid
order = 'tasklist|findstr "{}"'.format(pid)
process = self.taskExecuteCMD(order)
process = process.split()[0]
return process
def _killProcess(self, process):
        # Kill the process
try:
order = 'taskkill /f /t /im {}'.format(process)
self.taskExecuteCMD(order)
return True
except Exception as e:
self.logObj.logHandler().error(e)
return False
def _structureADH33Msg(self, rcc, taskId=None, *args, **kwargs) -> dict:
answer = {
'flag': 'ADH33',
'RCC': rcc,
'taskId': taskId,
'answerTime': time.time(),
}
        errorMsg = args[0][0] if args and args[0] else None
        if errorMsg:
            answer['errorMsg'] = errorMsg
return answer
def _daemonThread(self, existsInfo: dict) -> thread:
daemonThread = threading.Thread(target=self.daemonlogic, name='daemonThread', args=(existsInfo, ))
daemonThread.setDaemon(True)
return daemonThread
def _hbControl(self):
        # Start the heartbeat thread
return threading.Thread(target=self.heartbeat, name='heartbeta')
def _dataReportControl(self, method):
        # Report collected data
if method == 'get_system':
self._taskReport('ADH56', self._getClientSystemInfo)
def _recvMsgControl(self):
        # Receive messages
return threading.Thread(target=self.recvMsg, name='recvMsg')
def dispatch(self):
threadInfoDict = {
'heartbeta': self._hbControl,
'recvMsg': self._recvMsgControl,
}
tPool = ThreadPoolExecutor(max_workers=10)
        # Wait until the server connection succeeds (event becomes True), then start the threads below
self.event.wait()
self._recvMsgControl().start()
self._hbControl().start()
self._daemonThread(threadInfoDict).start()
        # Send online device information
# dataReport = self.taskPool.submit(self._dataReportControl, 'get_system', )
time.sleep(2)
# if dataReport.done():
# PrettyCode.prettyPrint('主机信息上传完成。')
def main(self):
threading.Thread(target=self.TCPConnect, name='TCPConnect').start()
threading.Thread(target=self.dispatch, name='dispatch').start()
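# Illustrative sketch (not part of the client): how a receiver could parse the frames produced
# by Client.sendMsg above, which prefixes every UTF-8 JSON payload with a 4-byte struct-packed
# length ('i'). The in-memory round trip below only demonstrates the framing; the server-side
# socket handling is assumed to live elsewhere.
def _example_frame_roundtrip():
    payload = json.dumps({'flag': 'ADH18', 'phase': 1, 'ACK': 'keepAlive'}).encode('utf-8')
    frame = struct.pack('i', len(payload)) + payload
    # receiver side: read the 4-byte header first, then exactly that many bytes
    (length,) = struct.unpack('i', frame[:4])
    message = json.loads(frame[4:4 + length].decode('utf-8'))
    assert message['ACK'] == 'keepAlive'
    return message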
def testTask():
pass
if __name__ == "__main__":
mole = Client()
mole.main()
# mole.performOrder('AC131')
|
car_helpers.py
|
import os
import threading
import json
import requests
from common.params import Params
from common.basedir import BASEDIR
from selfdrive.version import comma_remote, tested_branch
from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_known_cars
from selfdrive.car.vin import get_vin, VIN_UNKNOWN
from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.car import gen_empty_fingerprint
from common.travis_checker import travis
from common.op_params import opParams
if not travis:
import selfdrive.crash as crash
op_params = opParams()
use_car_caching = op_params.get('use_car_caching')
from cereal import car, log
EventName = car.CarEvent.EventName
HwType = log.HealthData.HwType
def get_startup_event(car_recognized, controller_available):
if comma_remote and tested_branch:
event = EventName.startup
else:
event = EventName.startupMaster
if not car_recognized:
event = EventName.startupNoCar
elif car_recognized and not controller_available:
event = EventName.startupNoControl
return event
def get_one_can(logcan):
while True:
can = messaging.recv_one_retry(logcan)
if len(can.can) > 0:
return can
def load_interfaces(brand_names):
ret = {}
for brand_name in brand_names:
path = ('selfdrive.car.%s' % brand_name)
CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carstate.py'):
CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState
else:
CarState = None
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'):
CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController
else:
CarController = None
for model_name in brand_names[brand_name]:
ret[model_name] = (CarInterface, CarController, CarState)
return ret
def _get_interface_names():
  # read all the folders in selfdrive/car and return a dict where:
  # - keys are all the car brands for which we have an interface
  # - values are lists of specific car models for a given brand
brand_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
brand_name = car_folder.split('/')[-1]
model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
brand_names[brand_name] = model_names
except (ImportError, IOError):
pass
return brand_names
# imports from directory selfdrive/car/<name>/
interface_names = _get_interface_names()
interfaces = load_interfaces(interface_names)
def only_toyota_left(candidate_cars):
return all(("TOYOTA" in c or "LEXUS" in c) for c in candidate_cars) and len(candidate_cars) > 0
# **** for use live only ****
def fingerprint(logcan, sendcan, has_relay):
params = Params()
car_params = params.get("CarParams")
if not travis:
cached_fingerprint = params.get('CachedFingerprint')
else:
cached_fingerprint = None
if car_params is not None:
car_params = car.CarParams.from_bytes(car_params)
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
if has_relay and not fixed_fingerprint and not skip_fw_query:
    # Vin query only reliably works through OBDII
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin is not VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin = cached_params.carVin
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
_, vin = get_vin(logcan, sendcan, bus)
car_fw = get_fw_versions(logcan, sendcan, bus)
fw_candidates = match_fw_to_car(car_fw)
else:
vin = VIN_UNKNOWN
fw_candidates, car_fw = set(), []
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_known_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 10 # 0.1s
car_fingerprint = None
done = False
if cached_fingerprint is not None and use_car_caching: # if we previously identified a car and fingerprint and user hasn't disabled caching
cached_fingerprint = json.loads(cached_fingerprint)
if cached_fingerprint[0] is None or len(cached_fingerprint) < 3:
params.delete('CachedFingerprint')
else:
finger[0] = {int(key): value for key, value in cached_fingerprint[2].items()}
source = car.CarParams.FingerprintSource.can
return (str(cached_fingerprint[0]), finger, vin, car_fw, cached_fingerprint[1])
while not done:
a = get_one_can(logcan)
for can in a.can:
# need to independently try to fingerprint both bus 0 and 1 to work
# for the combo black_panda and honda_bosch. Ignore extended messages
# and VIN query response.
# Include bus 2 for toyotas to disambiguate cars using camera messages
# (ideally should be done for all cars but we can't for Honda Bosch)
if can.src in range(0, 4):
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \
can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]:
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
# Toyota needs higher time to fingerprint, since DSU does not broadcast immediately
if only_toyota_left(candidate_cars[b]):
frame_fingerprint = 100 # 1s
if len(candidate_cars[b]) == 1:
if frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
elif len(candidate_cars[b]) < 4: # For the RAV4 2019 and Corolla 2020 LE Fingerprint problem
if frame > 180:
if any(("TOYOTA COROLLA TSS2 2019" in c) for c in candidate_cars[b]):
car_fingerprint = "TOYOTA COROLLA TSS2 2019"
if any(("TOYOTA COROLLA HYBRID TSS2 2019" in c) for c in candidate_cars[b]):
car_fingerprint = "TOYOTA COROLLA HYBRID TSS2 2019"
if any(("TOYOTA PRIUS 2017" in c) for c in candidate_cars[b]):
car_fingerprint = "TOYOTA PRIUS 2017"
# bail if no cars left or we've been waiting for more than 2s
failed = all(len(cc) == 0 for cc in candidate_cars.values()) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.warning("fingerprinted %s", car_fingerprint)
params.put("CachedFingerprint", json.dumps([car_fingerprint, source, {int(key): value for key, value in finger[0].items()}]))
return car_fingerprint, finger, vin, car_fw, source
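# Illustration only: the cached fingerprint written above is a JSON list of
# [car_fingerprint, source, {address: dlc}] for bus 0, and is re-loaded at the top of
# fingerprint() with the addresses converted back to ints. The values below are
# hypothetical, not taken from a real car.
def _example_cached_fingerprint_roundtrip():
  cached = json.dumps(["HYPOTHETICAL CAR", 0, {"256": 8, "512": 4}])
  name, source, finger0 = json.loads(cached)
  finger = {0: {int(addr): dlc for addr, dlc in finger0.items()}}
  return name, source, finger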
def is_connected_to_internet(timeout=5):
try:
requests.get("https://sentry.io", timeout=timeout)
return True
except:
return False
def crash_log(candidate):
while True:
if is_connected_to_internet():
crash.capture_warning("fingerprinted %s" % candidate)
break
def crash_log2(fingerprints, fw):
while True:
if is_connected_to_internet():
crash.capture_warning("car doesn't match any fingerprints: %s" % fingerprints)
crash.capture_warning("car doesn't match any fw: %s" % fw)
break
def get_car(logcan, sendcan, has_relay=False):
candidate, fingerprints, vin, car_fw, source = fingerprint(logcan, sendcan, has_relay)
if candidate is None:
if not travis:
y = threading.Thread(target=crash_log2, args=(fingerprints,car_fw,))
y.start()
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
candidate = "mock"
if not travis:
x = threading.Thread(target=crash_log, args=(candidate,))
x.start()
CarInterface, CarController, CarState = interfaces[candidate]
car_params = CarInterface.get_params(candidate, fingerprints, has_relay, car_fw)
car_params.carVin = vin
car_params.carFw = car_fw
car_params.fingerprintSource = source
return CarInterface(car_params, CarController, CarState), car_params
|
penv.py
|
import torch
from multiprocessing import Process, Pipe
from typing import List, Callable, Dict
import gym
def worker(conn, env_suppliers:list):
envs = [supplier() for supplier in env_suppliers]
while True:
cmd, data = conn.recv()
if cmd == "step":
conn.send([env.step(d) for env,d in zip(envs,data)])
elif cmd == "reset":
conn.send([env.reset() for env in envs])
elif cmd == 'get_spaces':
conn.send((envs[0].observation_space, envs[0].action_space))
else:
raise NotImplementedError
def receive_process_answers(pipes):
LD = [x for p in pipes for x in p.recv()]
DL = {k: [dic[k] for dic in LD] for k in LD[0]}
return DL
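# Worked example (illustration only) of the list-of-dicts -> dict-of-lists merge performed by
# receive_process_answers: each worker returns a list of per-env dicts, and the results are
# concatenated key-wise across all pipes.
def _example_merge():
    per_env_results = [{'obs': 1, 'reward': 0.0}, {'obs': 2, 'reward': 1.0}]
    merged = {k: [d[k] for d in per_env_results] for k in per_env_results[0]}
    # merged == {'obs': [1, 2], 'reward': [0.0, 1.0]}
    return merged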
class ParallelEnv(gym.Env):
    def __init__(self, env_suppliers: List[Callable[[], gym.Env]], num_processes=1):
        assert len(env_suppliers) % num_processes == 0
        self.envs_per_process = len(env_suppliers) // num_processes
self.locals = []
self.num_processes = num_processes
for k in range(num_processes):
local, remote = Pipe()
self.locals.append(local)
p = Process(target=worker, args=(remote, self.kth_slice(env_suppliers,k)))
p.daemon = True
p.start()
remote.close()
self.locals[0].send(('get_spaces', None))
self.observation_space, self.action_space = self.locals[0].recv()
def kth_slice(self,x,k):
return x[k*self.envs_per_process:(k+1)*self.envs_per_process]
def reset(self):
for local in self.locals:
local.send(("reset", None))
return receive_process_answers(self.locals)
def step(self, actions):
for k,local in enumerate(self.locals):
local.send(("step", self.kth_slice(actions,k)))
return receive_process_answers(self.locals)
def render(self):
raise NotImplementedError
@staticmethod
def build(build_env_supplier, num_envs,num_processes):
return ParallelEnv([build_env_supplier(i) for i in range(num_envs)],num_processes)
class SingleEnvWrapper(gym.Env):
def __init__(self,env:gym.Env):
self.env = env
self.observation_space = env.observation_space
self.action_space = env.action_space
def step(self, action):
return {k:[v] for k,v in self.env.step(action).items()}
def reset(self):
return {k:[v] for k,v in self.env.reset().items()}
def render(self, mode='human'):
return self.env.render(mode)
|
test_subprocess.py
|
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
from test.support import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_input_none(self):
"""input=None has a legacy meaning of input='' on check_output."""
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None)
self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, text=True)
self.assertNotIn('XX', output)
# TODO: RUSTPYTHON
if sys.platform != "win32":
test_check_output_input_none_text = unittest.expectedFailure(test_check_output_input_none_text)
def test_check_output_input_none_universal_newlines(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, universal_newlines=True)
self.assertNotIn('XX', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"buf = sys.stdout.buffer; "
"buf.write(os.getcwd().encode()); "
"buf.flush(); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# NOTE: stdout should get the grandchild's stderr
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
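# os.write() returns the number of bytes written; b'test with stdout=1'
# is 18 bytes long, so the grandchild exits with status 18 and the
# assert in the code above checks exactly that.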
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# TODO: RUSTPYTHON
@unittest.expectedFailure
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
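# Exercise every combination of stdin/stdout/stderr redirection (except
# "no pipes at all") and verify that communicate() closes whichever pipe
# file objects were created.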
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return (None, None) if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_communicate_pipe_buf = unittest.expectedFailure(test_communicate_pipe_buf)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_universal_newlines_communicate = unittest.expectedFailure(test_universal_newlines_communicate)
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# Python debug build push something like "[42442 refs]\n"
# to stderr at exit of subprocess.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_universal_newlines_communicate_stdin_stdout_stderr = unittest.expectedFailure(test_universal_newlines_communicate_stdin_stdout_stderr)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
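# list2cmdline() follows the MS C runtime argument-quoting convention,
# which the assertions below exercise: an argument containing whitespace
# (or an empty argument) is wrapped in double quotes, a literal '"' is
# escaped as '\"' with any backslashes immediately before it doubled,
# and backslashes that do not precede a quote are left untouched.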
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
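# The child blocks in os.read(0, 1); writing a single byte lets it
# finish so that wait() can reap it and poll() returns the exit status.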
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
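# The underlying fd was closed directly above, so closing the Python
# file object here may raise OSError (and on some platforms trigger a
# C runtime report, hence SuppressCrashReport). Dropping the reference
# keeps the Popen context manager from trying to close it again.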
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_bufsize_equal_one_text_mode = unittest.expectedFailure(test_bufsize_equal_one_text_mode)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bufsize_equal_one_binary_mode(self):
# The line is not flushed in binary mode with bufsize=1,
# so we should get an empty response.
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must properly close its pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing-sensitive test; the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here makes it likely that
# the main thread is already blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
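# On Windows, kill() is an alias for terminate(), which ends the
# process with exit code 1 rather than delivering a signal.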
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread (one that doesn't actually have it),
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
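# The child exits immediately without reading stdin, so writing a large
# payload provokes a broken-pipe error that communicate() must swallow.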
p.communicate(b"x" * 2**20)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_communicate_epipe = unittest.expectedFailure(test_communicate_epipe)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_communicate_epipe_only_stdin = unittest.expectedFailure(test_communicate_epipe_only_stdin)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# For simplicity this test is Linux-specific, just so that we have at
# least some coverage. It is not a platform-specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# run() with a sequence argument; the exit status is exposed as returncode
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# run() can be called with input set to bytes
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
# The name of a command that can be run without any arguments
# and that exits quickly.
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
# This test _might_ wind up a bit fragile on loaded build+test machines,
# as it depends on timing with wide enough margins for normal situations,
# but it does assert that the timeout happened "soon enough" to believe
# the right thing happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec,
# along with issuing a resource warning if the process still exists.
# Since we don't actually spawn a process in these tests, we can forgo
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called, but there is no easy way
# to do that.
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
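# The mocked fork_exec() receives the errpipe_write fd as args[13];
# writing an "OSError:<hex errno>:" record to it mimics how a real
# child reports a setup failure back to the parent before exec.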
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment; that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
ZERO_RETURN_CMD,
preexec_fn=prepare)
except ValueError as err:
            # The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
@unittest.skip("TODO: RUSTPYTHON, flaky test")
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest processes maximum
# number of file descriptors.
# This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
        # Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
@unittest.skip("TODO: RUSTPYTHON, flaky test")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
ZERO_RETURN_CMD,
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
self.assertNotIn(ident, [id(o) for o in subprocess._active])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
pid = proc.pid
pid, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
                pass  # keep retrying until communicate() returns
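# Illustrative sketch (not part of the original test suite): many of the fd
# tests above boil down to one contract: with close_fds=True (the default),
# only descriptors listed in pass_fds survive exec() in the child.  The
# POSIX-only helper below demonstrates that contract with just the stdlib;
# its name (_demo_pass_fds) is made up here.
def _demo_pass_fds():
    import os
    import subprocess
    import sys

    r, w = os.pipe()
    try:
        # The child fstat()s the inherited read end; it stays open there only
        # because it is listed in pass_fds (which forces close_fds=True).
        code = "import os, sys; os.fstat(int(sys.argv[1])); print('inherited')"
        out = subprocess.check_output(
            [sys.executable, "-c", code, str(r)],
            close_fds=True, pass_fds=(r,))
        return out.strip() == b"inherited"
    finally:
        os.close(r)
        os.close(w)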
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
        STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
            dwFlags=STARTF_USESHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_kill_dead(self):
self._kill_dead_process('kill')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_terminate_dead(self):
self._kill_dead_process('terminate')
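# Illustrative sketch (not part of the original test suite): test_startupinfo_copy
# above relies on Popen treating the caller's STARTUPINFO as read-only.  The
# hedged helper below reuses one STARTUPINFO for two calls and checks that its
# flags are untouched; it is guarded so the module still imports off Windows,
# and its name (_demo_startupinfo_reuse) is made up here.
def _demo_startupinfo_reuse():
    import subprocess
    import sys

    if sys.platform != "win32":
        return None  # STARTUPINFO and these constants only exist on Windows
    si = subprocess.STARTUPINFO()
    si.dwFlags = subprocess.STARTF_USESHOWWINDOW
    si.wShowWindow = subprocess.SW_HIDE
    for _ in range(2):
        subprocess.call([sys.executable, "-c", "pass"], startupinfo=si)
    return si.dwFlags == subprocess.STARTF_USESHOWWINDOW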
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_call_keyboardinterrupt_no_kill = unittest.expectedFailure(test_call_keyboardinterrupt_no_kill)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_run_keyboardinterrupt_no_kill = unittest.expectedFailure(test_run_keyboardinterrupt_no_kill)
# TODO: RUSTPYTHON
if sys.platform == "win32":
test_getoutput = unittest.expectedFailure(test_getoutput)
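# Illustrative sketch (not part of the original test suite): the
# KeyboardInterrupt tests above never deliver a real signal; they patch
# internal Popen methods with mock side effects instead.  The self-contained
# helper below shows the same mock.patch.object(side_effect=...) pattern on an
# ordinary class; the Worker class and helper name are made up here.
def _demo_side_effect_pattern():
    from unittest import mock

    class Worker:
        def run(self):
            return "real result"

    w = Worker()
    with mock.patch.object(Worker, "run", side_effect=KeyboardInterrupt):
        try:
            w.run()
        except KeyboardInterrupt:
            return True  # the patched method raised instead of running
    return False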
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
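# Illustrative sketch (not part of the original test suite): the
# CommandsWithSpaces tests above exercise how Popen quotes arguments that
# contain spaces on Windows.  The quoting itself is done by the internal
# helper subprocess.list2cmdline, shown below with a made-up argument list;
# treat its exact output as an implementation detail.
def _demo_list2cmdline():
    import subprocess

    # Arguments containing spaces are wrapped in double quotes, so this
    # returns the string: python.exe "ab cd"
    return subprocess.list2cmdline(["python.exe", "ab cd"])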
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
setup.py
|
import sys
import yaml
import re
import json
import logging
import threading
import queue
import time
from os import path
import kraken.cerberus.setup as cerberus
import kraken.kubernetes.client as kubecli
import kraken.invoke.command as runcommand
import kraken.pvc.pvc_scenario as pvc_scenario
import sshv.utils as utils
import sshv.log as log
import sshv.control as control
def run(scenarios_list, config):
namespace = "default"
failed_post_scenarios = ""
go_meter_pod = ""
lins_blkpvc_file = scenarios_list[0][0]
gomet_pod_file = scenarios_list[1][0]
stor_file = scenarios_list[2][0]
write_q = queue.Queue(maxsize = 1)
utils._init()
logger = log.Log()
utils.set_logger(logger)
stor_config = utils.ConfFile(stor_file)
kind = stor_config.get_kind()
times = int(stor_config.get_number_of_times())
versa_con = control.IscsiTest(stor_config)
pvc_resoure = kubecli.create_pvc(lins_blkpvc_file)
time.sleep(20)
with open(path.join(path.dirname(__file__), gomet_pod_file)) as f:
gomet_pod_config = yaml.safe_load(f)
metadata_config = gomet_pod_config["metadata"]
go_meter_pod = metadata_config.get("name", "")
kubecli.create_pod_spof(gomet_pod_config, namespace,lins_blkpvc_file, 120)
time.sleep(2)
err = versa_con.ckeck_drbd_status_spof(pvc_resoure, False)
if not err:
err = versa_con.check_drbd_crm_res(pvc_resoure, False)
if err:
clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
exit(1)
# err = versa_con.ckeck_drbd_status_spof(pvc_resoure, False)
# if err:
# clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
# exit(1)
left_times = times
    while left_times:
down = False
logging.info("Times %d: For single failure, Go-meter start to write", times - left_times)
threading.Thread(target=gometer_write, args=(go_meter_pod, write_q)).start()
if kind == "node_down":
versa_con.down_node()
down = True
elif kind == "interface_down":
versa_con.change_node_interface(False)
elif kind == "switch_port_down":
versa_con.change_switch_port(False)
elif kind == "hand_operation":
logging.info("Please do hand operation...")
logging.info("Go-meter is writing, wait...")
err = write_q.get()
if err:
utils.prt_log('', f"Go meter write failed",1)
versa_con.get_log(down)
clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
exit(1)
err = versa_con.ckeck_drbd_status_spof(pvc_resoure, down)
if not err:
err = versa_con.check_drbd_crm_res(pvc_resoure, down)
if err:
clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
exit(1)
# err = versa_con.ckeck_drbd_status_spof(pvc_resoure, down)
# if err:
# clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
# exit(1)
logging.info("Go-meter start to compare")
command = "cd /go/src/app;./main compare"
response = kubecli.exec_cmd_in_pod(command, go_meter_pod, namespace)
logging.info("\n" + str(response))
if not "Finish" in response:
utils.prt_log('', f"Go meter compare failed",1)
if kind == "interface_down":
versa_con.change_node_interface(True)
elif kind == "switch_port_down":
versa_con.change_switch_port(True)
versa_con.get_log(down)
clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
exit(1)
logging.info("Times %d:For fix single failure, Go-meter start to write", times - left_times)
threading.Thread(target=gometer_write, args=(go_meter_pod, write_q)).start()
if kind == "interface_down":
versa_con.change_node_interface(True)
elif kind == "switch_port_down":
versa_con.change_switch_port(True)
elif kind == "hand_operation":
logging.info("Please do hand operation...")
down = False
logging.info("Go-meter is writing, wait...")
err = write_q.get()
if err:
utils.prt_log('', f"Go meter write failed",1)
versa_con.get_log(down)
clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
exit(1)
err = versa_con.ckeck_drbd_status_spof(pvc_resoure, down)
if not err:
err = versa_con.check_drbd_crm_res(pvc_resoure, down)
if err:
clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
exit(1)
# err = versa_con.ckeck_drbd_status_spof(pvc_resoure, down)
# if err:
# clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
# exit(1)
logging.info("Go-meter start to compare")
command = "cd /go/src/app;./main compare"
response = kubecli.exec_cmd_in_pod(command, go_meter_pod, namespace)
logging.info("\n" + str(response))
if not "Finish" in response:
utils.prt_log('', f"Go meter compare failed",1)
versa_con.get_log(down)
clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
exit(1)
left_times = left_times - 1
clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file)
def runtst(scenarios_list, config):
namespace = "default"
failed_post_scenarios = ""
#for app_config in scenarios_list:
lins_blkpvc_file = scenarios_list[0][0]
stor_file = scenarios_list[2][0]
gomet_pod_file = scenarios_list[1][0]
utils._init()
logger = log.Log()
utils.set_logger(logger)
stor_config = utils.ConfFile(stor_file)
versa_con = control.IscsiTest(stor_config)
versa_con.change_switch_port(False)
print(60000)
time.sleep(60)
versa_con.change_switch_port(True)
def clear_pvc_and_pod(go_meter_pod,namespace,lins_blkpvc_file):
kubecli.delete_pod(go_meter_pod, namespace)
kubecli.delete_pvc(lins_blkpvc_file)
def gometer_write(pod_name, write_q):
command = "cd /go/src/app;./main write"
response = kubecli.exec_cmd_in_pod(command, pod_name, "default")
logging.info("\n" + str(response))
if "Finish" in response:
write_q.put(0)
else:
write_q.put(1)
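# Illustrative sketch (not part of the original scenario script): run() above
# offloads the blocking Go-meter write to a worker thread and receives the
# result through a one-slot queue, so write_q.get() blocks until
# gometer_write() reports 0 or 1.  The self-contained helper below shows the
# same thread-plus-queue pattern with a dummy task; its name is made up here.
def _demo_threaded_result_queue():
    import queue
    import threading

    result_q = queue.Queue(maxsize=1)

    def worker():
        # Stand-in for the long-running write; report 0 on success.
        result_q.put(0)

    threading.Thread(target=worker).start()
    err = result_q.get()  # blocks until the worker has finished
    return err == 0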
|
run_threaded.py
|
import functools
from threading import Thread
def threaded(func):
    """Run the decorated function in a background thread and return the Thread."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        thread = Thread(target=func, args=args, kwargs=kwargs)
        # Expose the most recently started thread on the wrapper so callers
        # can retrieve and join() it later.
        wrapper.__thread__ = thread
        # threading.Thread has no stop() method, so the original
        # "except KeyboardInterrupt: thread.stop()" branch could only raise
        # AttributeError; start the thread and let any exception propagate.
        thread.start()
        return thread
    return wrapper
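# Usage sketch (not part of the original module): decorate a function, call it
# to start the thread, and keep the returned Thread objects if you want to
# join() them.  The worker function below and its arguments are invented for
# the example.
def _demo_threaded_usage():
    import time

    @threaded
    def worker(n):
        time.sleep(0.01)
        return n  # the return value is discarded; the caller only gets the Thread

    threads = [worker(i) for i in range(3)]
    for t in threads:
        t.join()
    return len(threads)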
|
test_insert.py
|
import copy
import threading
import pytest
from pymilvus import DataType, ParamError, BaseException
from utils import util_pymilvus as ut
from common.constants import default_entity, default_entities, default_binary_entity, default_binary_entities, \
default_fields
from common.common_type import CaseLabel
from utils.util_log import test_log as log
# ADD_TIMEOUT = 60
uid = "test_insert"
field_name = ut.default_float_vec_field_name
binary_field_name = ut.default_binary_vec_field_name
default_nb = ut.default_nb
row_count = ut.row_count
default_tag = ut.default_tag
default_single_query = {
"data": ut.gen_vectors(1, ut.default_dim),
"anns_field": ut.default_float_vec_field_name,
"param": {"metric_type": "L2", "params": {"nprobe": 10}},
"limit": 10,
}
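# Illustrative sketch (not part of the original test module): default_single_query
# above is simply a bag of keyword arguments for search(): one random query
# vector, the vector field to search, the index/metric parameters and a result
# limit.  The hedged helper below mirrors how the tests consume it; connect and
# collection_name are placeholders for a live Milvus connection and an
# existing, flushed collection.
def _demo_single_query(connect, collection_name):
    query = {
        "data": ut.gen_vectors(1, ut.default_dim),        # one query vector
        "anns_field": ut.default_float_vec_field_name,    # field to search
        "param": {"metric_type": "L2", "params": {"nprobe": 10}},
        "limit": 10,
    }
    connect.load_collection(collection_name)
    return connect.search(collection_name, **query)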
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=ut.gen_simple_index()
)
def get_simple_index(self, request, connect):
if request.param["index_type"] in ut.index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
log.info(request.param)
return request.param
@pytest.fixture(
scope="function",
params=ut.gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_collection_not_existed(self, connect):
"""
target: test insert, with collection not existed
method: insert entity into a random named collection
expected: raise a BaseException
"""
collection_name = ut.gen_unique_str(uid)
with pytest.raises(BaseException) as e:
connect.insert(collection_name, default_entities)
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_search(self, connect, collection):
"""
        target: test searching entities shortly after they are inserted
        method: insert entities, flush, load the collection, then search it
expected: no error raised
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
res = connect.search(collection, **default_single_query)
assert len(res[0]) == ut.default_top_k
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_segment_row_count(self, connect, collection):
nb = ut.default_segment_row_limit + 1
result = connect.insert(collection, ut.gen_entities(nb))
connect.flush([collection])
assert len(result.primary_keys) == nb
stats = connect.get_collection_stats(collection)
assert len(stats['partitions'][0]['segments']) == 2
for segment in stats['partitions'][0]['segments']:
assert segment['row_count'] in [ut.default_segment_row_limit, 1]
@pytest.fixture(
scope="function",
params=[
1,
2000
],
)
def insert_count(self, request):
yield request.param
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids(self, connect, id_collection, insert_count):
"""
target: test insert entities in collection, use customize ids
method: create collection and insert entities in it, check the ids returned and
the collection length after entities inserted
        expected: the returned ids match the customized ids and the collection row count equals nb
"""
nb = insert_count
ids = [i for i in range(nb)]
entities = ut.gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
"""
        target: test creating collections with different filter/vector fields and inserting entities with customized ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
"""
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = ut.gen_unique_str("test_collection")
fields = {
"fields": [ut.gen_primary_field(), filter_field, vector_field],
"auto_id": False
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = ut.gen_entities_by_fields(fields["fields"], nb, ut.default_dim, ids)
log.info(entities)
result = connect.insert(collection_name, entities)
assert result.primary_keys == ids
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
"""
target: test insert entities in collection without ids
        method: create id_collection and insert entities without ids
expected: exception raised
"""
nb = insert_count
with pytest.raises(Exception) as e:
entities = ut.gen_entities(nb)
del entities[0]
connect.insert(id_collection, entities)
# TODO
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
"""
target: check the result of insert, with params ids and no ids
        method: insert vectors twice, using customized ids first and then no ids
expected: BaseException raised
"""
ids = [i for i in range(default_nb)]
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
connect.insert(id_collection, entities)
with pytest.raises(Exception) as e:
del entities[0]
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
"""
target: check the result of insert, with params ids and no ids
        method: insert entities without the ids field into an id_collection
expected: error raised
"""
entities = copy.deepcopy(default_entities)
del entities[0]
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
"""
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise exception
"""
ids = [i for i in range(1, default_nb)]
log.info(len(ids))
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
"""
        target: test insert vectors in collection, use customized ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise exception
"""
ids = [i for i in range(1, default_nb)]
log.info(len(ids))
entity = copy.deepcopy(default_entity)
entity[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entity)
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it, with the partition_name param
        expected: the collection row count equals nb
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
assert connect.has_partition(collection, default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self, connect, id_collection):
"""
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_name param
        expected: the returned primary keys equal the customized ids
"""
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
entities = ut.gen_entities(default_nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities, partition_name=default_tag)
assert result.primary_keys == ids
log.info(connect.describe_collection(id_collection))
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_default_partition(self, connect, collection):
"""
target: test insert entities into default partition
        method: create partition and insert into collection without the partition_name param
expected: the collection row count equals to nb
"""
result = connect.insert(collection, default_entities, partition_name=ut.default_partition_name)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_not_existed(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it, with the not existed partition_name param
expected: error raised
"""
tag = ut.gen_unique_str()
with pytest.raises(Exception) as e:
connect.insert(collection, default_entities, partition_name=tag)
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_partition_repeatedly(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it repeatedly, with the partition_name param
        expected: the collection row count equals 2 * nb
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
res = connect.get_collection_stats(collection)
assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
"""
tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_vector_more(self, connect, collection):
"""
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
"""
tmp_entity = ut.add_vector_field(default_nb, ut.default_dim)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_field_vector_value(self, connect, collection):
"""
target: test insert entities, with no vector field value
method: remove entity values of vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_field_vector_type(self, connect, collection):
"""
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_field_vector_name(self, connect, collection):
"""
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="issue 15416")
# @pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
"""
target: test collection rows_count is correct or not with multi threading
        method: create collection and insert entities in it (idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = ut.get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
log.info("In thread-%d" % thread_i)
result = milvus.insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for th in threads:
th.join()
stats = milvus.get_collection_stats(collection)
assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_disable_auto_flush(self, connect, collection):
"""
target: test insert entities, with disable auto-flush
method: disable auto-flush and insert, get entity
expected: the count is equal to 0
"""
delete_nums = 500
ut.disable_flush(connect)
result = connect.insert(collection, default_entities)
ids = result.primary_keys
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
@pytest.fixture(
scope="function",
params=ut.gen_binary_index()
)
def get_binary_index(self, request):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_partition(self, connect, binary_collection):
"""
target: test insert entities and create partition tag
method: create collection and insert binary entities in it, with the partition_name param
expected: the collection row count equals to nb
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
assert connect.has_partition(binary_collection, default_tag)
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_multi_times(self, connect, binary_collection):
"""
target: test insert entities multi times and final flush
method: create collection and insert binary entity multi and final flush
expected: the collection row count equals to nb
"""
for i in range(default_nb):
result = connect.insert(binary_collection, default_binary_entity)
assert len(result.primary_keys) == 1
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
"""
target: test insert binary entities after build index
method: build index and insert entities
expected: no error raised
"""
connect.create_index(binary_collection, binary_field_name, get_binary_index)
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
index = connect.describe_index(binary_collection, "")
ut.create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
"""
        target: test building index after inserting binary vectors
        method: insert vectors and then build index
        expected: no error raised
"""
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
ut.create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_search(self, connect, binary_collection):
"""
target: test search vector after insert vector after a while
method: insert vector, sleep, and search collection
expected: no error raised
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
query, _ = ut.gen_search_vectors_params(binary_field_name, default_binary_entities,
ut.default_top_k, 1, metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
log.debug(res)
assert len(res[0]) == ut.default_top_k
class TestInsertAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, result):
log.info("In callback check status")
assert not result
def check_result(self, result):
log.info("In callback check results")
assert result
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_params(self, connect):
"""
        target: test async insert into a collection that does not exist
        method: insert entities asynchronously into a non-existent collection
        expected: exception raised when fetching the result
"""
collection_new = ut.gen_unique_str()
future = connect.insert(collection_new, default_entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
result = future.result()
# 1339
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
"""
        target: test async insert with empty entities
        method: insert an empty entity list asynchronously
        expected: exception raised when fetching the result
"""
entities = []
future = connect.insert(collection, entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
future.result()
class TestInsertMultiCollections:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=ut.gen_simple_index()
)
def get_simple_index(self, request, connect):
log.info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.mark.tags(CaseLabel.L1)
def test_insert_entity_multi_collections(self, connect):
"""
target: test insert entities
method: create 10 collections and insert entities into them in turn
expected: row count
"""
collection_num = 10
collection_list = []
for i in range(collection_num):
collection_name = ut.gen_unique_str(uid)
collection_list.append(collection_name)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection_name, default_entities)
connect.flush([collection_name])
assert len(result.primary_keys) == default_nb
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == default_nb
for i in range(collection_num):
connect.drop_collection(collection_list[i])
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_insert_entity_another(self, connect, collection):
"""
target: test insert vector to collection_1 after collection_2 deleted
method: delete collection_2 and insert vector to collection_1
expected: row count equals the length of entities inserted
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection)
result = connect.insert(collection_name, default_entity)
connect.flush([collection_name])
assert len(result.primary_keys) == 1
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
"""
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection_name, default_entity)
assert len(result.primary_keys) == 1
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
ut.create_target_index(get_simple_index, field_name)
assert index == get_simple_index
connect.drop_collection(collection_name)
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
"""
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection_name, "")
ut.create_target_index(get_simple_index, field_name)
assert index == get_simple_index
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
"""
target: test insert vector to collection_2 after build index for collection_1 for a while
method: build index and insert vector
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L1)
def test_search_entity_insert_entity_another(self, connect, collection):
"""
target: test insert entity to collection_1 after search collection_2
method: search collection and insert entity
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.load_collection(collection)
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
connect.insert(collection_name, default_entity)
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == 1
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_entity_search_entity_another(self, connect, collection):
"""
        target: test search collection_2 after inserting an entity into collection_1
        method: insert entity into one collection and search the other
        expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, **default_single_query)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_entity_sleep_search_entity_another(self, connect, collection):
"""
        target: test search collection_2 a while after inserting an entity into collection_1
        method: insert entity, then search the other collection
        expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, **default_single_query)
assert len(res[0]) == 0
# @pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_entity_during_release_collection(self, connect, collection):
"""
target: test insert entity during release
method: release collection async, then do insert operation
expected: insert ok
"""
for i in range(10):
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
        def release(coll_name):
            connect.release_collection(coll_name)
        t = threading.Thread(target=release, args=(collection,))
t.start()
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
"""
Test inserting vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
"""
        target: test insert using customized ids that are not int64
method: create collection and insert entities in it
expected: raise exception
"""
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(id_collection, default_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test insert with invalid scenario
method: insert with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception):
connect.insert(collection_name, default_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
"""
target: test insert with invalid scenario
method: insert with invalid partition name
expected: raise exception
"""
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
if tag_name is not None:
with pytest.raises(Exception):
connect.insert(collection, default_entity, partition_name=tag_name)
else:
connect.insert(collection, default_entity, partition_name=tag_name)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
"""
target: test insert with invalid field
method: insert with invalid field type
expected: raise exception
"""
field_type = get_field_type
tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'float', field_type)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
"""
target: test insert with invalid field
method: insert with invalid field value
expected: raise exception
"""
field_value = get_field_int_value
tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
"""
target: test insert with invalid entity
method: insert with invalid entity value
expected: raise exception
"""
tmp_entity = copy.deepcopy(default_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
"""
Test inserting vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
"""
target: test insert with invalid field name
method: insert with invalid field name
expected: raise exception
"""
tmp_entity = ut.update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
"""
target: test insert with invalid scenario
method: insert with invalid field entity
expected: raise exception
"""
tmp_entity = copy.deepcopy(default_binary_entity)
src_vectors = tmp_entity[-1]["values"]
src_vectors[0] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
"""
        target: test insert using customized ids that are not int64
method: create collection and insert entities in it
expected: raise exception
"""
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(binary_id_collection, default_binary_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
"""
target: test insert with invalid field type
method: insert with invalid field type
expected: raise exception
"""
field_type = get_field_type
tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
"""
target: test insert with invalid field
method: insert with invalid field value
expected: raise exception
"""
tmp_entities = copy.deepcopy(default_binary_entities)
src_vector = tmp_entities[-1]["values"]
src_vector[1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entities)
|
runtime.py
|
__all__ = [
"Runtime"
]
from traceback import (
print_exc
)
from threading import (
Thread
)
from collections import (
defaultdict,
deque
)
from itertools import (
repeat
)
from common import (
charcodes,
bstr,
notifier,
cached,
reset_cache
)
from .value import (
Returned,
Value
)
@notifier("break")
class Breakpoints(object):
def __init__(self, runtime):
self._rt = runtime
self._alive = True
def __call__(self):
self.__notify_break()
rt = self._rt
rt.on_resume()
# This breakpoint can be removed during preceding notification.
if self._alive:
rt.target.step_over_br()
# See: https://stackoverflow.com/a/5288992/7623015
def __bool__(self): # Py3
return bool(self.__break)
__nonzero__ = __bool__ # Py2
class Runtime(object):
"A context of debug session with access to DWARF debug information."
def __init__(self, target, dic, return_reg_name = None):
"""
:type target:
pyrsp.rsp.RemoteTarget
:param target:
debug session descriptor
:type dic:
DWARFInfoCache
:param dic:
a global context
"""
self.target = target
self.dic = dic
self.pc = target.registers.index(target.pc_reg)
# cache of register values converted to integer
self.regs = [None] * len(target.registers)
# support for `cached` decorator
self.__lazy__ = []
self.object_stack = deque()
if return_reg_name is None:
            # TODO: account for the target's calling convention
self.return_reg = 0
else:
self.return_reg = target.registers.index(return_reg_name)
# TODO: this must be done using DWARF because "bitsize" and address
# size are not same values semantically (but same by implementation).
self.address_size = target.arch["bitsize"] >> 3
        # Version number of debug session. It is incremented on each target
        # resumption. It helps detect use of stale data, e.g. a local
        # variable of a function that has already returned.
self.version = 0
# breakpoints and its handlers
self.brs = defaultdict(lambda : Breakpoints(self))
def add_br(self, addr_str, cb, quiet = False):
cbs = self.brs[addr_str]
if not cbs:
self.target.set_br_a(addr_str, cbs, quiet)
cbs.watch_break(cb)
def remove_br(self, addr_str, cb, quiet = False):
cbs = self.brs[addr_str]
cbs.unwatch_break(cb)
if not cbs:
cbs._alive = False
self.target.del_br(addr_str, quiet)
def on_resume(self, *_, **__):
""" When target resumes all cached data must be reset because it is
not actual now.
"""
self.version += 1
self.regs[:] = repeat(None, len(self.regs))
reset_cache(self)
def get_reg(self, idx):
regs = self.regs
val = regs[idx]
if val is None:
tgt = self.target
val_hex = tgt.regs[tgt.registers[idx]]
val = int(val_hex, 16)
regs[idx] = val
return val
def co_run_target(self):
target = self.target
def run():
try:
target.run(setpc = False)
except:
print_exc()
print("Target PC 0x%x" % (self.get_reg(self.pc)))
try:
target.send(b"k")
except:
print_exc()
t = Thread(target = run)
t.name = "RSP client"
t.start()
        while t.is_alive():
yield False
@cached
def returned_value(self):
""" Value being returned by current subprogram. Note that it is
normally correct only when the target is stopped at the subprogram epilogue.
"""
pc = self.get_reg(self.pc)
val_desc = Returned(self.dic, self.return_reg, pc)
return Value(val_desc, runtime = self, version = self.version)
@cached
def subprogram(self):
"Subprogram corresponding to current program counter."
pc = self.get_reg(self.pc)
return self.dic.subprogram(pc)
@cached
def frame(self):
frame_expr = self.subprogram.frame_base
frame = frame_expr.eval(self)
return frame
@cached
def cfa(self):
pc = self.get_reg(self.pc)
cfa_expr = self.dic.cfa(pc)
cfa = cfa_expr.eval(self)
return cfa
def push(self, object_value):
self.object_stack.append(object_value)
def pop(self):
self.object_stack.pop()
@property
def object(self):
stack = self.object_stack
obj = stack.pop()
loc = obj.eval(self)
stack.append(obj)
return loc
def get_val(self, addr, size):
target = self.target
data = target.dump(size, addr)
if target.arch["endian"]:
data = reversed(data)
        # at this point the data is big-endian (most significant byte first)
di = charcodes(data)
val = next(di)
for d in di:
val <<= 8
val += d
return val
def __iter__(self):
prog = self.subprogram
_locals = prog.data
if _locals is not None:
for _local in _locals:
yield _local
else:
cu = prog.die.cu
_globals = self.dic.get_CU_global_variables(cu)
for _global in _globals:
yield _global
def __getitem__(self, name):
""" Accessing variables by name.
Search order:
- current subprogram local data (variables, arguments, ...)
- global variables for compile unit of current subprogram
TODO: public global variables across all CUs
TODO: current CU's subprograms
TODO: public global subprograms
TODO: target registers
:param name:
of a variable
:returns:
corresponding runtime descriptor `Value`
"""
prog = self.subprogram
_locals = prog.data
bname = bstr(name)
try:
datum = _locals[bname]
except KeyError:
cu = prog.die.cu
_globals = self.dic.get_CU_global_variables(cu)
try:
datum = _globals[bname]
except KeyError:
raise KeyError("No name '%s' found in runtime" % name)
return Value(datum, runtime = self, version = self.version)
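# ---------------------------------------------------------------------------
# A minimal usage sketch, not part of the original module: it shows how the
# Runtime API above fits together, assuming the caller already has a connected
# pyrsp RemoteTarget and a populated DWARFInfoCache. `br_addr`, `var_name` and
# the zero-argument callback are illustrative placeholders.
def example_session(target, dic, br_addr, var_name):
    rt = Runtime(target, dic)

    def on_break():
        # While the target is stopped, variables can be looked up by name;
        # the result is a runtime `Value` descriptor (see __getitem__ above).
        value = rt[var_name]
        print("breakpoint hit, %r -> %r" % (var_name, value))
        rt.remove_br(br_addr, on_break)

    rt.add_br(br_addr, on_break)
    # co_run_target drives the RSP client thread, yielding False until the
    # target finishes running.
    for _ in rt.co_run_target():
        pass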
|
example3.py
|
import threading
import requests
import time
def ping(url):
res = requests.get(url)
print(f"{url}: {res.text}")
urls = [
"http://httpstat.us/200",
"http://httpstat.us/400",
"http://httpstat.us/404",
"http://httpstat.us/408",
"http://httpstat.us/500",
"http://httpstat.us/524",
]
start = time.time()
for url in urls:
ping(url)
print(f"Sequential: {time.time() - start : .2f} seconds")
print()
start = time.time()
threads = []
for url in urls:
thread = threading.Thread(target=ping, args=(url,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
print(f"Threading: {time.time() - start : .2f} seconds")
|
nodes2_job.py
|
import evaluate
import payout
#import db
import config
import logging
import time
import threading
#import requests
import json
# Evaluate node data
# After the end of every 10 minute period, after 1 minute run evaluation of the last 2+ slots
# After the end of every 4-hours, reevaluate the last 1+ days
config = config.readConfig()
logging.basicConfig(level=logging.INFO)
evaluate_period_last_started = 100000
evaluate_period_last_finished = 100000
evaluate_daily_last_started = 100000
evaluate_daily_last_finished = 100000
def get_logger():
# Get named logger
return logging.getLogger(__name__)
def start_job():
global config
time.sleep(30)
get_logger().info('Started')
global evaluate_period_last_started
global evaluate_period_last_finished
global evaluate_daily_last_started
global evaluate_daily_last_finished
    while getattr(threading.current_thread(), "do_run", True):
now = int(time.time())
ten_minute = 600
remainder_10min = now - (int(now / ten_minute) * ten_minute)
# if remainder < 2 minutes, do nothing
if remainder_10min >= 60:
#get_logger('nodes2_job', 'Check', remainder_10min, now)
if (now - evaluate_period_last_started) >= 300:
# evaluate period now
earliest_start_time = now - 4 * 24 * 3600
start_time = max(earliest_start_time, evaluate_period_last_started)
get_logger().info('Do evaluate_periods ' + str(remainder_10min) + ' ' + str(now) + ' ' + str(start_time))
evaluate_period_last_started = now
evaluate.evaluate_periods(start_time, now + ten_minute, ten_minute)
now = int(time.time())
evaluate_period_last_finished = now
get_logger().info('Done evaluate_periods ' + str(now) + ' dur ' + str(evaluate_period_last_finished - evaluate_period_last_started))
if (now - evaluate_daily_last_started) >= 1*3600:
# evaluate days now
period_day = 24 * 3600
now_day = int(now / period_day) * period_day
earliest_start_time = now_day - 4 * period_day
start_time = max(earliest_start_time, evaluate_daily_last_started)
end_time = now_day + period_day
get_logger().info('Do evaluate_days ' + str(now) + ' ' + str(start_time) + ' ' + str(end_time))
evaluate_daily_last_started = now
evaluate.evaluate_days(start_time, end_time)
now = int(time.time())
evaluate_daily_last_finished = now
get_logger().info('Done evaluate_days ' + str(now) + ' dur ' + str(evaluate_daily_last_finished - evaluate_daily_last_started))
time.sleep(5)
# payout for last 2+1 days
start_time = now - 3 * 24 *3600
get_logger().info('Do Payouts ' + str(now) + ' ' + str(start_time))
payout.do_payout(start_time, config)
get_logger().info('Done Payouts ' + str(now))
time.sleep(30)
get_logger().info('Stopping')
bg_thread = None
def start_background():
global bg_thread
bg_thread = threading.Thread(target=start_job)
bg_thread.start()
def stop_background():
global bg_thread
bg_thread.do_run = False
bg_thread.join()
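# A hypothetical driver, not part of the original job, sketching how the
# background thread is meant to be started and stopped; the 120-second run
# duration is arbitrary.
if __name__ == '__main__':
    start_background()
    try:
        time.sleep(120)
    finally:
        # stop_background() clears the thread's do_run flag and joins it
        stop_background()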
|
gpufilestream.py
|
# _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-21 The Caer Authors <http://github.com/jasmcaus>
from threading import Thread
import time
import math
from queue import Queue
import cv2 as cv
from .constants import FRAME_COUNT, FPS
__all__ = [
'GPUFileStream'
]
class GPUFileStream:
r"""
    This is an auxiliary class that enables video streaming on the GPU for caer with minimal latency and little to no additional computational overhead.
    The basic idea is to track and save the salient feature array for the given number of frames and then use these anchor points to cancel out perturbations relative to them for the incoming frames in the queue. This class relies heavily on **Threaded Queue mode** for error-free and ultra-fast frame handling.
Args:
source (int, str): Source path for the video. If ``source=0``, the default camera device is used. For
multiple external camera devices, use incremented values. For eg: ``source=1`` represents the second camera device on your system.
qsize (int): Default queue size for handling the video streams. Default: 128.
"""
def __init__(self, source, qsize=128):
"""
Source must be a path to a video file
Utilizes your system's GPU to process the stream
"""
if not isinstance(source, str):
            raise ValueError(f'Expected a filepath. Got {type(source)}. Consider using VideoStream, which supports both live video and pre-existing videos')
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv.VideoCapture(source)
        self.kill_stream = False
        # GPUFileStream only supports video files, never live camera streams
        self.live_video = False
        self.count = 0
# initialize the queue to store frames
self.Q = Queue(maxsize=qsize)
self.width = int(self.stream.get(cv.CAP_PROP_FRAME_WIDTH))
self.height = int(self.stream.get(cv.CAP_PROP_FRAME_HEIGHT))
self.res = (self.width, self.height)
self.fps = math.ceil(self.stream.get(FPS))
self.frames = int(self.stream.get(FRAME_COUNT))
# since we use UMat to store the images to
# we need to initialize them beforehand
self.qframes = [0] * qsize
for ii in range(qsize):
self.qframes[ii] = cv.UMat(self.height, self.width, cv.CV_8UC3)
    def begin_stream(self):
        # start a thread to read frames from the file video stream
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()
        return self
def update(self):
# keep looping infinitely
while True:
if self.kill_stream:
return
# otherwise, ensure the queue has room in it
if not self.Q.full():
self.count += 1
target = (self.count-1) % self.Q.maxsize
                ret = self.stream.grab()
                if not ret:
                    # end of file: stop producing; calling release() here would
                    # make the reader thread try to join itself
                    self.kill_stream = True
                    return
self.stream.retrieve(self.qframes[target])
# add the frame to the queue
self.Q.put(target)
def read(self):
        while not self.more() and not self.kill_stream:
time.sleep(0.1)
# return next frame in the queue
return self.qframes[self.Q.get()]
def more(self):
# return True if there are still frames in the queue
return self.Q.qsize() > 0
def release(self):
self.kill_stream = True
# wait until stream resources are released
self.thread.join()
# Gets frame count
def count_frames(self):
if not self.kill_stream and not self.live_video:
return self.frames
# if get_opencv_version() == '2':
# return int(self.stream.get(FRAME_COUNT_DEPR))
# else:
# return int(self.stream.get(FRAME_COUNT))
if self.live_video:
print('[WARNING] Frames cannot be computed on live streams')
return -1
# Gets FPS count
def get_fps(self):
if not self.kill_stream:
return self.fps
# Get frame dimensions
def get_res(self):
return self.res
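# A minimal read-loop sketch, not part of the original class. `path` is
# assumed to point at an existing video file; the frame count reported by some
# codecs is only approximate, so the loop bound is best-effort.
def _example_read_loop(path):
    stream = GPUFileStream(path).begin_stream()
    print('resolution:', stream.get_res(), 'fps:', stream.get_fps())
    for _ in range(stream.count_frames()):
        frame = stream.read()  # an OpenCL-backed cv.UMat
        # ... process `frame` here ...
    stream.release()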
|
evaluation.py
|
import os
import json
import torch
import threading
import numpy as np
from functools import partial
from PyQt5 import QtWidgets
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from gui_utils.auxilary_utils import GetFolderWidget, GetFileWidget, FeatureListWidget
from models.rcnn import RecurrentCNN
from models.cnn_classifier import Classifier
from models.cnn_segmentator import Segmentator
from processing_utils.runner import BasicRunner
from processing_utils.roi import construct_ROI
from processing_utils.run_utils import Feature
class EvaluationParameterWindow(QtWidgets.QDialog):
"""
Evaluation Parameter Window, where one should choose parameters for evaluation
Parameters
----------
    mode : str
        One of 'all in one' or 'sequential'
parent : MainWindow(QtWidgets.QMainWindow)
-
Attributes
----------
    mode : str
        One of 'all in one' or 'sequential'
parent : MainWindow(QtWidgets.QMainWindow)
-
test_folder_getter : GetFolderWidget
A getter for a path to test data
model_weights_getter : GetFileWidget
A getter for a path to weights for 'all-in-one' model (optional)
classifier_weights_getter : GetFileWidget
        A getter for a path to weights for the classifier model (used in 'sequential' mode)
peak_points_getter : QtWidgets.QLineEdit
A getter for peak_minimum_points parameter
segmentator_weights_getter : GetFileWidget
        A getter for a path to weights for the segmentator model (used in 'sequential' mode)
"""
def __init__(self, mode, parent=None):
self.mode = mode
self.parent = parent
super().__init__(parent)
self.setWindowTitle('peakonly: evaluation')
test_folder_label = QtWidgets.QLabel()
test_folder_label.setText('Choose a folder with test data:')
self.test_folder_getter = GetFolderWidget(os.path.join(os.getcwd(), 'data', 'test'), self)
if mode == 'all in one':
model_weights_label = QtWidgets.QLabel()
model_weights_label.setText("Choose weights for 'all-in-one' model")
# to do: save a pytorch script, not a model state
self.model_weights_getter = GetFileWidget('pt', os.path.join(os.getcwd(),
'data/weights/RecurrentCNN.pt'), self)
elif mode == 'sequential':
classifier_weights_label = QtWidgets.QLabel()
classifier_weights_label.setText('Choose weights for a classifier')
# to do: save a pytorch script, not a model state
self.classifier_weights_getter = GetFileWidget('pt', os.path.join(os.getcwd(),
'data/weights/Classifier.pt'), self)
segmentator_weights_label = QtWidgets.QLabel()
segmentator_weights_label.setText('Choose weights for a segmentator')
# to do: save a pytorch script, not a model state
self.segmentator_weights_getter = GetFileWidget('pt', os.path.join(os.getcwd(),
'data/weights/Segmentator.pt'), self)
else:
assert False, mode
peak_points_label = QtWidgets.QLabel()
peak_points_label.setText('Minimal length of peak:')
self.peak_points_getter = QtWidgets.QLineEdit(self)
self.peak_points_getter.setText('8')
run_button = QtWidgets.QPushButton('Run evaluation')
run_button.clicked.connect(self._run_evaluation)
main_layout = QtWidgets.QVBoxLayout()
main_layout.addWidget(test_folder_label)
main_layout.addWidget(self.test_folder_getter)
if mode == 'all in one':
main_layout.addWidget(model_weights_label)
main_layout.addWidget(self.model_weights_getter)
elif mode == 'sequential':
main_layout.addWidget(classifier_weights_label)
main_layout.addWidget(self.classifier_weights_getter)
main_layout.addWidget(segmentator_weights_label)
main_layout.addWidget(self.segmentator_weights_getter)
main_layout.addWidget(peak_points_label)
main_layout.addWidget(self.peak_points_getter)
main_layout.addWidget(run_button)
self.setLayout(main_layout)
def _run_evaluation(self):
try:
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# to do: device should be customizable parameter
test_folder = self.test_folder_getter.get_folder()
if self.mode == 'all in one':
# to do: save models as pytorch scripts
model = RecurrentCNN().to(device)
path2weights = self.model_weights_getter.get_file()
model.load_state_dict(torch.load(path2weights, map_location=device))
model.eval()
models = [model]
elif self.mode == 'sequential':
classifier = Classifier().to(device)
path2classifier_weights = self.classifier_weights_getter.get_file()
classifier.load_state_dict(torch.load(path2classifier_weights, map_location=device))
classifier.eval()
segmentator = Segmentator().to(device)
path2segmentator_weights = self.segmentator_weights_getter.get_file()
segmentator.load_state_dict(torch.load(path2segmentator_weights, map_location=device))
segmentator.eval()
models = [classifier, segmentator]
else:
assert False, self.mode
minimum_peak_points = int(self.peak_points_getter.text())
runner = BasicRunner(self.mode, models,
minimum_peak_points, device)
main_window = EvaluationMainWindow(test_folder, runner, self.parent)
main_window.show()
self.close()
except ValueError:
# popup window with exception
msg = QtWidgets.QMessageBox(self)
msg.setText("Check parameters. Something is wrong!")
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.exec_()
class EvaluationMainWindow(QtWidgets.QDialog):
"""
Evaluation Main Window, where one can look into the model quality
Parameters
----------
test_folder : str
A path to folder with test data
runner : BasicRunner
-
parent : MainWindow(QtWidgets.QMainWindow)
-
Attributes
----------
test_folder : str
A path to folder with test data
runner : BasicRunner
-
parent : MainWindow(QtWidgets.QMainWindow)
-
tp_features : FeatureListWidget
true positives features
tn_features : FeatureListWidget
true negatives features
fp_features : FeatureListWidget
false positives features
fn_features : FeatureListWidget
false negatives features
figure : Figure
-
ax : Axes
-
canvas : FigureCanvasQTAgg
-
"""
def __init__(self, test_folder, runner, parent):
self.parent = parent
super().__init__(parent)
self.setWindowTitle('peakonly: evaluation')
self.test_folder = test_folder
self.runner = runner
self._init_ui()
def _init_ui(self):
# create lists of features
lists_layout = QtWidgets.QHBoxLayout()
tp_layout = QtWidgets.QVBoxLayout()
tp_label = QtWidgets.QLabel()
tp_label.setText('True positives:')
tp_layout.addWidget(tp_label)
self.tp_features = self.create_list_of_features()
tp_layout.addWidget(self.tp_features)
tp_next_button = QtWidgets.QPushButton('Next')
tp_next_button.clicked.connect(partial(self.next_feature, self.tp_features))
tp_layout.addWidget(tp_next_button)
lists_layout.addLayout(tp_layout)
tn_layout = QtWidgets.QVBoxLayout()
tn_label = QtWidgets.QLabel()
tn_label.setText('True negatives:')
tn_layout.addWidget(tn_label)
self.tn_features = self.create_list_of_features()
tn_layout.addWidget(self.tn_features)
tn_next_button = QtWidgets.QPushButton('Next')
tn_next_button.clicked.connect(partial(self.next_feature, self.tn_features))
tn_layout.addWidget(tn_next_button)
lists_layout.addLayout(tn_layout)
fp_layout = QtWidgets.QVBoxLayout()
fp_label = QtWidgets.QLabel()
fp_label.setText('False positives:')
fp_layout.addWidget(fp_label)
self.fp_features = self.create_list_of_features()
fp_layout.addWidget(self.fp_features)
fp_next_button = QtWidgets.QPushButton('Next')
fp_next_button.clicked.connect(partial(self.next_feature, self.fp_features))
fp_layout.addWidget(fp_next_button)
lists_layout.addLayout(fp_layout)
fn_layout = QtWidgets.QVBoxLayout()
fn_label = QtWidgets.QLabel()
fn_label.setText('False negatives:')
fn_layout.addWidget(fn_label)
self.fn_features = self.create_list_of_features()
fn_layout.addWidget(self.fn_features)
fn_next_button = QtWidgets.QPushButton('Next')
fn_next_button.clicked.connect(partial(self.next_feature, self.fn_features))
fn_layout.addWidget(fn_next_button)
lists_layout.addLayout(fn_layout)
# statistic button
right_half_layout = QtWidgets.QVBoxLayout()
right_half_layout.addLayout(lists_layout)
statistics_button = QtWidgets.QPushButton('Plot confusion matrix')
statistics_button.clicked.connect(self.plot_confusion_matrix)
right_half_layout.addWidget(statistics_button)
# Main canvas and toolbar
self.figure = plt.figure()
self.ax = self.figure.add_subplot(111) # plot here
self.canvas = FigureCanvas(self.figure)
toolbar = NavigationToolbar(self.canvas, self)
canvas_layout = QtWidgets.QVBoxLayout()
canvas_layout.addWidget(toolbar)
canvas_layout.addWidget(self.canvas)
main_layout = QtWidgets.QHBoxLayout()
main_layout.addLayout(canvas_layout, 60)
main_layout.addLayout(right_half_layout, 40)
self.setLayout(main_layout)
thread = threading.Thread(target=self.update)
thread.start()
def create_list_of_features(self):
list_of_features = FeatureListWidget()
list_of_features.connectDoubleClick(self.feature_click)
return list_of_features
def feature_click(self, item):
list_widget = item.listWidget()
feature = list_widget.get_feature(item)
self.plot_feature(feature)
def next_feature(self, list_widget):
        row = list_widget.currentRow()
        item = list_widget.item(min(row + 1, list_widget.count() - 1))
list_widget.setCurrentItem(item)
feature = list_widget.get_feature(item)
self.plot_feature(feature)
def update(self):
for file in os.listdir(self.test_folder):
if file[0] != '.':
with open(os.path.join(self.test_folder, file)) as json_file:
dict_roi = json.load(json_file)
# get predicted features
roi = construct_ROI(dict_roi)
features = self.runner(roi, 'predicted/' + file)
# append gt (ground truth) features
for border in dict_roi['borders']:
                    gt = np.zeros(len(roi.i), dtype=bool)
gt[border[0]:border[1]+1] = 1
scan_frequency = (roi.scan[1] - roi.scan[0]) / (roi.rt[1] - roi.rt[0])
rtmin = roi.rt[0] + border[0] / scan_frequency
rtmax = roi.rt[0] + border[1] / scan_frequency
match = False
for feature in features:
if len(feature) == 1 and feature.samples[0][:2] == 'pr':
predicted_border = feature.borders[0]
                            pred = np.zeros(len(roi.i), dtype=bool)
pred[predicted_border[0]:predicted_border[1]+1] = 1
# calculate iou
intersection = (pred & gt).sum() # will be zero if Truth=0 or Prediction=0
union = (pred | gt).sum()
if intersection / union > 0.5:
match = True
feature.append('gt/' + file, roi, border, 0, np.sum(roi.i[border[0]:border[1]]),
roi.mzmean, rtmin, rtmax)
break
if not match:
features.append(Feature(['gt/' + file], [roi], [border], [0], [np.sum(roi.i[border[0]:border[1]])],
roi.mzmean, rtmin, rtmax, 0, 0))
# append tp, tn, fp, fn
for feature in features:
if len(feature) == 2:
self.tp_features.add_feature(feature)
elif len(feature) == 1 and feature.samples[0][:2] == 'pr':
self.fp_features.add_feature(feature)
elif len(feature) == 1 and feature.samples[0][:2] == 'gt':
self.fn_features.add_feature(feature)
else:
print(len(feature)), print(feature.samples[0][:2])
assert False, feature.samples
if len(features) == 0:
noise_feature = Feature(['noise/' + file], [roi], [[0, 0]], [0], [0],
roi.mzmean, roi.rt[0], roi.rt[1], 0, 0)
self.tn_features.add_feature(noise_feature)
def plot_feature(self, feature):
self.ax.clear()
feature.plot(self.ax, shifted=False, show_legend=True)
self.canvas.draw() # refresh canvas
def plot_confusion_matrix(self):
# to do: create a window with stats
tp_features = self.tp_features.get_all()
tn_features = self.tn_features.get_all()
fp_features = self.fp_features.get_all()
fn_features = self.fn_features.get_all()
subwindow = EvaluationStatisticsWindow(tp_features, tn_features, fp_features, fn_features, self)
subwindow.show()
class EvaluationStatisticsWindow(QtWidgets.QDialog):
def __init__(self, tp_features, tn_features, fp_features, fn_features, parent):
self.parent = parent
super().__init__(parent)
self.setWindowTitle('evaluation: confusion matrix')
# auxiliary calculations
precision = len(tp_features) / (len(tp_features) + len(fp_features))
recall = len(tp_features) / (len(tp_features) + len(fn_features))
integration_accuracy = np.zeros(len(tp_features))
for i, feature in enumerate(tp_features):
integration_accuracy[i] = np.abs(feature.intensities[0] - feature.intensities[1]) / feature.intensities[1]
integration_accuracy = 1 - np.mean(integration_accuracy)
# print metrics
precision_label = QtWidgets.QLabel()
precision_label.setText(f'Precision = {precision:.2f}')
recall_label = QtWidgets.QLabel()
recall_label.setText(f'Recall = {recall:.2f}')
integration_accuracy_label = QtWidgets.QLabel()
integration_accuracy_label.setText(f'Integration accuracy = {integration_accuracy:.2f}')
# canvas for confusion matrix
self.figure = plt.figure()
self.ax = self.figure.add_subplot(111) # plot here
self.canvas = FigureCanvas(self.figure)
main_layout = QtWidgets.QVBoxLayout()
main_layout.addWidget(self.canvas)
main_layout.addWidget(precision_label)
main_layout.addWidget(recall_label)
main_layout.addWidget(integration_accuracy_label)
self.setLayout(main_layout)
self.plot_confusion_matrix(len(tp_features), len(tn_features), len(fp_features), len(fn_features))
def plot_confusion_matrix(self, tp, tn, fp, fn):
        confusion_matrix = np.zeros((2, 2), dtype=int)
confusion_matrix[0, 0] = tp
confusion_matrix[0, 1] = fp
confusion_matrix[1, 0] = fn
confusion_matrix[1, 1] = tn
self.ax.set_title("Confusion matrix")
res = self.ax.imshow(confusion_matrix, cmap='GnBu', interpolation='nearest')
self.figure.colorbar(res)
self.ax.set_xticks(np.arange(2))
self.ax.set_xticklabels(['peak', 'noise'])
self.ax.set_yticks(np.arange(2))
self.ax.set_yticklabels(['peak', 'noise'])
self.ax.set_ylabel("predicted")
self.ax.set_xlabel("ground truth")
for i, row in enumerate(confusion_matrix):
for j, count in enumerate(row):
plt.text(j, i, count, fontsize=14, horizontalalignment='center', verticalalignment='center')
self.canvas.draw()
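# ---------------------------------------------------------------------------
# A standalone sketch of the intersection-over-union criterion that
# EvaluationMainWindow.update() uses to match a predicted border against a
# ground-truth border; the names and the 0.5 threshold mirror the code above,
# but this helper itself is not part of the original file.
def iou_match(pred_border, gt_border, signal_length, threshold=0.5):
    pred = np.zeros(signal_length, dtype=bool)
    gt = np.zeros(signal_length, dtype=bool)
    pred[pred_border[0]:pred_border[1] + 1] = True
    gt[gt_border[0]:gt_border[1] + 1] = True
    intersection = (pred & gt).sum()  # zero if either mask is empty
    union = (pred | gt).sum()
    return bool(union) and intersection / union > threshold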
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from urllib.request import urlopen
from common import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from common import create_file, parameterized, ensure_dir, disabled, test_file, WEBIDL_BINDER
from common import read_file, require_v8
from tools import shared
from tools import ports
from tools.shared import EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def also_with_wasmfs(f):
def metafunc(self, wasmfs):
if wasmfs:
self.set_setting('WASMFS')
self.emcc_args = self.emcc_args.copy() + ['-DWASMFS']
f(self)
else:
f(self)
metafunc._parameterize = {'': (False,),
'wasmfs': (True,)}
return metafunc
def shell_with_script(shell_file, output_file, replacement):
shell = read_file(path_from_root('src', shell_file))
create_file(output_file, shell.replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
def also_with_threads(f):
def decorated(self):
f(self)
if not os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
print('(threads)')
self.emcc_args += ['-pthread']
f(self)
return decorated
# Today we only support the wasm backend, so any test that was disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.browser_timeout = 60
if EMTEST_BROWSER != 'node':
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super().setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
        # TODO: This test is verifying behavior that will be deprecated at some point in the future; remove this test
        # once system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
def test_sdl1_es6(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL', '-s', 'EXPORT_ES6'])
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-gsource-map'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log/emscripten_log.cpp'),
args=['--pre-js', path_from_root('src/emscripten-source-map.min.js'), '-gsource-map'])
@also_with_wasmfs
def test_preload_file(self):
create_file('somefile.txt', 'load me right before running the code please')
create_file('.somefile.txt', 'load me right before running the code please')
create_file('some@file.txt', 'load me right before running the code please')
absolute_src_path = os.path.abspath('somefile.txt')
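# Generate a main.cpp that opens the given path on the target filesystem and
# asserts that it contains the expected preloaded text.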
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
# TODO: change this when wasmfs supports relative paths.
if self.get_setting('WASMFS'):
path = "/" + path
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath])
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
create_file(tricky_filename, 'load me right before running the code please')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped as '@@' to avoid confusion with the 'src@dst' notation.
self.btest_exit('main.cpp', args=['--preload-file', tricky_filename.replace('@', '@@')])
# TODO: WASMFS doesn't support the rest of this test yet. Exit early.
if self.get_setting('WASMFS'):
return
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.btest_exit('main.cpp', args=['--preload-file', absolute_src_path])
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
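# Generate a main.cpp that checks that two files exist with the expected contents
# and that a third (excluded) path does not exist.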
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
f = fopen("%s", "r");
assert(f != NULL);
fclose(f);
f = fopen("%s", "r");
assert(f == NULL);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath, '--exclude-file', '*/.*'])
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'], reporting=Reporting.JS_ONLY)
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?exit:0')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.btest_exit('main.cpp', args=['--pre-js', 'pre.js', '--use-preload-plugins'])
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that output files whose names contain single or double quotes are handled by
# correctly escaping the names.
def test_output_file_escaping(self):
self.set_setting('EXIT_RUNTIME')
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windows platforms they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.abspath(d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
create_file(os.path.join(d, txt), 'load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
create_file(cpp, r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
abs_txt = os.path.join(abs_d, txt)
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.abspath(page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser(page_file, '|load me right before|.', '/report_result?exit:0')
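# Tests --use-preload-cache: on the first page load nothing is cached yet (exit:0);
# on the second load the data package should be served from the IndexedDB cache (exit:1).
# Parameterized over payload sizes, including ones near Chrome's IndexedDB item-size limit.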
@parameterized({
'0': (0,),
'1mb': (1 * 1024 * 1024,),
'100mb': (100 * 1024 * 1024,),
'150mb': (150 * 1024 * 1024,),
})
def test_preload_caching(self, extra_size):
self.set_setting('EXIT_RUNTIME')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
if is_chrome() and extra_size >= 100 * 1024 * 1024:
self.skipTest('chrome bug')
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.c', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
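# As above, but the data package is built directly with the file packager, using
# --use-preload-cache and a custom --indexedDB-name.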
def test_preload_caching_indexeddb_name(self):
self.set_setting('EXIT_RUNTIME')
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.c', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_multifile(self):
# a few files inside a directory
ensure_dir('subdirr/moar')
create_file('subdirr/data1.txt', '1214141516171819')
create_file('subdirr/moar/data2.txt', '3.14159265358979')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
assert(strcmp("3.14159265358979", buf) == 0);
return 0;
}
''')
# by individual files
self.btest_exit('main.c', args=['--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt'])
# by directory, and remove the source files to make sure the preloaded data is what gets used
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--preload-file', 'subdirr', '-o', 'page.html'], reporting=Reporting.JS_ONLY)
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?exit:0')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(Path('subdirr/data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
default_shell = read_file(path_from_root('src/shell.html'))
create_file('shell.html', default_shell.replace('var Module = {', '''
var Module = {
locateFile: function(path, prefix) {
if (path.endsWith(".wasm")) {
return prefix + path;
} else {
return "cdn/" + path;
}
},
'''))
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
return 0;
}
''')
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.data', Path('cdn/test.data'))
self.run_browser('test.html', '', '/report_result?exit:0')
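# Tests that a failure to load the .data package (missing file, unknown protocol,
# wrong port) surfaces via window.onerror and names the missing data file.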
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file runs xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test that an unknown protocol goes through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(Path('filesystem/dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@also_with_wasmfs
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
@parameterized({
'': ([],),
# add testing for closure on preloaded files + ENVIRONMENT=web (we must not
# emit any node.js code here, see
# https://github.com/emscripten-core/emscripten/issues/14486
'closure_webonly': (['--closure', '1', '-s', 'ENVIRONMENT=web'],)
})
def test_sdl_image_prepare_data(self, args):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + args, manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
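# Post-build helper for manual-reference tests: appends reftest.js to the generated
# test.html and overrides window.close so the reftest runs only after the final
# frames have been rendered.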
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Only send the keypress event if the prior keydown event's
// default action was not prevented.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. The keypress event should follow, since the keydown's
// default action is not prevented.
sendKey(65);
// Send backspace. The keypress should not be sent, since default handling
// of the keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', 'EXPORTED_FUNCTIONS=_main'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('test_glfw_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the attributes we want to test in the WebGL implementation
# (request the attribute, create a context, and then check its value in the resulting context attributes).
# Tests will succeed when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.basename(filepath)
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl2.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest_exit('webgl_error.cpp')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest_exit('webgl_parallel_shader_compile.cpp')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest_exit('webgl_explicit_uniform_location.c', args=['-s', 'GL_EXPLICIT_UNIFORM_LOCATION=1', '-s', 'MIN_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sampler_layout_binding(self):
self.btest_exit('webgl_sampler_layout_binding.c', args=['-s', 'GL_EXPLICIT_UNIFORM_BINDING=1'])
@requires_graphics_hardware
def test_webgl2_ubo_layout_binding(self):
self.btest_exit('webgl2_ubo_layout_binding.c', args=['-s', 'GL_EXPLICIT_UNIFORM_BINDING=1', '-s', 'MIN_WEBGL_VERSION=2'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest_exit('preinitialized_webgl_context.cpp', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest_exit('emscripten_get_now.cpp', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
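# Tests that data written by the first page load persists and is visible to later
# loads, even when a different file is preloaded over the same path.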
def test_file_db(self):
secret = str(time.time())
create_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
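# IDBFS persistence: the first run (-DFIRST) stores a secret, and the second run
# syncs it back from IndexedDB and verifies it.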
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs/test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs/test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
open(Path('sub/file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', Path('sub/file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
open(Path('subdir/file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc; -s LZ4=1 tells emcc to instruct the file packager to use LZ4
print('emcc-normal')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(Path('subdir/file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs/test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', Path('files/file1.txt'))
shutil.copyfile('file2.txt', Path('files/file2.txt'))
shutil.copyfile('file3.txt', Path('files/file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when the metadata script runs before
# the main program and when it runs later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(Path('browser/separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
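# Drives the idbstore.js library through a sequence of stages, each one a fresh
# page load sharing the same secret.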
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(test_file('idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
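# Shared helper for the EGL tests: build test_egl.c with any extra flags and check
# that the page reports success.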
def _test_egl_base(self, *args):
self.compile_btest([test_file('test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.compile_btest([test_file('test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
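# Synchronous chunked XHR from a web worker: a helper HTTP server (started below)
# serves random data in chunks, the worker reads it via a lazy file and checksums
# it, and the page reports the adler32 checksum.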
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
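# The worker lazily mounts a remote file via FS.createLazyFile and streams its stdout/stderr back to the page,
# which forwards the checksum line to the test server.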
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. test_file('hello_world_gles.c')
self.compile_btest([test_file(c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value (originally for Python 2 compatibility, where adler32 could return a signed value)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for _ in range(60):
  try:
    urlopen('http://localhost:11111')
    break
  except Exception:
    print('(sleep for server)')
    time.sleep(1)
else:
  # the loop never broke out, so the server never became reachable
  raise Exception('chunked XHR test server did not start within 60 seconds')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that the server process has released its file locks,
# otherwise test tearDown may attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
for filename in ['hello_world_gles.c', 'hello_world_gles_full.c', 'hello_world_gles_full_944.c']:
print(filename)
cmd = [test_file(filename), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')]
if 'full' in filename:
cmd += ['-s', 'FULL_ES2=1']
self.compile_btest(cmd)
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
assert 'gl-matrix' not in read_file('test.html'), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
Path('Chapter_2/Hello_Triangle', 'CH02_HelloTriangle.o'),
Path('Chapter_8/Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
Path('Chapter_9/Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
Path('Chapter_9/Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
Path('Chapter_9/TextureWrap', 'CH09_TextureWrap.o'),
Path('Chapter_10/MultiTexture', 'CH10_MultiTexture.o'),
Path('Chapter_13/ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('glbook/Chapter_10/MultiTexture/basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('glbook/Chapter_10/MultiTexture/lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('glbook/Chapter_13/ParticleSystem/smoke.tga'), 'smoke.tga')
for source, reference in [
(Path('glbook/Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('glbook/CH02_HelloTriangle.png')),
# (Path('glbook/Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('glbook/CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(Path('glbook/Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('glbook/CH09_TextureWrap.png')),
# (Path('glbook/Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('glbook/CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(Path('glbook/Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('glbook/CH09_SimpleTexture2D.png')),
(Path('glbook/Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('glbook/CH10_MultiTexture.png')),
(Path('glbook/Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('glbook/CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('glbook/Common'),
test_file('glbook/Common/esUtil.c'),
test_file('glbook/Common/esShader.c'),
test_file('glbook/Common/esShapes.c'),
test_file('glbook/Common/esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest_exit('emscripten_api_browser.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_third', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest_exit('emscripten_api_browser2.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(Path('sub/test.data'), 'test.data')
self.btest_exit('emscripten_api_browser2.c', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest_exit('emscripten_api_browser_infloop.cpp', assert_returncode=7)
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest_exit('emscripten_fs_api_browser.c', assert_returncode=1, args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-s', "ASSERTIONS=0"])
self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest_exit('emscripten_main_loop.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0'],
]:
self.btest_exit('emscripten_main_loop_settimeout.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_main_loop_and_blocker.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp')
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_main_loop_setimmediate.cpp', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre3.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(Path('third_party/cubegeom', 'cubegeom_proc.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_glew.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color.c'), reference=Path('third_party/cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_normal.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_mt.c'), reference=Path('third_party/cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color2.c'), reference=Path('third_party/cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_texturematrix.c'), reference=Path('third_party/cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_fog.c'), reference=Path('third_party/cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao_es.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_u4fv_2.c'), reference=Path('third_party/cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point {
int x, y;
};
''')
create_file('supp.c', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point *p) {
printf("supp: %d,%d\n", p->x, p->y);
mainFunc(p->x + p->y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.c', r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
extern void suppFunc(struct point *p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
assert(x == 56);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(&p);
printf("main see: %d\nok.\n", suppInt);
assert(suppInt == 76);
return 0;
}
''')
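# Build supp.c as a side module, then link it at runtime from the main-module build below.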
self.run_process([EMCC, 'supp.c', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2'])
self.btest_exit('main.c', args=['-s', 'MAIN_MODULE=2', '-O2', 'supp.wasm'])
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
self.set_setting('WASM_ASYNC_COMPILATION', 0)
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
args = ['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1']
# with assertions, we notice when memory was written to too early
expected = 'abort:Assertion failed: native function `note` called before runtime initialization'
self.btest('mem_init.cpp', expected=expected, args=args)
# otherwise, we just overwrite
self.btest_exit('mem_init.cpp', args=args + ['-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
maybeReportResultToServer('got_error');
}
console.log('WARNING: ' + x);
};
''')
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
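# A real mem init file should let the program run and exit normally; a bogus URL should hit the warning path.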
self.set_setting('EXIT_RUNTIME')
test('test.html.mem', 'exit:0')
test('nothing.nowhere', 'got_error')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
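# post_test calls into the compiled module and, unless expected_ok is set, expects every call to abort
# because the runtime is not ready yet.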
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so the direct call will abort
} catch(e) {
out('expected fail 3:' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
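# post_hook reports the value recorded by the native code to the test server, then repeats the calls
# from myJSCallback(), which runs at a time when the runtime is ready.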
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
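# Run both variants (with main(), and driven from onRuntimeInitialized) in wasm and WASM=0 modes,
# checking the 'too early', 'too late' and 'runtime still alive' cases.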
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(Path('browser/cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', test_file('browser/cwrap_early.js'), '-s', 'EXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify=0', '-s', 'EXPORTED_FUNCTIONS=_one,_two,_three,_four', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one', '-s', 'ASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_with_pthread_compilation_fails(self):
self.run_process([EMCC, '-c', '-o', 'hello.o', test_file('hello_world.c')])
stderr = self.expect_fail([EMCC, 'hello.o', '-o', 'a.js', '-g', '--closure=1', '-s', 'USE_PTHREADS', '-s', 'BUILD_AS_WORKER=1'])
self.assertContained('error: USE_PTHREADS + BUILD_AS_WORKER require separate modes that don\'t work together, see https://github.com/emscripten-core/emscripten/issues/8854', stderr)
def test_emscripten_async_wget2(self):
self.btest_exit('test_emscripten_async_wget2.cpp')
def test_emscripten_async_wget2_data(self):
create_file('hello.txt', 'Hello Emscripten!')
self.btest('test_emscripten_async_wget2_data.cpp', expected='0')
time.sleep(10)
def test_emscripten_async_wget_side_module(self):
self.run_process([EMCC, test_file('browser_module.c'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE'])
self.btest_exit('browser_main.c', args=['-O2', '-s', 'MAIN_MODULE=2'])
@parameterized({
'non-lz4': ([],),
'lz4': (['-s', 'LZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.so'])
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
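# Preload the current directory so library.so is instantiated ahead of time (Module['preloadedWasm'])
# and dlopen() can find it.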
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE=2', '--preload-file', '.@/', '-O2', '--use-preload-plugins'] + args)
def test_mmap_file(self):
create_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(test_file('mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that builds with --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high-quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run the tests in Node and/or SPIDERMONKEY using self.run_js. Use the closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid/test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = read_file('test.js')
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid/test.js'))
try_delete(test_file('uuid/test.js.map'))
# Now run test in browser
self.btest(test_file('uuid/test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(test_file('test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'])
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest_exit(test_file('webgl_create_context2.cpp'))
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser/html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'])
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest_exit(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'])
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
@requires_graphics_hardware
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'])
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
@requires_graphics_hardware
def test_webgl_unmasked_vendor_webgl(self):
self.btest_exit(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'])
@requires_graphics_hardware
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure=1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest_exit(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts)
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest_exit(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest_exit(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_webgl2_objects(self):
self.btest_exit(test_file('webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest_exit(test_file('html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode)
@requires_graphics_hardware
def test_webgl2_ubos(self):
self.btest_exit(test_file('webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'])
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'))
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest_exit(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'])
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
# but context creation fails, we can then manually try to create a
# WebGL1 context and succeed.
self.btest_exit(test_file('test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest_exit(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest_exit(test_file('webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure=1', '-lGL'])
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest_exit(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'])
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest_exit(test_file('webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'])
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party/sokol/mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=Path('third_party/sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party/sokol/mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party/sokol/arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget.c'), args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget_data.c'), args=['-O2', '-g2', '-s', 'ASYNCIFY'])
@parameterized({
'': ([],),
'es6': (['-s', 'EXPORT_ES6=1'],),
})
def test_locate_file(self, args):
self.set_setting('EXIT_RUNTIME')
for wasm in [0, 1]:
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
shutil.move('test.data', Path('sub/test.data'))
self.run_browser('page.html', None, '/report_result?exit:0')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
self.run_browser('page.html', None, '/report_result?exit:' + expected)
in_html('0')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
return result;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
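# Use a custom page that positions the canvas inside an offset container, to check that SDL reports
# canvas-relative mouse coordinates.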
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure=1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_glclipplane_gllighting(self):
self.btest('sdl2_glclipplane_gllighting.c', reference='sdl2_glclipplane_gllighting.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='glClipPlane and GL_LIGHTING emulation. You should see a torus cut open on one side with lighting from one lightsource applied.')
@requires_graphics_hardware
def test_sdl2_glalphatest(self):
self.btest('sdl2_glalphatest.c', reference='sdl2_glalphatest.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL_ALPHA_TEST emulation. You should see gradients with different alpha testing modes and reference values.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = read_file('test.html')
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype/LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party/notofont/NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2'])
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
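# Compile sdl2_misc.c to an object file first and then link it in a separate step, checking that
# USE_SDL=2 works when compiling and linking are split.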
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds/the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(ports.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-s', 'ASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-s', 'ASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_mainloop.cpp', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=[sync_tunnel, sync_tunnel_bool]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', 'sync_tunnel\nsync_tunnel_bool\n')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser/async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
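# Test that overflowing the asyncify stack is detected: ASYNCIFY_STACK_SIZE=4 leaves far too little
# room to unwind, so the program is expected to abort with an unreachable RuntimeError.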
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
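# Test behavior when ASYNCIFY_ONLY names a function ('waka') that does not exist in the program.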
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause a timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
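# Test that a network error while fetching the wasm binary rejects the promise returned by the
# MODULARIZE factory (a.out.wasm is deleted below to force the failure).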
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message);
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?Aborted(both async and sync fetching of the wasm failed)')
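# Test that an error thrown during module initialization rejects the MODULARIZE factory's promise
# instead of surfacing as an unhandled rejection.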
def test_modularize_init_error(self):
test_cpp_path = test_file('browser/test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# Test illustrating the regression in the modularize feature introduced in commit c5af8f6
# when compiling with the --preload-file option.
def test_modularize_and_preload_files(self):
self.set_setting('EXIT_RUNTIME')
# an amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use bracket access (not dot access) so the lookup still works when closure compiler renames properties
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts, reporting=Reporting.JS_ONLY)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?exit:0')
def test_webidl(self):
# see original in test_core.py
self.run_process([WEBIDL_BINDER, test_file('webidl/test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(Path('webidl/test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
return 0;
}
''')
create_file('side.c', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', 'side.wasm'])
print('wasm in worker (we can read binary data synchronously there)')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '--proxy-to-worker', 'side.wasm'])
print('wasm (will auto-preload since no sync binary reading)')
# same wasm side module works
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '-s', 'EXPORT_ALL', 'side.wasm'])
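# Test dlopen of a side module (libside.so) that is not preloaded; the C side of the test lives in
# other/test_dlopen_async.c.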
def test_dlopen_async(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-s', 'SIDE_MODULE'])
self.btest_exit(test_file('other/test_dlopen_async.c'), args=['-s', 'MAIN_MODULE=2'])
def test_dlopen_blocking(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-s', 'SIDE_MODULE', '-s', 'USE_PTHREADS', '-Wno-experimental'])
# Attempting to dlopen the side module (without preloading) should fail on the main thread
# since the synchronous `readBinary` function does not exist.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), assert_returncode=1, args=['-s', 'MAIN_MODULE=2'])
# But with PROXY_TO_PTHREAD it does work, since we can do blocking and sync XHR in a worker.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), args=['-s', 'MAIN_MODULE=2', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-Wno-experimental'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output, emcc_args=[]):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# setup by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'] + emcc_args)
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('main.c', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
return 0;
}
''')
create_file('side.c', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', 'side.wasm'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('main.c', r'''
#include <assert.h>
int side1();
int side2();
int main() {
assert(side1() == 1);
assert(side2() == 2);
return 0;
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-s', 'MAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <cassert>
#include <thread>
#include <emscripten/emscripten.h>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
assert(side1_ptr == &side1);
assert(side2_ptr == &side2);
emscripten_force_exit(0);
}).detach();
emscripten_exit_with_live_runtime();
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.cpp'),
args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
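# Preload a 30MB data file while only 16MB of memory is available initially, so that memory growth
# has to happen while the runtime is still starting up.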
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
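# Create a shell page in which SharedArrayBuffer and Atomics are hidden, to simulate a browser
# without threading support.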
def prep_no_SAB(self):
create_file('html.html', read_file(path_from_root('src/shell_minimal.html')).replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-gsource-map', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with sufficient number of threads in the pool.
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking on insufficient number of threads in the pool.
self.btest(test_file('pthread/test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=3', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that it works when the pool has enough threads for a pthread creating another pthread.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Check that thread creation fails when the pool is too small for a pthread creating another pthread.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-DSMALL_POOL'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest_exit(test_file('pthread/test_pthread_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_64bit_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_64bit_cxx11_atomics(self, opt):
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest_exit(test_file('pthread/test_pthread_64bit_cxx11_atomics.cpp'), args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest_exit(test_file('pthread/test_pthread_hardware_concurrency.cpp'), args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread/main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_fetch_and_op.cpp'), args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_op_and_fetch.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the remaining GCC atomics not covered by the two tests above.
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_atomics.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest_exit(test_file('pthread/test_pthread_gcc_spinlock.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest_exit(test_file('pthread/test_pthread_create.cpp'),
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest_exit(test_file('pthread/test_pthread_preallocates_workers.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest_exit(test_file('pthread/test_large_pthread_allocation.cpp'), args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest_exit(test_file('pthread/test_pthread_proxy_to_pthread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_nested_spawns.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest_exit(test_file('pthread/test_pthread_join.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest_exit(test_file('pthread/test_std_thread_detach.cpp'), args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest_exit(test_file('pthread/test_pthread_cancel.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread/test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest_exit(test_file('pthread/test_pthread_kill.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest_exit(test_file('pthread/test_pthread_cleanup.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest_exit(test_file('pthread/test_pthread_mutex.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest_exit(test_file('pthread/test_pthread_attr_getstack.c'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest_exit(test_file('pthread/test_pthread_malloc.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(), while the main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest_exit(test_file('pthread/test_pthread_malloc_free.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest_exit(test_file('pthread/test_pthread_barrier.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest_exit(test_file('pthread/test_pthread_once.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_spawns.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure=1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest_exit(test_file('pthread/test_pthread_volatile.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest_exit(test_file('pthread/test_pthread_thread_local_storage.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest_exit(test_file('pthread/test_pthread_condition_variable.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest_exit(test_file('pthread/test_pthread_printf.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Previously failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest_exit(test_file('pthread/test_pthread_iostream.cpp'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
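# Test unistd file I/O from a pthread in a build with WASM_BIGINT enabled.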
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd/io.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest_exit(test_file('pthread/test_pthread_setspecific_mainthread.c'), args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest_exit(test_file('pthread/test_pthread_file_io.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest_exit(test_file('pthread/test_pthread_supported.cpp'), args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread/test_pthread_dispatch_after_exit.c'), args=['-s', 'USE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.set_setting('EXIT_RUNTIME')
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
_Atomic int result = 0;
void *thread_main(void *arg) {
result = 1;
pthread_exit(0);
}
int main() {
pthread_t t;
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
assert(result == 1);
return 0;
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.worker.js', Path('cdn/test.worker.js'))
if os.path.exists('test.html.mem'):
shutil.copyfile('test.html.mem', Path('cdn/test.html.mem'))
self.run_browser('test.html', '', '/report_result?exit:0')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'], reporting=Reporting.JS_ONLY)
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?exit:0')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), this does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest_exit(test_file('pthread/test_pthread_proxying_in_futex_wait.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest_exit(test_file('pthread/test_pthread_sbrk.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread_flood.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest_exit(test_file('pthread/call_async.c'), args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=_main,_malloc'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest_exit(test_file('pthread/test_pthread_clock_drift.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest_exit(test_file('pthread/test_pthread_utf8_funcs.cpp'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest_exit(test_file('pthread/test_futex_wake_all.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest_exit(test_file('pthread/test_pthread_stack_bounds.cpp'), args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest_exit(test_file('pthread/test_pthread_tls.cpp'), args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest_exit(test_file('pthread/test_pthread_tls_main.cpp'), args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core/test_safe_stack.c'), expected='abort:stack overflow', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB'])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread/test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_asan_use_after_free_2(self):
# similar to test_pthread_asan_use_after_free, but using a pool instead
# of proxy-to-pthread, and also the allocation happens on the pthread
# (which tests that it can use the offset converter to get the stack
# trace there)
self.btest(test_file('pthread/test_pthread_asan_use_after_free_2.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=1', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free_2.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core/pthread/test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core/pthread/test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_trap(self):
create_file('pre.js', '''
if (typeof window === 'object' && window) {
window.addEventListener('error', function(e) {
if (e.error && e.error.message.includes('unreachable'))
maybeReportResultToServer("expected exception caught");
else
maybeReportResultToServer("unexpected: " + e);
});
}''')
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'EXIT_RUNTIME',
'--profiling-funcs',
'--pre-js=pre.js']
self.btest(test_file('pthread/test_pthread_trap.c'), expected='expected exception caught', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core/test_main_thread_async_em_asm.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
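# Test EM_ASM blocking behavior under PROXY_TO_PTHREAD; the custom page copied from
# browser/test_em_asm_blocking.html loads the generated wasm.js and reports the result.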
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', read_file(test_file('browser/test_em_asm_blocking.html')))
self.compile_btest([test_file('browser/test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest_exit(test_file('test_sigalrm.c'), args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
    # Test that it is possible to define a "Module.locateFile(foo)" function to control where test.wasm is loaded from.
ensure_dir('cdn')
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', Path('cdn/test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
@also_with_threads
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
@also_with_threads
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
@also_with_threads
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
# pthread TextDecoder support is more complex due to
# https://github.com/whatwg/encoding/issues/172
# and therefore the expected code size win there is actually a loss
if '-pthread' not in self.emcc_args:
self.assertLess(td_without_fallback, just_fallback)
else:
self.assertGreater(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5787), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest_exit('gl_only_in_pthread.cpp', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
  # For testing WebGL draft extensions like this, when using Chrome as the browser,
  # you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
  # For testing WebGL draft extensions like this, when using Chrome as the browser,
  # you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
  # If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
  # Also, there is a known bug with baseInstance on Mac Intel GPUs that can keep this test from producing the expected image.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sample_query(self):
cmd = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL']
self.btest_exit('webgl_sample_query.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_timer_query(self):
for args in [
# EXT query entrypoints on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
# builtin query entrypoints on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2'],
# EXT query entrypoints on a WebGL 1.0 context while built for WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2'],
]:
cmd = args + ['-lGL']
self.btest_exit('webgl_timer_query.cpp', args=cmd)
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
      for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest_exit('webgl_draw_triangle.c', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest_exit('test_webgl_no_auto_init_extensions.c', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest_exit('webgl_offscreen_framebuffer_swap_with_bad_state.c', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest_exit('webgl_draw_triangle_with_uniform_color.c', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@parameterized({
'proxy': (['-sPROXY_TO_PTHREAD'],),
'': ([],),
})
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self, args):
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest_exit('webgl2_simple_enable_extensions.c', args=cmd)
@requires_graphics_hardware
def test_webgpu_basic_rendering(self):
for args in [[], ['-s', 'ASSERTIONS', '--closure=1'], ['-s', 'MAIN_MODULE=1']]:
self.btest_exit('webgpu_basic_rendering.cpp', args=['-s', 'USE_WEBGPU'] + args)
def test_webgpu_get_device(self):
for args in [['-s', 'ASSERTIONS', '--closure=1']]:
self.btest_exit('webgpu_get_device.cpp', args=['-s', 'USE_WEBGPU'] + args)
  # Tests the feature where the shell html page can preallocate the typed array and place it
  # in Module.buffer before loading the script page.
  # In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
  # Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
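  # (A rough illustrative sketch, not copied from test_preallocated_heap_shell.html: such a
  #  shell page would declare something like
  #    <script>var Module = { buffer: new ArrayBuffer(16 * 1024 * 1024) };</script>
  #  before the generated script tag, so the asm.js runtime adopts the preallocated
  #  buffer instead of allocating INITIAL_MEMORY itself.)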
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest_exit('fetch/to_memory.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest_exit('fetch/to_memory.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/from_thread.cpp',
expected='42',
args=args + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/to_indexeddb.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/cached_xhr.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/response_headers.cpp', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
  # Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest_exit('fetch/stream_file.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
  # In that case, append is implicitly assumed.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.cpp', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.cpp', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(test_file('asmfs/hello_file.txt'), Path('dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(test_file('asmfs/hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest_exit('cstdio/test_remove.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(test_file('unistd/close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(test_file('unistd/access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(test_file('unistd/unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest_exit('pthread/test_pthread_locale.c', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and
# emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest_exit('emscripten_set_canvas_element_size.c')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main
# thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_get_device_pixel_ratio.c', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest_exit(test_file('pthread/test_pthread_run_script.cpp'), args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest_exit('canvas_animate_resize.cpp', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_hello_thread(self, opts):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
@parameterized({
'': ([],),
'modularize': (['-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
'O3': (['-O3'],),
'O3_modularize': (['-O3', '-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
})
def test_minimal_runtime_hello_thread(self, opts):
self.btest_exit(test_file('pthread/hello_thread.c'), expected='1', args=['--closure=1', '-sMINIMAL_RUNTIME', '-sUSE_PTHREADS'] + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth_mainthread.c'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth.c'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest_exit(test_file('pthread/test_pthread_reltime.cpp'), args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'], reporting=Reporting.JS_ONLY)
shutil.copyfile(test_file('pthread/main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?exit:0')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], reporting=Reporting.JS_ONLY)
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?exit:0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
return 0;
}
''')
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.btest_exit('main.cpp', args=['--shell-file', 'shell.html'])
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest_exit(test_file('pthread/emscripten_thread_sleep.c'), args=['-s', 'USE_PTHREADS', '-s', 'EXPORTED_RUNTIME_METHODS=[print]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
src = read_file('test.html')
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but here we also
  # eval the initial code, so currentScript is not present. That prevents us
  # from finding the file in a subdir, but here we at least check we do not regress compared to the
  # normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', Path(filesystem_path, 'test.js'))
shutil.move('test.wasm', Path(filesystem_path, 'test.wasm'))
create_file(Path(filesystem_path, 'test.html'), '''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(test_file('emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(test_file('emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest_exit(test_file('emscripten_set_timeout.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest_exit(test_file('emscripten_set_timeout_loop.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest_exit(test_file('emscripten_set_immediate.c'))
def test_emscripten_set_immediate_loop(self):
self.btest_exit(test_file('emscripten_set_immediate_loop.c'))
@requires_threads
def test_emscripten_set_interval(self):
self.btest_exit(test_file('emscripten_set_interval.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(test_file('emscripten_console_log.c'), '0', args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest_exit(test_file('small_hello_world.c'), args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
self.btest_exit(test_file('browser/test_offset_converter.c'), assert_returncode=1, args=['-s', 'USE_OFFSET_CONVERTER', '-gsource-map', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest_exit(test_file('test_emscripten_unwind_to_js_event_loop.c'))
def test_wasm2js_fallback(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
      # Then disable WebAssembly support in the VM and try again. It should still work via the Wasm2JS fallback.
html = read_file('test.html')
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
      # Restore the .wasm.js file, then corrupt the .wasm file, which should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_system(self):
self.btest_exit(test_file('system.c'))
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp')
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
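  # (4*1024*1024*1024 == 2**32, so 2**32 - 65536 wraps around to -65536 when the
  #  reported value is read back as a signed 32-bit integer, hence the negative
  #  expected value above.)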
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest(test_file('alloc_3gb.cpp'),
expected='0',
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser/emmalloc_memgrowth.cpp'), expected='0', args=['-s', 'MALLOC=emmalloc', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'ABORTING_MALLOC=0', '-s', 'ASSERTIONS=2', '-s', 'MINIMAL_RUNTIME=1', '-s', 'MAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp')
@no_firefox('no 4GB support yet')
@require_v8
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp')
  # Tests that Emscripten-compiled applications can be run when there is a slash in the URL query or fragment of the js file
def test_browser_run_with_slash_in_query_and_hash(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O0'])
src = open('test.html').read()
# Slash in query
create_file('test-query.html', src.replace('test.js', 'test.js?type=pass/fail'))
self.run_browser('test-query.html', None, '/report_result?0')
# Slash in fragment
create_file('test-hash.html', src.replace('test.js', 'test.js#pass/fail'))
self.run_browser('test-hash.html', None, '/report_result?0')
# Slash in query and fragment
create_file('test-query-hash.html', src.replace('test.js', 'test.js?type=pass/fail#pass/fail'))
self.run_browser('test-query-hash.html', None, '/report_result?0')
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
    # the specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
    # you may want to adjust "ITERATIONS".
self.btest_exit(test_file('pthread/test_pthread_proxy_hammer.cpp'),
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser/test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
def test_full_js_library_strict(self):
self.btest_exit(test_file('hello_world.c'), args=['-sINCLUDE_FULL_LIBRARY', '-sSTRICT_JS'])
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the
    # browser that is launched will have that directory as its startup directory, and the browser
    # will not close as part of the test. That pins down the cwd on Windows and makes it
    # impossible to delete. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
    # Verify that trying to pass an argument to the page without the `--` separator
    # generates an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = read_file(self.in_dir('stdout.txt'))
stderr = read_file(self.in_dir('stderr.txt'))
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
crawler-lvl2.py
|
import logging
import os
import signal
import time
from threading import Thread, Lock
import pandas as pd
import requests
import yaml
from TSEData import TSEData, helper
with open('./config.yaml') as f:
conf = yaml.safe_load(f)
helper.folder_check(conf['general']['log_path'])
logging.basicConfig(filename=os.path.join(conf['general']['log_path'], 'crawler_lvl2.log'),
format=conf['general']['log_format'],
level=logging.getLevelName(conf['crawler_lvl2']['log_level'].upper()),
filemode='w')
logger = logging.getLogger('crawler-lvl2.py')
index_list_file = os.path.join(conf['update_index']['path'], conf['update_index']['file_name'])
lvl2_data_path = conf['crawler_lvl2']['path']
helper.folder_check(lvl2_data_path)
assert os.path.isfile(index_list_file), f'{index_list_file} not found. first create one using update_index.py'
index_list = pd.read_csv(index_list_file)
assert index_list['crawl'].any(), 'no index to crawl in index_list'
index_list['uid'] = index_list['uid'].str.replace('i', '')
index_list = index_list.loc[index_list['crawl'], ['uid', 'symbol']].to_numpy()
logger.info(f'{len(index_list)} indexes are found to update')
i2get = 0
general_counter = 0
tloc = Lock()
TERM = False
def termsig(*args):
print(f'termination signal received {args}')
print(f'exiting. please wait ...')
global TERM
TERM = True
# noinspection PyBroadException
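# twork: the worker loop shared by all crawler threads. Each iteration picks the next
# (uid, symbol) pair round-robin while holding `tloc` (which also guards the shared
# i2get/general_counter state), fetches the level-2 data for that index, and saves the
# result as <uid>.json in the configured output folder. A worker exits once TERM is set.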
def twork():
global i2get, general_counter, TERM
sess = requests.session()
while True:
with tloc:
if TERM:
sess.close()
break
if i2get == len(index_list):
i2get = 0
uid, symbol = index_list[i2get]
i2get += 1
general_counter += 1
try:
data = TSEData.last_data_lvl2(uid, symbol, sess)
except Exception:
logger.exception(f'error crawling (uid:{uid})')
else:
data.to_json(os.path.join(lvl2_data_path, f'{uid}.json'),force_ascii=False)
if conf['crawler_lvl2']['print_on_stdout']:
print(data)
logger.info(f'fetched and saved (uid:{uid})')
# Size the worker pool from the configured max_workers (read from the crawler_lvl1
# section of config.yaml), capped by the number of indexes to crawl.
n_thread = min(conf['crawler_lvl1']['max_workers'], len(index_list))
threads = []
for i_ in range(n_thread):
t = Thread(target=twork)
t.daemon = True
t.start()
threads.append(t)
logger.info(f'all threads are created ({n_thread})')
# noinspection PyTypeChecker
signal.signal(signal.SIGTERM, termsig)
print(f'send a SIGTERM to my pid ({os.getpid()}) to exit')
while True:
if TERM:
break
crawl_rate = general_counter / 10
general_counter = 0
if conf['crawler_lvl2']['verbose']:
print('crawl rate:', crawl_rate)
time.sleep(10)
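# Shutdown: termsig() above sets TERM, the status loop exits, each worker breaks out of
# its own loop once it observes TERM, and the join() calls below then return.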
for t in threads:
t.join()
|
oplog_manager.py
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import sys
import time
import threading
import pymongo
from pymongo import CursorType, errors as pymongo_errors
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.gridfs_file import GridFSFile
from mongo_connector.util import log_fatal_exceptions, retry_until_ok
LOG = logging.getLogger(__name__)
class ReplicationLagLogger(threading.Thread):
"""Thread that periodically logs the current replication lag.
"""
def __init__(self, opman, interval):
super(ReplicationLagLogger, self).__init__()
self.opman = opman
self.interval = interval
self.daemon = True
def log_replication_lag(self):
checkpoint = self.opman.checkpoint
if checkpoint is None:
return
newest_write = retry_until_ok(self.opman.get_last_oplog_timestamp)
if newest_write < checkpoint:
# OplogThread will perform a rollback, don't log anything
return
lag_secs = newest_write.time - checkpoint.time
if lag_secs > 0:
LOG.info("OplogThread for replica set '%s' is %s seconds behind "
"the oplog.",
self.opman.replset_name, lag_secs)
else:
lag_inc = newest_write.inc - checkpoint.inc
if lag_inc > 0:
LOG.info("OplogThread for replica set '%s' is %s entries "
"behind the oplog.",
self.opman.replset_name, lag_inc)
else:
LOG.info("OplogThread for replica set '%s' is up to date "
"with the oplog.",
self.opman.replset_name)
def run(self):
while self.opman.is_alive():
self.log_replication_lag()
time.sleep(self.interval)
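# ReplicationLagLogger is started from OplogThread.run() below (e.g.
# ReplicationLagLogger(self, 30).start()), so lag is reported roughly every
# `interval` seconds for as long as the owning OplogThread is alive.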
class OplogThread(threading.Thread):
"""Thread that tails an oplog.
Calls the appropriate method on DocManagers for each relevant oplog entry.
"""
def __init__(self, primary_client, doc_managers,
oplog_progress_dict, namespace_config,
mongos_client=None, **kwargs):
super(OplogThread, self).__init__()
self.batch_size = kwargs.get('batch_size', DEFAULT_BATCH_SIZE)
# The connection to the primary for this replicaSet.
self.primary_client = primary_client
# The connection to the mongos, if there is one.
self.mongos_client = mongos_client
# Are we allowed to perform a collection dump?
self.collection_dump = kwargs.get('collection_dump', True)
# The document manager for each target system.
# These are the same for all threads.
self.doc_managers = doc_managers
# Boolean describing whether or not the thread is running.
self.running = True
# Stores the timestamp of the last oplog entry read.
self.checkpoint = None
# A dictionary that stores OplogThread/timestamp pairs.
# Represents the last checkpoint for a OplogThread.
self.oplog_progress = oplog_progress_dict
# The namespace configuration
self.namespace_config = namespace_config
# Whether the collection dump gracefully handles exceptions
self.continue_on_error = kwargs.get('continue_on_error', False)
LOG.info('OplogThread: Initializing oplog thread')
self.oplog = self.primary_client.local.oplog.rs
self.replset_name = (
self.primary_client.admin.command('ismaster')['setName'])
if not self.oplog.find_one():
err_msg = 'OplogThread: No oplog for thread:'
LOG.warning('%s %s' % (err_msg, self.primary_client))
def _should_skip_entry(self, entry):
"""Determine if this oplog entry should be skipped.
This has the possible side effect of modifying the entry's namespace
and filtering fields from updates and inserts.
"""
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
return True, False
# Ignore no-ops
if entry['op'] == 'n':
return True, False
ns = entry['ns']
if '.' not in ns:
return True, False
coll = ns.split('.', 1)[1]
# Ignore system collections
if coll.startswith("system."):
return True, False
# Ignore GridFS chunks
if coll.endswith('.chunks'):
return True, False
is_gridfs_file = False
if coll.endswith(".files"):
ns = ns[:-len(".files")]
if self.namespace_config.gridfs_namespace(ns):
is_gridfs_file = True
else:
return True, False
# Commands should not be ignored, filtered, or renamed. Renaming is
# handled by the DocManagers via the CommandHelper class.
if coll == "$cmd":
return False, False
# Rename or filter out namespaces that are ignored keeping
# included gridfs namespaces.
namespace = self.namespace_config.lookup(ns)
if namespace is None:
LOG.debug("OplogThread: Skipping oplog entry: "
"'%s' is not in the namespace configuration." % (ns,))
return True, False
# Update the namespace.
entry['ns'] = namespace.dest_name
# Take fields out of the oplog entry that shouldn't be replicated.
# This may nullify the document if there's nothing to do.
if not self.filter_oplog_entry(
entry, include_fields=namespace.include_fields,
exclude_fields=namespace.exclude_fields):
return True, False
return False, is_gridfs_file
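    # Illustrative outcomes (hypothetical entries and namespace config, not part of
    # the original module):
    #   {'op': 'n', ...}                        -> (True, False)   # no-op, skipped
    #   {'op': 'i', 'ns': 'db.system.indexes'}  -> (True, False)   # system collection, skipped
    #   {'op': 'i', 'ns': 'db.fs.files', ...}   -> (False, True)   # GridFS file, if 'db.fs' is a configured gridfs namespace
    #   {'op': 'i', 'ns': 'db.coll', ...}       -> (False, False)  # regular included entry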
@log_fatal_exceptions
def run(self):
"""Start the oplog worker.
"""
ReplicationLagLogger(self, 30).start()
LOG.debug("OplogThread: Run thread started")
while self.running is True:
LOG.debug("OplogThread: Getting cursor")
cursor, cursor_empty = retry_until_ok(self.init_cursor)
# we've fallen too far behind
if cursor is None and self.checkpoint is not None:
err_msg = "OplogThread: Last entry no longer in oplog"
effect = "cannot recover!"
LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
continue
if cursor_empty:
LOG.debug("OplogThread: Last entry is the one we "
"already processed. Up to date. Sleeping.")
time.sleep(1)
continue
last_ts = None
remove_inc = 0
upsert_inc = 0
update_inc = 0
try:
LOG.debug("OplogThread: about to process new oplog entries")
while cursor.alive and self.running:
LOG.debug("OplogThread: Cursor is still"
" alive and thread is still running.")
for n, entry in enumerate(cursor):
# Break out if this thread should stop
if not self.running:
break
LOG.debug("OplogThread: Iterating through cursor,"
" document number in this cursor is %d"
% n)
skip, is_gridfs_file = self._should_skip_entry(entry)
if skip:
# update the last_ts on skipped entries to ensure
# our checkpoint does not fall off the oplog. This
# also prevents reprocessing skipped entries.
last_ts = entry['ts']
continue
# Sync the current oplog operation
operation = entry['op']
ns = entry['ns']
timestamp = util.bson_ts_to_long(entry['ts'])
for docman in self.doc_managers:
try:
LOG.debug("OplogThread: Operation for this "
"entry is %s" % str(operation))
# Remove
if operation == 'd':
docman.remove(
entry['o']['_id'], ns, timestamp)
remove_inc += 1
# Insert
elif operation == 'i': # Insert
# Retrieve inserted document from
# 'o' field in oplog record
doc = entry.get('o')
# Extract timestamp and namespace
if is_gridfs_file:
db, coll = ns.split('.', 1)
gridfile = GridFSFile(
self.primary_client[db][coll],
doc)
docman.insert_file(
gridfile, ns, timestamp)
else:
docman.upsert(doc, ns, timestamp)
upsert_inc += 1
# Update
elif operation == 'u':
docman.update(entry['o2']['_id'],
entry['o'],
ns, timestamp)
update_inc += 1
# Command
elif operation == 'c':
# use unmapped namespace
doc = entry.get('o')
docman.handle_command(doc,
entry['ns'],
timestamp)
except errors.OperationFailed:
LOG.exception(
"Unable to process oplog document %r"
% entry)
except errors.ConnectionFailed:
LOG.exception(
"Connection failed while processing oplog "
"document %r" % entry)
if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
LOG.debug(
"OplogThread: Documents removed: %d, "
"inserted: %d, updated: %d so far" % (
remove_inc, upsert_inc, update_inc))
LOG.debug("OplogThread: Doc is processed.")
last_ts = entry['ts']
                        # update the checkpoint every batch_size entries; with
                        # the default batch_size of -1, n % -1 == 0 for all n,
                        # so this never fires and the checkpoint is only
                        # updated after the loop below.
if n % self.batch_size == 1:
self.update_checkpoint(last_ts)
last_ts = None
# update timestamp after running through oplog
if last_ts is not None:
LOG.debug("OplogThread: updating checkpoint after "
"processing new oplog entries")
self.update_checkpoint(last_ts)
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
LOG.exception(
"Cursor closed due to an exception. "
"Will attempt to reconnect.")
# update timestamp before attempting to reconnect to MongoDB,
# after being join()'ed, or if the cursor closes
if last_ts is not None:
LOG.debug("OplogThread: updating checkpoint after an "
"Exception, cursor closing, or join() on this"
"thread.")
self.update_checkpoint(last_ts)
LOG.debug("OplogThread: Sleeping. Documents removed: %d, "
"upserted: %d, updated: %d"
% (remove_inc, upsert_inc, update_inc))
time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
LOG.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
@classmethod
def _find_field(cls, field, doc):
"""Find the field in the document which matches the given field.
The field may be in dot notation, eg "a.b.c". Returns a list with
a single tuple (path, field_value) or the empty list if the field
is not present.
"""
path = field.split('.')
try:
for key in path:
doc = doc[key]
return [(path, doc)]
except (KeyError, TypeError):
return []
@classmethod
def _find_update_fields(cls, field, doc):
"""Find the fields in the update document which match the given field.
Both the field and the top level keys in the doc may be in dot
notation, eg "a.b.c". Returns a list of tuples (path, field_value) or
the empty list if the field is not present.
"""
def find_partial_matches():
for key in doc:
if len(key) > len(field):
# Handle case where field is a prefix of key, eg field is
# 'a' and key is 'a.b'.
if key.startswith(field) and key[len(field)] == '.':
yield [key], doc[key]
# Continue searching, there may be multiple matches.
# For example, field 'a' should match 'a.b' and 'a.c'.
elif len(key) < len(field):
# Handle case where key is a prefix of field, eg field is
# 'a.b' and key is 'a'.
if field.startswith(key) and field[len(key)] == '.':
# Search for the remaining part of the field
matched = cls._find_field(field[len(key) + 1:],
doc[key])
if matched:
# Add the top level key to the path.
match = matched[0]
match[0].insert(0, key)
yield match
# Stop searching, it's not possible for any other
# keys in the update doc to match this field.
return
try:
return [([field], doc[field])]
except KeyError:
# Field does not exactly match any key in the update doc.
return list(find_partial_matches())
def _pop_excluded_fields(self, doc, exclude_fields, update=False):
# Remove all the fields that were passed in exclude_fields.
find_fields = self._find_update_fields if update else self._find_field
for field in exclude_fields:
for path, _ in find_fields(field, doc):
# Delete each matching field in the original document.
temp_doc = doc
for p in path[:-1]:
temp_doc = temp_doc[p]
temp_doc.pop(path[-1])
return doc # Need this to be similar to copy_included_fields.
def _copy_included_fields(self, doc, include_fields, update=False):
new_doc = {}
find_fields = self._find_update_fields if update else self._find_field
for field in include_fields:
for path, value in find_fields(field, doc):
# Copy each matching field in the original document.
temp_doc = new_doc
for p in path[:-1]:
temp_doc = temp_doc.setdefault(p, {})
temp_doc[path[-1]] = value
return new_doc
def filter_oplog_entry(self, entry, include_fields=None,
exclude_fields=None):
"""Remove fields from an oplog entry that should not be replicated.
NOTE: this does not support array indexing, for example 'a.b.2'"""
if not include_fields and not exclude_fields:
return entry
elif include_fields:
filter_fields = self._copy_included_fields
else:
filter_fields = self._pop_excluded_fields
fields = include_fields or exclude_fields
entry_o = entry['o']
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry['op'] == 'i':
entry['o'] = filter_fields(entry_o, fields)
# 'u' indicates an update. The 'o' field describes an update spec
# if '$set' or '$unset' are present.
elif entry['op'] == 'u' and ('$set' in entry_o or '$unset' in entry_o):
if '$set' in entry_o:
entry['o']["$set"] = filter_fields(
entry_o["$set"], fields, update=True)
if '$unset' in entry_o:
entry['o']["$unset"] = filter_fields(
entry_o["$unset"], fields, update=True)
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry_o and not entry_o['$set']:
entry_o.pop("$set")
if "$unset" in entry_o and not entry_o['$unset']:
entry_o.pop("$unset")
if not entry_o:
return None
# 'u' indicates an update. The 'o' field is the replacement document
# if no '$set' or '$unset' are present.
elif entry['op'] == 'u':
entry['o'] = filter_fields(entry_o, fields)
return entry
def get_oplog_cursor(self, timestamp=None):
"""Get a cursor to the oplog after the given timestamp, excluding
no-op entries.
If no timestamp is specified, returns a cursor to the entire oplog.
"""
query = {'op': {'$ne': 'n'}}
if timestamp is None:
cursor = self.oplog.find(
query,
cursor_type=CursorType.TAILABLE_AWAIT)
else:
query['ts'] = {'$gte': timestamp}
cursor = self.oplog.find(
query,
cursor_type=CursorType.TAILABLE_AWAIT,
oplog_replay=True)
return cursor
def get_collection(self, namespace):
"""Get a pymongo collection from a namespace."""
database, coll = namespace.split('.', 1)
return self.primary_client[database][coll]
def dump_collection(self):
"""Dumps collection into the target system.
This method is called when we're initializing the cursor and have no
configs i.e. when we're starting for the first time.
"""
timestamp = retry_until_ok(self.get_last_oplog_timestamp)
if timestamp is None:
return None
long_ts = util.bson_ts_to_long(timestamp)
# Flag if this oplog thread was cancelled during the collection dump.
# Use a list to workaround python scoping.
dump_cancelled = [False]
def get_all_ns():
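            """Return the regular and GridFS namespaces to dump.
            Applies the namespace configuration and skips the config/local
            databases, system collections, and GridFS chunks collections.
            """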
ns_set = []
gridfs_ns_set = []
db_list = self.namespace_config.get_included_databases()
if not db_list:
# Only use listDatabases when the configured databases are not
# explicit.
db_list = retry_until_ok(self.primary_client.database_names)
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = retry_until_ok(
self.primary_client[database].collection_names)
for coll in coll_list:
# ignore system collections
if coll.startswith("system."):
continue
# ignore gridfs chunks collections
if coll.endswith(".chunks"):
continue
if coll.endswith(".files"):
namespace = "%s.%s" % (database, coll)
namespace = namespace[:-len(".files")]
if self.namespace_config.gridfs_namespace(namespace):
gridfs_ns_set.append(namespace)
else:
namespace = "%s.%s" % (database, coll)
if self.namespace_config.map_namespace(namespace):
ns_set.append(namespace)
return ns_set, gridfs_ns_set
dump_set, gridfs_dump_set = get_all_ns()
LOG.debug("OplogThread: Dumping set of collections %s " % dump_set)
def docs_to_dump(from_coll):
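            """Yield every document in ``from_coll``, retrying on failure.
            The cursor is sorted by ``_id`` so the dump can resume from the
            last document seen after an AutoReconnect or OperationFailure.
            """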
last_id = None
attempts = 0
projection = self.namespace_config.projection(from_coll.full_name)
# Loop to handle possible AutoReconnect
while attempts < 60:
if last_id is None:
cursor = retry_until_ok(
from_coll.find,
projection=projection,
sort=[("_id", pymongo.ASCENDING)]
)
else:
cursor = retry_until_ok(
from_coll.find,
{"_id": {"$gt": last_id}},
projection=projection,
sort=[("_id", pymongo.ASCENDING)]
)
try:
for doc in cursor:
if not self.running:
# Thread was joined while performing the
# collection dump.
dump_cancelled[0] = True
raise StopIteration
last_id = doc["_id"]
yield doc
break
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure):
attempts += 1
time.sleep(1)
def upsert_each(dm):
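            """Upsert every document in ``dump_set`` into ``dm`` one at a time."""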
num_failed = 0
for namespace in dump_set:
from_coll = self.get_collection(namespace)
mapped_ns = self.namespace_config.map_namespace(namespace)
total_docs = retry_until_ok(from_coll.count)
num = None
for num, doc in enumerate(docs_to_dump(from_coll)):
try:
dm.upsert(doc, mapped_ns, long_ts)
except Exception:
if self.continue_on_error:
LOG.exception(
"Could not upsert document: %r" % doc)
num_failed += 1
else:
raise
if num % 10000 == 0:
LOG.info("Upserted %d out of approximately %d docs "
"from collection '%s'",
num + 1, total_docs, namespace)
if num is not None:
LOG.info("Upserted %d out of approximately %d docs from "
"collection '%s'",
num + 1, total_docs, namespace)
if num_failed > 0:
LOG.error("Failed to upsert %d docs" % num_failed)
def upsert_all(dm):
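            """Bulk upsert every collection in ``dump_set`` into ``dm``.
            Falls back to ``upsert_each`` if the bulk upsert fails and
            ``continue_on_error`` is set.
            """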
try:
for namespace in dump_set:
from_coll = self.get_collection(namespace)
total_docs = retry_until_ok(from_coll.count)
mapped_ns = self.namespace_config.map_namespace(
namespace)
                    LOG.info("Bulk upserting approximately %d docs from "
"collection '%s'",
total_docs, namespace)
docs = docs_to_dump(from_coll)
dm.bulk_upsert(docs,
mapped_ns, long_ts)
except Exception:
if self.continue_on_error:
LOG.exception("OplogThread: caught exception"
" during bulk upsert, re-upserting"
" documents serially")
upsert_each(dm)
else:
raise
def do_dump(dm, error_queue):
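            """Dump the regular and GridFS collections into one DocManager.
            Any exception raised during the dump is placed on ``error_queue``
            for the caller to report.
            """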
try:
LOG.debug("OplogThread: Using bulk upsert function for "
"collection dump")
upsert_all(dm)
if gridfs_dump_set:
LOG.info("OplogThread: dumping GridFS collections: %s",
gridfs_dump_set)
# Dump GridFS files
for gridfs_ns in gridfs_dump_set:
mongo_coll = self.get_collection(gridfs_ns)
from_coll = self.get_collection(gridfs_ns + '.files')
dest_ns = self.namespace_config.map_namespace(gridfs_ns)
for doc in docs_to_dump(from_coll):
gridfile = GridFSFile(mongo_coll, doc)
dm.insert_file(gridfile, dest_ns, long_ts)
except:
# Likely exceptions:
# pymongo.errors.OperationFailure,
# mongo_connector.errors.ConnectionFailed
# mongo_connector.errors.OperationFailed
error_queue.put(sys.exc_info())
# Extra threads (if any) that assist with collection dumps
dumping_threads = []
# Did the dump succeed for all target systems?
dump_success = True
# Holds any exceptions we can't recover from
errors = queue.Queue()
if len(self.doc_managers) == 1:
do_dump(self.doc_managers[0], errors)
else:
# Slight performance gain breaking dump into separate
# threads if > 1 replication target
for dm in self.doc_managers:
t = threading.Thread(target=do_dump, args=(dm, errors))
dumping_threads.append(t)
t.start()
# cleanup
for t in dumping_threads:
t.join()
# Print caught exceptions
try:
while True:
LOG.critical('Exception during collection dump',
exc_info=errors.get_nowait())
dump_success = False
except queue.Empty:
pass
if not dump_success:
err_msg = "OplogThread: Failed during dump collection"
effect = "cannot recover!"
LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
return None
if dump_cancelled[0]:
LOG.warning('Initial collection dump was interrupted. '
'Will re-run the collection dump on next startup.')
return None
return timestamp
def _get_oplog_timestamp(self, newest_entry):
"""Return the timestamp of the latest or earliest entry in the oplog.
"""
sort_order = pymongo.DESCENDING if newest_entry else pymongo.ASCENDING
curr = self.oplog.find({'op': {'$ne': 'n'}}).sort(
'$natural', sort_order
).limit(-1)
try:
ts = next(curr)['ts']
except StopIteration:
LOG.debug("OplogThread: oplog is empty.")
return None
LOG.debug("OplogThread: %s oplog entry has timestamp %s."
% ('Newest' if newest_entry else 'Oldest', ts))
return ts
def get_oldest_oplog_timestamp(self):
"""Return the timestamp of the oldest entry in the oplog.
"""
return self._get_oplog_timestamp(False)
def get_last_oplog_timestamp(self):
"""Return the timestamp of the newest entry in the oplog.
"""
return self._get_oplog_timestamp(True)
def _cursor_empty(self, cursor):
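        """Return True if the given tailable cursor has no documents.
        The check runs on a non-tailable clone limited to a single batch,
        so the original cursor is left untouched.
        """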
try:
# Tailable cursors can not have singleBatch=True in MongoDB > 3.3
next(cursor.clone().remove_option(CursorType.TAILABLE_AWAIT)
.limit(-1))
return False
except StopIteration:
return True
def init_cursor(self):
"""Position the cursor appropriately.
The cursor is set to either the beginning of the oplog, or
wherever it was last left off.
Returns the cursor and True if the cursor is empty.
"""
timestamp = self.read_last_checkpoint()
if timestamp is None:
if self.collection_dump:
# dump collection and update checkpoint
timestamp = self.dump_collection()
self.update_checkpoint(timestamp)
if timestamp is None:
return None, True
else:
# Collection dump disabled:
# Return cursor to beginning of oplog but do not set the
# checkpoint. The checkpoint will be set after an operation
# has been applied.
cursor = self.get_oplog_cursor()
return cursor, self._cursor_empty(cursor)
cursor = self.get_oplog_cursor(timestamp)
cursor_empty = self._cursor_empty(cursor)
if cursor_empty:
# rollback, update checkpoint, and retry
LOG.debug("OplogThread: Initiating rollback from "
"get_oplog_cursor")
self.update_checkpoint(self.rollback())
return self.init_cursor()
first_oplog_entry = next(cursor)
oldest_ts_long = util.bson_ts_to_long(
self.get_oldest_oplog_timestamp())
checkpoint_ts_long = util.bson_ts_to_long(timestamp)
if checkpoint_ts_long < oldest_ts_long:
# We've fallen behind, the checkpoint has fallen off the oplog
return None, True
cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"])
if cursor_ts_long > checkpoint_ts_long:
# The checkpoint is not present in this oplog and the oplog
# did not rollover. This means that we connected to a new
# primary which did not replicate the checkpoint and which has
# new changes in its oplog for us to process.
# rollback, update checkpoint, and retry
LOG.debug("OplogThread: Initiating rollback from "
"get_oplog_cursor: new oplog entries found but "
"checkpoint is not present")
self.update_checkpoint(self.rollback())
return self.init_cursor()
# first entry has been consumed
return cursor, cursor_empty
def update_checkpoint(self, checkpoint):
"""Store the current checkpoint in the oplog progress dictionary.
"""
if checkpoint is not None and checkpoint != self.checkpoint:
self.checkpoint = checkpoint
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
# If we have the repr of our oplog collection
# in the dictionary, remove it and replace it
# with our replica set name.
# This allows an easy upgrade path from mongo-connector 2.3.
# For an explanation of the format change, see the comment in
# read_last_checkpoint.
oplog_dict.pop(str(self.oplog), None)
oplog_dict[self.replset_name] = checkpoint
LOG.debug("OplogThread: oplog checkpoint updated to %s",
checkpoint)
else:
LOG.debug("OplogThread: no checkpoint to update.")
def read_last_checkpoint(self):
"""Read the last checkpoint from the oplog progress dictionary.
"""
# In versions of mongo-connector 2.3 and before,
# we used the repr of the
# oplog collection as keys in the oplog_progress dictionary.
# In versions thereafter, we use the replica set name. For backwards
# compatibility, we check for both.
oplog_str = str(self.oplog)
ret_val = None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
try:
# New format.
ret_val = oplog_dict[self.replset_name]
except KeyError:
try:
# Old format.
ret_val = oplog_dict[oplog_str]
except KeyError:
pass
LOG.debug("OplogThread: reading last checkpoint as %s " %
str(ret_val))
self.checkpoint = ret_val
return ret_val
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
LOG.debug("OplogThread: Initiating rollback sequence to bring "
"system into a consistent state.")
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(last_docs,
key=lambda x: x["_ts"] if x else float("-inf"))
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{'ts': {'$lte': target_ts}, 'op': {'$ne': 'n'}},
sort=[('$natural', pymongo.DESCENDING)]
)
LOG.debug("OplogThread: last oplog entry is %s"
% str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry['ts']
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc['_ts']
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc['ns'] in rollback_set:
rollback_set[doc['ns']].append(doc)
else:
rollback_set[doc['ns']] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = self.namespace_config.unmap_namespace(
namespace)
if not original_namespace:
original_namespace = namespace
database, coll = original_namespace.split('.', 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc['_id']) for doc in doc_list]
# Use connection to whole cluster if in sharded environment.
client = self.mongos_client or self.primary_client
to_update = util.retry_until_ok(
client[database][coll].find,
{'_id': {'$in': bson_obj_id_list}},
projection=self.namespace_config.projection(
original_namespace)
)
# Doc list are docs in target system, to_update are
# Docs in mongo
doc_hash = {} # Hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc
to_index = []
def collect_existing_docs():
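                    """Separate the docs that still exist in MongoDB.
                    Documents found in ``to_update`` are moved out of
                    ``doc_hash`` (docs to remove) and into ``to_index``
                    (docs to re-upsert).
                    """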
for doc in to_update:
if doc['_id'] in doc_hash:
del doc_hash[doc['_id']]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
# Delete the inconsistent documents
LOG.debug("OplogThread: Rollback, removing inconsistent "
"docs.")
remov_inc = 0
for document_id in doc_hash:
try:
dm.remove(document_id, namespace,
util.bson_ts_to_long(rollback_cutoff_ts))
remov_inc += 1
                            LOG.debug(
                                "OplogThread: Rollback, removed %r "
                                % document_id)
except errors.OperationFailed:
                            LOG.warning(
                                "Could not delete document during rollback: %r "
                                "This can happen if this document was already "
                                "removed by another rollback happening at the "
                                "same time." % document_id
                            )
LOG.debug("OplogThread: Rollback, removed %d docs." %
remov_inc)
# Insert the ones from mongo
LOG.debug("OplogThread: Rollback, inserting documents "
"from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
try:
insert_inc += 1
dm.upsert(doc,
namespace,
util.bson_ts_to_long(rollback_cutoff_ts))
except errors.OperationFailed:
fail_insert_inc += 1
LOG.exception("OplogThread: Rollback, Unable to "
"insert %r" % doc)
LOG.debug("OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))
return rollback_cutoff_ts
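# Illustrative sketch, not part of mongo-connector: the ``_ts`` values
# exchanged with the doc managers above are BSON Timestamps flattened into
# sortable 64-bit integers.  The helpers below assume the usual packing done
# by ``util.bson_ts_to_long`` and ``util.long_to_bson_ts`` (seconds in the
# high 32 bits, increment in the low 32 bits) and reuse the module's existing
# ``bson`` import.
def _example_ts_to_long(ts):
    """Pack a ``bson.timestamp.Timestamp`` into a single sortable integer."""
    return (ts.time << 32) + ts.inc
def _example_long_to_ts(value):
    """Unpack an integer produced by ``_example_ts_to_long``."""
    return bson.timestamp.Timestamp(value >> 32, value & 0xFFFFFFFF)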
|
plotting.py
|
"""PyVista plotting module."""
import platform
import ctypes
import sys
import pathlib
import collections.abc
from typing import Sequence
import logging
import os
import textwrap
import time
import warnings
import weakref
from functools import wraps
from threading import Thread
from typing import Dict
import numpy as np
import scooby
import pyvista
from pyvista import _vtk
from pyvista.utilities import (assert_empty_kwargs, convert_array,
convert_string_array, get_array,
is_pyvista_dataset, abstract_class,
numpy_to_texture, raise_not_matching,
wrap)
from ..utilities.regression import image_from_window
from ..utilities.misc import PyvistaDeprecationWarning
from .colors import get_cmap_safe
from .export_vtkjs import export_plotter_vtkjs
from .mapper import make_mapper
from .picking import PickingHelper
from .renderer import Renderer, Camera
from .tools import (normalize, opacity_transfer_function, parse_color,
parse_font_family, FONTS)
from .widgets import WidgetHelper
from .scalar_bars import ScalarBars
from .renderers import Renderers
from .render_window_interactor import RenderWindowInteractor
def _has_matplotlib():
try:
import matplotlib
return True
except ImportError: # pragma: no cover
return False
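# Illustrative sketch, not part of pyvista's API: ``_has_matplotlib`` can be
# used to fail early with a helpful message when a matplotlib-only feature is
# requested.  The helper name and message below are hypothetical.
def _example_require_matplotlib(feature='colormaps'):
    """Raise an informative error when matplotlib is unavailable."""
    if not _has_matplotlib():
        raise ImportError(f'matplotlib is required for {feature}; '
                          'install it with ``pip install matplotlib``.')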
SUPPORTED_FORMATS = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"]
VERY_FIRST_RENDER = True # windows plotter helper
# EXPERIMENTAL: permit pyvista to kill the render window
KILL_DISPLAY = platform.system() == 'Linux' and os.environ.get('PYVISTA_KILL_DISPLAY')
if KILL_DISPLAY: # pragma: no cover
# this won't work under wayland
try:
X11 = ctypes.CDLL("libX11.so")
X11.XCloseDisplay.argtypes = [ctypes.c_void_p]
except OSError:
warnings.warn('PYVISTA_KILL_DISPLAY: Unable to load X11.\n'
'Probably using wayland')
KILL_DISPLAY = False
def close_all():
"""Close all open/active plotters and clean up memory.
Returns
-------
bool
``True`` when all plotters have been closed.
"""
for key, p in _ALL_PLOTTERS.items():
if not p._closed:
p.close()
p.deep_clean()
_ALL_PLOTTERS.clear()
return True
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
log.addHandler(logging.StreamHandler())
def _warn_xserver(): # pragma: no cover
"""Check if plotting is supported and persist this state.
Check once and cache this value between calls. Warn the user if
plotting is not supported. Configured to check on Linux and Mac
OS since the Windows check is not quick.
"""
# disable windows check until we can get a fast way of verifying
# if windows has a windows manager (which it generally does)
if os.name == 'nt':
return
if not hasattr(_warn_xserver, 'has_support'):
_warn_xserver.has_support = pyvista.system_supports_plotting()
if not _warn_xserver.has_support:
# check if a display has been set
if 'DISPLAY' in os.environ:
return
# finally, check if using a backend that doesn't require an xserver
if pyvista.global_theme.jupyter_backend in ['ipygany']:
return
# Check if VTK has EGL support
ren_win_str = str(type(_vtk.vtkRenderWindow()))
if 'EGL' in ren_win_str or 'OSOpenGL' in ren_win_str:
return
warnings.warn('\n'
'This system does not appear to be running an xserver.\n'
'PyVista will likely segfault when rendering.\n\n'
'Try starting a virtual frame buffer with xvfb, or using\n '
' ``pyvista.start_xvfb()``\n')
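# Illustrative sketch, not part of pyvista's API: the check-once,
# cache-on-the-function pattern used by ``_warn_xserver`` above, shown in
# isolation.  The helper name is hypothetical.
def _example_cached_support_check():
    """Return whether plotting is supported, computing the answer only once."""
    if not hasattr(_example_cached_support_check, 'has_support'):
        _example_cached_support_check.has_support = (
            pyvista.system_supports_plotting())
    return _example_cached_support_check.has_support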
USE_SCALAR_BAR_ARGS = """
"stitle" is a deprecated keyword and will be removed in a future
release.
Use ``scalar_bar_args`` instead. For example:
scalar_bar_args={'title': 'Scalar Bar Title'}
"""
@abstract_class
class BasePlotter(PickingHelper, WidgetHelper):
"""To be used by the Plotter and pyvistaqt.QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
        Specify two across with ``shape=(1, 2)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default ``False``.
border_color : str or sequence, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
border_width : float, optional
Width of the border in pixels when enabled.
title : str, optional
        Window title of the render window.
lighting : str, optional
What lighting to set up for the plotter.
Accepted options:
* ``'light_kit'``: a vtk Light Kit composed of 5 lights.
* ``'three lights'``: illumination using 3 lights.
* ``'none'``: no light sources at instantiation.
The default is a Light Kit (to be precise, 5 separate lights
that act like a Light Kit).
theme : pyvista.themes.DefaultTheme, optional
Plot-specific theme.
"""
mouse_position = None
click_position = None
def __init__(self, shape=(1, 1), border=None, border_color='k',
border_width=2.0, title=None, splitting_position=None,
groups=None, row_weights=None, col_weights=None,
lighting='light kit', theme=None):
"""Initialize base plotter."""
log.debug('BasePlotter init start')
self._theme = pyvista.themes.DefaultTheme()
if theme is None:
# copy global theme to ensure local plot theme is fixed
# after creation.
self._theme.load_theme(pyvista.global_theme)
else:
if not isinstance(theme, pyvista.themes.DefaultTheme):
raise TypeError('Expected ``pyvista.themes.DefaultTheme`` for '
f'``theme``, not {type(theme).__name__}.')
self._theme.load_theme(theme)
self.image_transparent_background = self._theme.transparent_background
# optional function to be called prior to closing
self.__before_close_callback = None
self._store_image = False
self.mesh = None
if title is None:
title = self._theme.title
self.title = str(title)
# add renderers
self.renderers = Renderers(self, shape, splitting_position, row_weights,
col_weights, groups, border, border_color,
border_width)
# This keeps track of scalars names already plotted and their ranges
self._scalar_bars = ScalarBars(self)
# track if the camera has been setup
self._first_time = True
# Keep track of the scale
# track if render window has ever been rendered
self._rendered = False
# this helps managing closed plotters
self._closed = False
# lighting style; be forgiving with input (accept underscores
# and ignore case)
lighting_normalized = str(lighting).replace('_', ' ').lower()
if lighting_normalized == 'light kit':
self.enable_lightkit()
elif lighting_normalized == 'three lights':
self.enable_3_lights()
elif lighting_normalized != 'none':
raise ValueError(f'Invalid lighting option "{lighting}".')
# Add self to open plotters
self._id_name = f"{hex(id(self))}-{len(_ALL_PLOTTERS)}"
_ALL_PLOTTERS[self._id_name] = self
# Key bindings
self.reset_key_events()
log.debug('BasePlotter init stop')
self._image_depth_null = None
self.last_image_depth = None
self.last_image = None
self._has_background_layer = False
# set hidden line removal based on theme
if self.theme.hidden_line_removal:
self.enable_hidden_line_removal()
# set antialiasing based on theme
if self.theme.antialiasing:
self.enable_anti_aliasing()
@property
def theme(self):
"""Return or set the theme used for this plotter.
Examples
--------
Use the dark theme for a plotter.
>>> import pyvista
>>> from pyvista import themes
>>> pl = pyvista.Plotter()
>>> pl.theme = themes.DarkTheme()
>>> actor = pl.add_mesh(pyvista.Sphere())
>>> pl.show()
"""
return self._theme
@theme.setter
def theme(self, theme):
if not isinstance(theme, pyvista.themes.DefaultTheme):
raise TypeError('Expected a pyvista theme like '
'``pyvista.themes.DefaultTheme``, '
f'not {type(theme).__name__}.')
        self._theme.load_theme(theme)
def import_gltf(self, filename, set_camera=True):
"""Import a glTF file into the plotter.
See https://www.khronos.org/gltf/ for more information.
Parameters
----------
filename : str
Path to the glTF file.
set_camera : bool, optional
Set the camera viewing angle to one compatible with the
default three.js perspective (``'xy'``).
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> helmet_file = examples.gltf.download_damaged_helmet() # doctest:+SKIP
>>> texture = examples.hdr.download_dikhololo_night() # doctest:+SKIP
>>> pl = pyvista.Plotter() # doctest:+SKIP
>>> pl.import_gltf(helmet_file) # doctest:+SKIP
        >>> pl.set_environment_texture(texture) # doctest:+SKIP
>>> pl.camera.zoom(1.8) # doctest:+SKIP
>>> pl.show() # doctest:+SKIP
See :ref:`load_gltf` for a full example using this method.
"""
if not _vtk.VTK9: # pragma: no cover
raise RuntimeError('Support for glTF requires VTK v9 or newer')
filename = os.path.abspath(os.path.expanduser(str(filename)))
if not os.path.isfile(filename):
raise FileNotFoundError(f'Unable to locate {filename}')
# lazy import here to avoid importing unused modules
from vtkmodules.vtkIOImport import vtkGLTFImporter
importer = vtkGLTFImporter()
importer.SetFileName(filename)
importer.SetRenderWindow(self.ren_win)
importer.Update()
# register last actor in actors
actor = self.renderer.GetActors().GetLastItem()
name = actor.GetAddressAsString("")
self.renderer._actors[name] = actor
# set camera position to a three.js viewing perspective
if set_camera:
self.camera_position = 'xy'
    def export_html(self, filename):
        """Export this plotter as an interactive scene to an HTML file.
Parameters
----------
filename : str
Path to export the html file to.
Notes
-----
You will need ``ipywidgets`` and ``pythreejs`` installed for
this feature.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_uniform()
>>> pl = pyvista.Plotter(shape=(1,2))
>>> _ = pl.add_mesh(mesh, scalars='Spatial Point Data', show_edges=True)
>>> pl.subplot(0,1)
>>> _ = pl.add_mesh(mesh, scalars='Spatial Cell Data', show_edges=True)
>>> pl.export_html('pyvista.html') # doctest:+SKIP
"""
pythreejs_renderer = self.to_pythreejs()
# import after converting as we check for pythreejs import first
try:
from ipywidgets.embed import embed_minimal_html
except ImportError: # pragma: no cover
raise ImportError('Please install ipywidgets with:\n'
'\n\tpip install ipywidgets')
# convert and write to file
embed_minimal_html(filename, views=[pythreejs_renderer], title=self.title)
def to_pythreejs(self):
"""Convert this plotting scene to a pythreejs renderer.
Returns
-------
ipywidgets.Widget
Widget containing pythreejs renderer.
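        Examples
        --------
        A minimal, illustrative conversion (requires ``pythreejs``).
        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_mesh(pyvista.Sphere())
        >>> renderer = pl.to_pythreejs()  # doctest:+SKIP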
"""
self._on_first_render_request() # setup camera
from pyvista.jupyter.pv_pythreejs import convert_plotter
return convert_plotter(self)
def export_gltf(self, filename, inline_data=True, rotate_scene=True,
save_normals=True):
"""Export the current rendering scene as a glTF file.
Visit https://gltf-viewer.donmccurdy.com/ for an online viewer.
See https://vtk.org/doc/nightly/html/classvtkGLTFExporter.html
for limitations regarding the exporter.
Parameters
----------
filename : str
Path to export the gltf file to.
inline_data : bool, optional
            Sets whether the binary data is included in the json file as a
            base64 string. When ``True``, only one file is exported.
rotate_scene : bool, optional
Rotate scene to be compatible with the glTF specifications.
save_normals : bool, optional
Saves the point array ``'Normals'`` as ``'NORMALS'`` in
the outputted scene.
Examples
--------
Output a simple point cloud represented as balls.
>>> import numpy as np
>>> import pyvista
>>> point_cloud = np.random.random((100, 3))
>>> pdata = pyvista.PolyData(point_cloud)
>>> pdata['orig_sphere'] = np.arange(100)
>>> sphere = pyvista.Sphere(radius=0.02)
>>> pc = pdata.glyph(scale=False, geom=sphere)
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pc, cmap='reds', smooth_shading=True,
... show_scalar_bar=False)
>>> pl.export_gltf('balls.gltf') # doctest:+SKIP
>>> pl.show()
Output the orientation plotter.
>>> from pyvista import demos
>>> pl = demos.orientation_plotter()
>>> pl.export_gltf('orientation_plotter.gltf') # doctest:+SKIP
>>> pl.show()
"""
if not _vtk.VTK9: # pragma: no cover
raise RuntimeError('Support for glTF requires VTK v9 or newer')
if not hasattr(self, "ren_win"):
raise RuntimeError('This plotter has been closed and is unable to export '
'the scene.')
from vtkmodules.vtkIOExport import vtkGLTFExporter
# rotate scene to gltf compatible view
if rotate_scene:
for renderer in self.renderers:
for actor in renderer.actors.values():
if hasattr(actor, 'RotateX'):
actor.RotateX(-90)
actor.RotateZ(-90)
if save_normals:
try:
mapper = actor.GetMapper()
if mapper is None:
continue
dataset = mapper.GetInputAsDataSet()
if 'Normals' in dataset.point_data:
# ensure normals are active
normals = dataset.point_data['Normals']
dataset.point_data.active_normals = normals.copy()
                        except Exception:
                            # normals are optional; skip actors without a
                            # compatible mapper or dataset
                            pass
exporter = vtkGLTFExporter()
exporter.SetRenderWindow(self.ren_win)
exporter.SetFileName(filename)
exporter.SetInlineData(inline_data)
exporter.SetSaveNormal(save_normals)
exporter.Update()
# rotate back if applicable
if rotate_scene:
for renderer in self.renderers:
for actor in renderer.actors.values():
if hasattr(actor, 'RotateX'):
actor.RotateZ(90)
actor.RotateX(90)
def enable_hidden_line_removal(self, all_renderers=True):
"""Enable hidden line removal.
Wireframe geometry will be drawn using hidden line removal if
the rendering engine supports it.
Disable this with :func:`disable_hidden_line_removal
<BasePlotter.disable_hidden_line_removal>`
Parameters
----------
all_renderers : bool
If ``True``, applies to all renderers in subplots. If
``False``, then only applies to the active renderer.
Examples
--------
Create a side-by-side plotter and render a sphere in wireframe
with hidden line removal enabled on the left and disabled on
the right.
>>> import pyvista
>>> sphere = pyvista.Sphere(theta_resolution=20, phi_resolution=20)
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe')
>>> _ = pl.add_text("With hidden line removal")
>>> pl.enable_hidden_line_removal(all_renderers=False)
>>> pl.subplot(0, 1)
>>> pl.disable_hidden_line_removal(all_renderers=False)
>>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe')
>>> _ = pl.add_text("Without hidden line removal")
>>> pl.show()
"""
if all_renderers:
for renderer in self.renderers:
renderer.enable_hidden_line_removal()
else:
self.renderer.enable_hidden_line_removal()
def disable_hidden_line_removal(self, all_renderers=True):
"""Disable hidden line removal.
Enable again with :func:`enable_hidden_line_removal
<BasePlotter.enable_hidden_line_removal>`
Parameters
----------
all_renderers : bool
If ``True``, applies to all renderers in subplots. If
``False``, then only applies to the active renderer.
Examples
--------
Enable and then disable hidden line removal.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.enable_hidden_line_removal()
>>> pl.disable_hidden_line_removal()
"""
if all_renderers:
for renderer in self.renderers:
renderer.disable_hidden_line_removal()
else:
self.renderer.disable_hidden_line_removal()
@property
def scalar_bar(self):
"""First scalar bar. Kept for backwards compatibility."""
return list(self.scalar_bars.values())[0]
@property
def scalar_bars(self):
"""Scalar bars.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere)
>>> plotter.scalar_bars
Scalar Bar Title Interactive
"Data" False
Select a scalar bar actor based on the title of the bar.
>>> plotter.scalar_bars['Data'] # doctest:+SKIP
(vtkmodules.vtkRenderingAnnotation.vtkScalarBarActor)0x7fcd3567ca00
"""
return self._scalar_bars
@property
def _before_close_callback(self):
"""Return the cached function (expecting a reference)."""
if self.__before_close_callback is not None:
return self.__before_close_callback()
@_before_close_callback.setter
def _before_close_callback(self, func):
"""Store a weakref.ref of the function being called."""
if func is not None:
self.__before_close_callback = weakref.ref(func)
else:
self.__before_close_callback = None
@property
def shape(self):
"""Shape of the plotter.
Examples
--------
Return the plotter shape.
>>> import pyvista
>>> plotter = pyvista.Plotter(shape=(2, 2))
>>> plotter.shape
(2, 2)
"""
return self.renderers._shape
@property
def renderer(self):
"""Return the active renderer.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.renderer # doctest:+SKIP
(Renderer)0x7f916129bfa0
"""
return self.renderers.active_renderer
@property
def store_image(self):
"""Store last rendered frame on close.
This is normally disabled to avoid caching the image, and is
        enabled by default when setting:
``pyvista.BUILDING_GALLERY = True``
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter(off_screen=True)
>>> pl.store_image = True
>>> _ = pl.add_mesh(pyvista.Cube())
>>> pl.show()
>>> image = pl.last_image
>>> type(image) # doctest:+SKIP
<class 'numpy.ndarray'>
"""
return self._store_image
@store_image.setter
def store_image(self, value):
"""Store last rendered frame on close."""
self._store_image = bool(value)
def subplot(self, index_row, index_column=None):
"""Set the active subplot.
Parameters
----------
index_row : int
Index of the subplot to activate along the rows.
index_column : int
Index of the subplot to activate along the columns.
Examples
--------
Create a 2 wide plot and set the background of right-hand plot
to orange. Add a cube to the left plot and a sphere to the
right.
>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> actor = pl.add_mesh(pyvista.Cube())
>>> pl.subplot(0, 1)
>>> actor = pl.add_mesh(pyvista.Sphere())
>>> pl.set_background('orange', all_renderers=False)
>>> pl.show()
"""
self.renderers.set_active_renderer(index_row, index_column)
@wraps(Renderer.add_legend)
def add_legend(self, *args, **kwargs):
"""Wrap ``Renderer.add_legend``."""
return self.renderer.add_legend(*args, **kwargs)
@wraps(Renderer.remove_legend)
def remove_legend(self, *args, **kwargs):
"""Wrap ``Renderer.remove_legend``."""
return self.renderer.remove_legend(*args, **kwargs)
@property
def legend(self):
"""Legend actor.
There can only be one legend actor per renderer. If
``legend`` is ``None``, there is no legend actor.
"""
return self.renderer.legend
@wraps(Renderer.add_floor)
def add_floor(self, *args, **kwargs):
"""Wrap ``Renderer.add_floor``."""
return self.renderer.add_floor(*args, **kwargs)
@wraps(Renderer.remove_floors)
def remove_floors(self, *args, **kwargs):
"""Wrap ``Renderer.remove_floors``."""
return self.renderer.remove_floors(*args, **kwargs)
def enable_3_lights(self, only_active=False):
"""Enable 3-lights illumination.
This will replace all pre-existing lights in the scene.
Parameters
----------
only_active : bool
If ``True``, only change the active renderer. The default
is that every renderer is affected.
Examples
--------
>>> from pyvista import demos
>>> pl = demos.orientation_plotter()
>>> pl.enable_3_lights()
>>> pl.show()
Note how this varies from the default plotting.
>>> pl = demos.orientation_plotter()
>>> pl.show()
"""
def _to_pos(elevation, azimuth):
theta = azimuth * np.pi / 180.0
phi = (90.0 - elevation) * np.pi / 180.0
x = np.sin(theta) * np.sin(phi)
y = np.cos(phi)
z = np.cos(theta) * np.sin(phi)
return x, y, z
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.remove_all_lights()
# Inspired from Mayavi's version of Raymond Maple 3-lights illumination
intensities = [1, 0.6, 0.5]
all_angles = [(45.0, 45.0), (-30.0, -60.0), (-30.0, 60.0)]
for intensity, angles in zip(intensities, all_angles):
light = pyvista.Light(light_type='camera light')
light.intensity = intensity
light.position = _to_pos(*angles)
for renderer in renderers:
renderer.add_light(light)
    def disable_3_lights(self):
        """Please use ``enable_lightkit``; this method has been deprecated."""
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``enable_lightkit``')
def enable_lightkit(self, only_active=False):
"""Enable the default light-kit lighting.
See:
https://www.researchgate.net/publication/2926068
This will replace all pre-existing lights in the renderer.
Parameters
----------
only_active : bool
If ``True``, only change the active renderer. The default is that
every renderer is affected.
Examples
--------
Create a plotter without any lights and then enable the
default light kit.
>>> import pyvista
>>> pl = pyvista.Plotter(lighting=None)
>>> pl.enable_lightkit()
>>> actor = pl.add_mesh(pyvista.Cube(), show_edges=True)
>>> pl.show()
"""
renderers = [self.renderer] if only_active else self.renderers
light_kit = _vtk.vtkLightKit()
for renderer in renderers:
renderer.remove_all_lights()
# Use the renderer as a vtkLightKit parser.
# Feed it the LightKit, pop off the vtkLights, put back
# pyvista Lights. This is the price we must pay for using
# inheritance rather than composition.
light_kit.AddLightsToRenderer(renderer)
vtk_lights = renderer.lights
renderer.remove_all_lights()
for vtk_light in vtk_lights:
light = pyvista.Light.from_vtk(vtk_light)
renderer.add_light(light)
renderer.LightFollowCameraOn()
@wraps(Renderer.enable_anti_aliasing)
def enable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.enable_anti_aliasing``."""
for renderer in self.renderers:
renderer.enable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.disable_anti_aliasing)
def disable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.disable_anti_aliasing``."""
self.renderer.disable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.set_focus)
def set_focus(self, *args, **kwargs):
"""Wrap ``Renderer.set_focus``."""
log.debug('set_focus: %s, %s', str(args), str(kwargs))
self.renderer.set_focus(*args, **kwargs)
self.render()
@wraps(Renderer.set_position)
def set_position(self, *args, **kwargs):
"""Wrap ``Renderer.set_position``."""
self.renderer.set_position(*args, **kwargs)
self.render()
@wraps(Renderer.set_viewup)
def set_viewup(self, *args, **kwargs):
"""Wrap ``Renderer.set_viewup``."""
self.renderer.set_viewup(*args, **kwargs)
self.render()
@wraps(Renderer.add_orientation_widget)
def add_orientation_widget(self, *args, **kwargs):
"""Wrap ``Renderer.add_orientation_widget``."""
return self.renderer.add_orientation_widget(*args, **kwargs)
@wraps(Renderer.add_axes)
def add_axes(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes``."""
return self.renderer.add_axes(*args, **kwargs)
@wraps(Renderer.hide_axes)
def hide_axes(self, *args, **kwargs):
"""Wrap ``Renderer.hide_axes``."""
return self.renderer.hide_axes(*args, **kwargs)
@wraps(Renderer.show_axes)
def show_axes(self, *args, **kwargs):
"""Wrap ``Renderer.show_axes``."""
return self.renderer.show_axes(*args, **kwargs)
@wraps(Renderer.update_bounds_axes)
def update_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.update_bounds_axes``."""
return self.renderer.update_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_actor)
def add_actor(self, *args, **kwargs):
"""Wrap ``Renderer.add_actor``."""
return self.renderer.add_actor(*args, **kwargs)
@wraps(Renderer.enable_parallel_projection)
def enable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.enable_parallel_projection``."""
return self.renderer.enable_parallel_projection(*args, **kwargs)
@wraps(Renderer.disable_parallel_projection)
def disable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.disable_parallel_projection``."""
return self.renderer.disable_parallel_projection(*args, **kwargs)
@wraps(Renderer.enable_shadows)
def enable_shadows(self, *args, **kwargs):
"""Wrap ``Renderer.enable_shadows``."""
return self.renderer.enable_shadows(*args, **kwargs)
@wraps(Renderer.disable_shadows)
def disable_shadows(self, *args, **kwargs):
"""Wrap ``Renderer.disable_shadows``."""
return self.renderer.disable_shadows(*args, **kwargs)
@property
def parallel_projection(self):
"""Return parallel projection state of active render window."""
return self.renderer.parallel_projection
@parallel_projection.setter
    def parallel_projection(self, state):
        """Set the parallel projection state of the active renderer."""
self.renderer.parallel_projection = state
@property
def parallel_scale(self):
"""Return parallel scale of active render window."""
return self.renderer.parallel_scale
@parallel_scale.setter
    def parallel_scale(self, value):
        """Set the parallel scale of the active renderer."""
self.renderer.parallel_scale = value
@wraps(Renderer.add_axes_at_origin)
def add_axes_at_origin(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes_at_origin``."""
return self.renderer.add_axes_at_origin(*args, **kwargs)
@wraps(Renderer.show_bounds)
def show_bounds(self, *args, **kwargs):
"""Wrap ``Renderer.show_bounds``."""
return self.renderer.show_bounds(*args, **kwargs)
@wraps(Renderer.add_bounding_box)
def add_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.add_bounding_box``."""
return self.renderer.add_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounding_box)
def remove_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounding_box``."""
return self.renderer.remove_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounds_axes)
def remove_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounds_axes``."""
return self.renderer.remove_bounds_axes(*args, **kwargs)
@wraps(Renderer.show_grid)
def show_grid(self, *args, **kwargs):
"""Wrap ``Renderer.show_grid``."""
return self.renderer.show_grid(*args, **kwargs)
@wraps(Renderer.set_scale)
def set_scale(self, *args, **kwargs):
"""Wrap ``Renderer.set_scale``."""
return self.renderer.set_scale(*args, **kwargs)
@wraps(Renderer.enable_eye_dome_lighting)
def enable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.enable_eye_dome_lighting``."""
return self.renderer.enable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.disable_eye_dome_lighting)
def disable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.disable_eye_dome_lighting``."""
self.renderer.disable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.reset_camera)
def reset_camera(self, *args, **kwargs):
"""Wrap ``Renderer.reset_camera``."""
self.renderer.reset_camera(*args, **kwargs)
self.render()
@wraps(Renderer.isometric_view)
def isometric_view(self, *args, **kwargs):
"""Wrap ``Renderer.isometric_view``."""
self.renderer.isometric_view(*args, **kwargs)
@wraps(Renderer.view_isometric)
def view_isometric(self, *args, **kwarg):
"""Wrap ``Renderer.view_isometric``."""
self.renderer.view_isometric(*args, **kwarg)
@wraps(Renderer.view_vector)
def view_vector(self, *args, **kwarg):
"""Wrap ``Renderer.view_vector``."""
self.renderer.view_vector(*args, **kwarg)
@wraps(Renderer.view_xy)
def view_xy(self, *args, **kwarg):
"""Wrap ``Renderer.view_xy``."""
self.renderer.view_xy(*args, **kwarg)
@wraps(Renderer.view_yx)
def view_yx(self, *args, **kwarg):
"""Wrap ``Renderer.view_yx``."""
self.renderer.view_yx(*args, **kwarg)
@wraps(Renderer.view_xz)
def view_xz(self, *args, **kwarg):
"""Wrap ``Renderer.view_xz``."""
self.renderer.view_xz(*args, **kwarg)
@wraps(Renderer.view_zx)
def view_zx(self, *args, **kwarg):
"""Wrap ``Renderer.view_zx``."""
self.renderer.view_zx(*args, **kwarg)
@wraps(Renderer.view_yz)
def view_yz(self, *args, **kwarg):
"""Wrap ``Renderer.view_yz``."""
self.renderer.view_yz(*args, **kwarg)
@wraps(Renderer.view_zy)
def view_zy(self, *args, **kwarg):
"""Wrap ``Renderer.view_zy``."""
self.renderer.view_zy(*args, **kwarg)
@wraps(Renderer.disable)
def disable(self, *args, **kwarg):
"""Wrap ``Renderer.disable``."""
self.renderer.disable(*args, **kwarg)
@wraps(Renderer.enable)
def enable(self, *args, **kwarg):
"""Wrap ``Renderer.enable``."""
self.renderer.enable(*args, **kwarg)
@wraps(Renderer.enable_depth_peeling)
def enable_depth_peeling(self, *args, **kwargs):
"""Wrap ``Renderer.enable_depth_peeling``."""
if hasattr(self, 'ren_win'):
result = self.renderer.enable_depth_peeling(*args, **kwargs)
if result:
self.ren_win.AlphaBitPlanesOn()
return result
@wraps(Renderer.disable_depth_peeling)
def disable_depth_peeling(self):
"""Wrap ``Renderer.disable_depth_peeling``."""
if hasattr(self, 'ren_win'):
self.ren_win.AlphaBitPlanesOff()
return self.renderer.disable_depth_peeling()
@wraps(Renderer.get_default_cam_pos)
def get_default_cam_pos(self, *args, **kwargs):
"""Wrap ``Renderer.get_default_cam_pos``."""
return self.renderer.get_default_cam_pos(*args, **kwargs)
@wraps(Renderer.remove_actor)
def remove_actor(self, *args, **kwargs):
"""Wrap ``Renderer.remove_actor``."""
for renderer in self.renderers:
renderer.remove_actor(*args, **kwargs)
return True
@wraps(Renderer.set_environment_texture)
def set_environment_texture(self, *args, **kwargs):
"""Wrap ``Renderer.set_environment_texture``."""
return self.renderer.set_environment_texture(*args, **kwargs)
#### Properties from Renderer ####
@property
def camera(self):
"""Return the active camera of the active renderer."""
if not self.camera_set:
self.camera_position = self.get_default_cam_pos()
self.reset_camera()
self.camera_set = True
return self.renderer.camera
@camera.setter
def camera(self, camera):
"""Set the active camera for the rendering scene."""
self.renderer.camera = camera
@property
def camera_set(self):
"""Return if the camera of the active renderer has been set."""
return self.renderer.camera_set
@camera_set.setter
def camera_set(self, is_set):
"""Set if the camera has been set on the active renderer."""
self.renderer.camera_set = is_set
@property
def bounds(self):
"""Return the bounds of the active renderer."""
return self.renderer.bounds
@property
def length(self):
"""Return the length of the diagonal of the bounding box of the scene."""
return self.renderer.length
@property
def center(self):
"""Return the center of the active renderer."""
return self.renderer.center
@property
def _scalar_bar_slots(self):
"""Return the scalar bar slots of the active renderer."""
return self.renderer._scalar_bar_slots
@_scalar_bar_slots.setter
def _scalar_bar_slots(self, value):
"""Set the scalar bar slots of the active renderer."""
self.renderer._scalar_bar_slots = value
@property
def _scalar_bar_slot_lookup(self):
"""Return the scalar bar slot lookup of the active renderer."""
return self.renderer._scalar_bar_slot_lookup
@_scalar_bar_slot_lookup.setter
def _scalar_bar_slot_lookup(self, value):
"""Set the scalar bar slot lookup of the active renderer."""
self.renderer._scalar_bar_slot_lookup = value
@property
def scale(self):
"""Return the scaling of the active renderer."""
return self.renderer.scale
@scale.setter
def scale(self, scale):
"""Set the scaling of the active renderer."""
self.renderer.set_scale(*scale)
@property
def camera_position(self):
"""Return camera position of the active render window."""
return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
"""Set camera position of the active render window."""
self.renderer.camera_position = camera_location
@property
def background_color(self):
"""Return the background color of the active render window."""
return self.renderers.active_renderer.GetBackground()
@background_color.setter
def background_color(self, color):
"""Set the background color of all the render windows."""
self.set_background(color)
@property
def window_size(self):
"""Return the render window size in ``(width, height)``.
Examples
--------
Change the window size from ``200 x 200`` to ``400 x 400``.
>>> import pyvista
>>> pl = pyvista.Plotter(window_size=[200, 200])
>>> pl.window_size
[200, 200]
>>> pl.window_size = [400, 400]
>>> pl.window_size
[400, 400]
"""
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
"""Set the render window size."""
self.ren_win.SetSize(window_size[0], window_size[1])
@property
    def image_depth(self):
        """Return a depth image representing the current render window.
Helper attribute for ``get_image_depth``.
"""
return self.get_image_depth()
def _check_rendered(self):
"""Check if the render window has been shown and raise an exception if not."""
if not self._rendered:
raise AttributeError('\nThis plotter has not yet been setup and rendered '
'with ``show()``.\n'
'Consider setting ``off_screen=True`` '
'for off screen rendering.\n')
def _check_has_ren_win(self):
"""Check if render window attribute exists and raise an exception if not."""
if not hasattr(self, 'ren_win'):
raise AttributeError('\n\nTo retrieve an image after the render window '
'has been closed, set:\n\n'
' ``plotter.store_image = True``\n\n'
'before closing the plotter.')
@property
    def image(self):
        """Return an image array of the current render window.
To retrieve an image after the render window has been closed,
set: ``plotter.store_image = True`` before closing the plotter.
"""
if not hasattr(self, 'ren_win') and self.last_image is not None:
return self.last_image
self._check_rendered()
self._check_has_ren_win()
data = image_from_window(self.ren_win)
if self.image_transparent_background:
return data
# ignore alpha channel
return data[:, :, :-1]
def render(self):
"""Render the main window.
Does nothing until ``show`` has been called.
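        Examples
        --------
        A minimal, illustrative re-render of a scene that has been shown.
        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_mesh(pyvista.Cube())
        >>> pl.show(auto_close=False)  # doctest:+SKIP
        >>> pl.render()  # doctest:+SKIP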
"""
if hasattr(self, 'ren_win') and not self._first_time:
log.debug('Rendering')
self.ren_win.Render()
self._rendered = True
@wraps(RenderWindowInteractor.add_key_event)
def add_key_event(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.add_key_event."""
if hasattr(self, 'iren'):
self.iren.add_key_event(*args, **kwargs)
def clear_events_for_key(self, key):
"""Remove the callbacks associated to the key.
Parameters
----------
key : str
Key to clear events for.
"""
self.iren.clear_events_for_key(key)
def store_mouse_position(self, *args):
"""Store mouse position."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.mouse_position = self.iren.get_event_position()
def store_click_position(self, *args):
"""Store click position in viewport coordinates."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.click_position = self.iren.get_event_position()
self.mouse_position = self.click_position
def track_mouse_position(self):
"""Keep track of the mouse position.
This will potentially slow down the interactor. No callbacks
supported here - use
:func:`pyvista.BasePlotter.track_click_position` instead.
"""
self.iren.track_mouse_position(self.store_mouse_position)
def untrack_mouse_position(self):
"""Stop tracking the mouse position."""
self.iren.untrack_mouse_position()
@wraps(RenderWindowInteractor.track_click_position)
def track_click_position(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.track_click_position."""
self.iren.track_click_position(*args, **kwargs)
def untrack_click_position(self):
"""Stop tracking the click position."""
self.iren.untrack_click_position()
def _prep_for_close(self):
"""Make sure a screenshot is acquired before closing.
This doesn't actually close anything! It just preps the plotter for
closing.
"""
# Grab screenshot right before renderer closes
self.last_image = self.screenshot(True, return_img=True)
self.last_image_depth = self.get_image_depth()
def increment_point_size_and_line_width(self, increment):
"""Increment point size and line width of all actors.
For every actor in the scene, increment both its point size
and line width by the given value.
Parameters
----------
increment : float
Amount to increment point size and line width.
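        Examples
        --------
        An illustrative bump of point size and line width for all actors.
        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_mesh(pyvista.Sphere(), style='points')
        >>> pl.increment_point_size_and_line_width(5)
        >>> pl.show()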
"""
for renderer in self.renderers:
for actor in renderer._actors.values():
if hasattr(actor, "GetProperty"):
prop = actor.GetProperty()
if hasattr(prop, "SetPointSize"):
prop.SetPointSize(prop.GetPointSize() + increment)
if hasattr(prop, "SetLineWidth"):
prop.SetLineWidth(prop.GetLineWidth() + increment)
self.render()
return
def reset_key_events(self):
"""Reset all of the key press events to their defaults."""
if hasattr(self, 'iren'):
self.iren.clear_key_event_callbacks()
self.add_key_event('q', self._prep_for_close) # Add no matter what
b_left_down_callback = lambda: self.iren.add_observer('LeftButtonPressEvent', self.left_button_down)
self.add_key_event('b', b_left_down_callback)
self.add_key_event('v', lambda: self.isometric_view_interactive())
self.add_key_event('C', lambda: self.enable_cell_picking())
self.add_key_event('Up', lambda: self.camera.Zoom(1.05))
self.add_key_event('Down', lambda: self.camera.Zoom(0.95))
self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1))
self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1))
@wraps(RenderWindowInteractor.key_press_event)
def key_press_event(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.key_press_event."""
self.iren.key_press_event(*args, **kwargs)
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click."""
if hasattr(self.ren_win, 'GetOffScreenFramebuffer'):
if not self.ren_win.GetOffScreenFramebuffer().GetFBOIndex():
# raising here prevents a segfault on VTK9 when no framebuffer exists
raise ValueError('Invoking helper with no framebuffer')
# Get 2D click location on window
click_pos = self.iren.get_event_position()
# Get corresponding click location in the 3D plot
picker = _vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
@wraps(RenderWindowInteractor.enable_trackball_style)
def enable_trackball_style(self):
"""Wrap RenderWindowInteractor.enable_trackball_style."""
self.iren.enable_trackball_style()
@wraps(RenderWindowInteractor.enable_trackball_actor_style)
def enable_trackball_actor_style(self):
"""Wrap RenderWindowInteractor.enable_trackball_actor_style."""
self.iren.enable_trackball_actor_style()
@wraps(RenderWindowInteractor.enable_image_style)
def enable_image_style(self):
"""Wrap RenderWindowInteractor.enable_image_style."""
self.iren.enable_image_style()
@wraps(RenderWindowInteractor.enable_joystick_style)
def enable_joystick_style(self):
"""Wrap RenderWindowInteractor.enable_joystick_style."""
self.iren.enable_joystick_style()
@wraps(RenderWindowInteractor.enable_joystick_actor_style)
def enable_joystick_actor_style(self):
"""Wrap RenderWindowInteractor.enable_joystick_actor_style."""
self.iren.enable_joystick_actor_style()
@wraps(RenderWindowInteractor.enable_zoom_style)
def enable_zoom_style(self):
"""Wrap RenderWindowInteractor.enable_zoom_style."""
self.iren.enable_zoom_style()
@wraps(RenderWindowInteractor.enable_terrain_style)
def enable_terrain_style(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.enable_terrain_style."""
self.iren.enable_terrain_style(*args, **kwargs)
@wraps(RenderWindowInteractor.enable_rubber_band_style)
def enable_rubber_band_style(self):
"""Wrap RenderWindowInteractor.enable_rubber_band_style."""
self.iren.enable_rubber_band_style()
@wraps(RenderWindowInteractor.enable_rubber_band_2d_style)
def enable_rubber_band_2d_style(self):
"""Wrap RenderWindowInteractor.enable_rubber_band_2d_style."""
self.iren.enable_rubber_band_2d_style()
def enable_stereo_render(self):
"""Enable stereo rendering.
Disable this with :func:`disable_stereo_render
<BasePlotter.disable_stereo_render>`
Examples
--------
Enable stereo rendering to show a cube as an anaglyph image.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.enable_stereo_render()
>>> pl.show()
"""
if hasattr(self, 'ren_win'):
self.ren_win.StereoRenderOn()
self.ren_win.SetStereoTypeToAnaglyph()
def disable_stereo_render(self):
"""Disable stereo rendering.
Enable again with :func:`enable_stereo_render
<BasePlotter.enable_stereo_render>`
Examples
--------
Enable and then disable stereo rendering. It should show a simple cube.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.enable_stereo_render()
>>> pl.disable_stereo_render()
>>> pl.show()
"""
if hasattr(self, 'ren_win'):
self.ren_win.StereoRenderOff()
def hide_axes_all(self):
"""Hide the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.hide_axes()
def show_axes_all(self):
"""Show the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.show_axes()
def isometric_view_interactive(self):
"""Set the current interactive render window to isometric view."""
interactor = self.iren.get_interactor_style()
renderer = interactor.GetCurrentRenderer()
if renderer is None:
renderer = self.renderer
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""Update window, redraw, process messages query.
Parameters
----------
stime : int, optional
Duration of the timer that interrupts the
vtkRenderWindowInteractor, in milliseconds.
force_redraw : bool, optional
Call ``render`` immediately.
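Examples
--------
A hedged sketch of a manual update loop; it assumes ``show`` was
called with ``interactive_update=True`` and ``auto_close=False``
so the window stays open between calls.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.show(interactive_update=True, auto_close=False)  # doctest:+SKIP
>>> for _ in range(10):  # doctest:+SKIP
...     pl.update()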
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if self.iren is not None:
update_rate = self.iren.get_desired_update_rate()
if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
self.right_timer_id = self.iren.create_repeating_timer(stime)
self.render()
Plotter.last_update_time = curr_time
return
if force_redraw:
self.render()
def add_mesh(self, mesh, color=None, style=None, scalars=None,
clim=None, show_edges=None, edge_color=None,
point_size=5.0, line_width=None, opacity=1.0,
flip_scalars=False, lighting=None, n_colors=256,
interpolate_before_map=True, cmap=None, label=None,
reset_camera=None, scalar_bar_args=None, show_scalar_bar=None,
multi_colors=False, name=None, texture=None,
render_points_as_spheres=None, render_lines_as_tubes=False,
smooth_shading=None, ambient=0.0, diffuse=1.0, specular=0.0,
specular_power=100.0, nan_color=None, nan_opacity=1.0,
culling=None, rgb=None, categories=False, silhouette=False,
use_transparency=False, below_color=None, above_color=None,
annotations=None, pickable=True, preference="point",
log_scale=False, pbr=False, metallic=0.0, roughness=0.5,
render=True, component=None, **kwargs):
"""Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene.
This method uses a mesh representation to view the surfaces
and/or geometry of datasets. For volume rendering, see
:func:`pyvista.BasePlotter.add_volume`.
Parameters
----------
mesh : pyvista.DataSet or pyvista.MultiBlock
Any PyVista or VTK mesh is supported. Also, any dataset
that :func:`pyvista.wrap` can handle including NumPy
arrays of XYZ points.
color : str or 3 item list, optional, defaults to white
Use to make the entire mesh have a single solid color.
Either a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
style : str, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``, ``style='points'``.
Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a
wireframe of the outer geometry.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name
of an array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active
scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : str or 3 item list, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also
applicable when style='points'. Default ``5.0``.
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a single float value is given, it
will be the global opacity of the mesh and uniformly
applied everywhere - should be between 0 and 1. A string
can also be specified to map the scalars range to a
predefined opacity transfer function (options include:
'linear', 'linear_r', 'geom', 'geom_r'). A string could
also be used to map a scalars array from the mesh to the
opacity (must have same number of elements as the
``scalars`` argument). Or you can pass a custom made
transfer function that is an array either ``n_colors`` in
length or shorter.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r``
suffix to do this as well.
lighting : bool, optional
Enable or disable view direction lighting. Default ``False``.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalars display. Default is
``True``. When ``False``, OpenGL will interpolate the
mapped colors, which can result in showing colors that are
not present in the color map.
cmap : str, list, optional
Name of the Matplotlib colormap to use when mapping the
``scalars``. See available Matplotlib colormaps. Only
applicable for when displaying ``scalars``. Requires
Matplotlib to be installed. ``colormap`` is also an
accepted alias for this. If ``colorcet`` or ``cmocean``
are installed, their colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``.
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the
scalar bar to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If ``False``, a scalar bar will not be added to the
scene. Defaults to ``True``.
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or bool, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
render_points_as_spheres : bool, optional
Render points as spheres rather than dots.
render_lines_as_tubes : bool, optional
Show lines as thick tubes rather than flat lines. Control
the width with ``line_width``.
smooth_shading : bool, optional
Enable smooth shading when ``True`` using either the
Gouraud or Phong shading algorithm. When ``False``, use
flat shading.
Automatically enabled when ``pbr=True``.
ambient : float, optional
When lighting is enabled, this is the amount of light in
the range of 0 to 1 (default 0.0) that reaches the actor
when not directed at the light source emitted from the
viewer.
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0.
specular : float, optional
The specular lighting coefficient. Default 0.0.
specular_power : float, optional
The specular power. Between 0.0 and 128.0.
nan_color : str or 3 item list, optional, defaults to gray
The color to use for all ``NaN`` values in the plotted
scalar array.
nan_opacity : float, optional
Opacity of ``NaN`` values. Should be between 0 and 1.
Default 1.0.
culling : str, optional
Does not render faces that are culled. Options are
``'front'`` or ``'back'``. This can be helpful for dense
surface meshes, especially when edges are visible, but can
cause flat meshes to be partially displayed. Defaults to
``False``.
rgb : bool, optional
If a 2-dimensional array is passed as the scalars, plot
those values as RGB(A) colors. ``rgba`` is also an
accepted alias for this. Opacity (the A) is optional. If
a scalars array ending with ``"_rgba"`` is passed, the default
becomes ``True``. This can be overridden by setting this
parameter to ``False``.
categories : bool, optional
If set to ``True``, then the number of unique values in
the scalar array will be used as the ``n_colors``
argument.
silhouette : dict, bool, optional
If set to ``True``, plot a silhouette highlight for the
mesh. This feature is only available for a triangulated
``PolyData``. As a ``dict``, it contains the properties
of the silhouette to display:
* ``color``: ``str`` or 3-item ``list``, color of the silhouette
* ``line_width``: ``float``, edge width
* ``opacity``: ``float`` between 0 and 1, edge transparency
* ``feature_angle``: If a ``float``, display sharp edges
exceeding that angle in degrees.
* ``decimate``: ``float`` between 0 and 1, level of decimation
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond
to transparency.
below_color : str or 3 item list, optional
Solid color for values below the scalars range
(``clim``). This will automatically set the scalar bar
``below_label`` to ``'Below'``.
above_color : str or 3 item list, optional
Solid color for values above the scalars range
(``clim``). This will automatically set the scalar bar
``above_label`` to ``'Above'``.
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float
values in the scalars range to annotate on the scalar bar
and the values are the string annotations.
pickable : bool, optional
Set whether this actor is pickable.
preference : str, optional
When ``mesh.n_points == mesh.n_cells`` and setting
scalars, this parameter sets how the scalars will be
mapped to the mesh. The default ``'points'`` causes the
scalars to be associated with the mesh points. Can be
either ``'points'`` or ``'cells'``.
log_scale : bool, optional
Use log scale when mapping data to colors. Scalars less
than zero are mapped to the smallest representable
positive float. Default: ``False``.
pbr : bool, optional
Enable physics based rendering (PBR) if the mesh is
``PolyData``. Use the ``color`` argument to set the base
color. This is only available in VTK>=9.
metallic : float, optional
Usually this value is either 0 or 1 for a real material
but any value in between is valid. This parameter is only
used by PBR interpolation. Default value is 0.0.
roughness : float, optional
This value has to be between 0 (glossy) and 1 (rough). A
glossy material has reflections and a high specular
part. This parameter is only used by PBR
interpolation. Default value is 0.5.
render : bool, optional
Force a render when ``True``. Default ``True``.
component : int, optional
Set component of vector valued scalars to plot. Must be
nonnegative, if supplied. If ``None``, the magnitude of
the vector is plotted.
**kwargs : dict, optional
Optional developer keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the mesh.
Examples
--------
Add a sphere to the plotter and show it with a custom scalar
bar title.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere,
... scalar_bar_args={'title': 'Z Position'})
>>> plotter.show()
Plot a mesh with per-cell RGB colors. Note that since the number of
points and the number of cells are identical, we have to pass
``preference='cell'``.
>>> import pyvista
>>> import numpy as np
>>> vertices = np.array([[0, 0, 0], [1, 0, 0], [.5, .667, 0], [0.5, .33, 0.667]])
>>> faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2], [3, 0, 1, 3], [3, 1, 2, 3]])
>>> mesh = pyvista.PolyData(vertices, faces)
>>> mesh.cell_data['colors'] = [[255, 255, 255],
... [0, 255, 0],
... [0, 0, 255],
... [255, 0, 0]]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
... rgb=True, preference='cell')
>>> plotter.camera_position='xy'
>>> plotter.show()
Note how the result below, with ``preference='point'``, differs
from the plot above: each point is now individually colored,
whereas with ``preference='cell'`` each cell face was
individually colored.
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
... rgb=True, preference='point')
>>> plotter.camera_position='xy'
>>> plotter.show()
"""
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not is_pyvista_dataset(mesh):
raise TypeError(f'Object type ({type(mesh)}) not supported for plotting in PyVista.')
##### Parse arguments to be used for all meshes #####
# Avoid mutating input
if scalar_bar_args is None:
scalar_bar_args = {'n_colors': n_colors}
else:
scalar_bar_args = scalar_bar_args.copy()
if show_edges is None:
show_edges = self._theme.show_edges
if edge_color is None:
edge_color = self._theme.edge_color
if show_scalar_bar is None:
show_scalar_bar = self._theme.show_scalar_bar
if lighting is None:
lighting = self._theme.lighting
if smooth_shading is None:
if pbr:
smooth_shading = True
else:
smooth_shading = self._theme.smooth_shading
# supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if render_points_as_spheres is None:
render_points_as_spheres = self._theme.render_points_as_spheres
if name is None:
name = f'{type(mesh).__name__}({mesh.memory_address})'
if nan_color is None:
nan_color = self._theme.nan_color
nan_color = list(parse_color(nan_color))
nan_color.append(nan_opacity)
if color is True:
color = self._theme.color
if texture is False:
texture = None
if culling is True:
culling = 'backface'
rgb = kwargs.pop('rgba', rgb)
# account for legacy behavior
if 'stitle' in kwargs: # pragma: no cover
warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning)
scalar_bar_args.setdefault('title', kwargs.pop('stitle'))
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
##### Handle composite datasets #####
if isinstance(mesh, pyvista.MultiBlock):
# first check the scalars
if clim is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalars specified
if isinstance(scalars, str):
clim = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise TypeError('scalars array must be given as a string name for multiblock datasets.')
the_arguments = locals()
the_arguments.pop('self')
the_arguments.pop('mesh')
the_arguments.pop('kwargs')
if multi_colors:
# Compute unique colors for each index of the block
if _has_matplotlib():
import matplotlib
from itertools import cycle
cycler = matplotlib.rcParams['axes.prop_cycle']
colors = cycle(cycler)
else:
multi_colors = False
logging.warning('Please install matplotlib for color cycles')
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
if not is_pyvista_dataset(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_pyvista_dataset(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1):
# Note that a block can exist but be None type
# or it could have zeros points (be empty) after filtering
continue
# Now check that scalars is available for this dataset
if isinstance(data, _vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
## Add to the scene
the_arguments['color'] = color
the_arguments['scalars'] = ts
the_arguments['name'] = next_name
the_arguments['texture'] = None
a = self.add_mesh(data, **the_arguments)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
##### Plot a single PyVista mesh #####
if silhouette:
if isinstance(silhouette, dict):
self.add_silhouette(mesh, silhouette)
else:
self.add_silhouette(mesh)
# Compute surface normals if using smooth shading
if smooth_shading:
# extract surface if mesh is exterior
if not isinstance(mesh, pyvista.PolyData):
grid = mesh
mesh = grid.extract_surface()
ind = mesh.point_data['vtkOriginalPointIds']
# remap scalars
if isinstance(scalars, np.ndarray):
scalars = scalars[ind]
if texture:
_tcoords = mesh.active_t_coords
mesh.compute_normals(cell_normals=False, inplace=True)
if texture:
mesh.active_t_coords = _tcoords
if mesh.n_points < 1:
raise ValueError('Empty meshes cannot be plotted. Input mesh has zero points.')
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalars components are not vectors/tuples
scalars = mesh.active_scalars_name
# Don't allow plotting of string arrays by default
if scalars is not None:  # and np.issubdtype(mesh.active_scalars.dtype, np.number):
scalar_bar_args.setdefault('title', scalars)
else:
scalars = None
# set main values
self.mesh = mesh
self.mapper = make_mapper(_vtk.vtkDataSetMapper)
self.mapper.SetInputData(self.mesh)
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
actor = _vtk.vtkActor()
prop = _vtk.vtkProperty()
actor.SetMapper(self.mapper)
actor.SetProperty(prop)
# Make sure scalars is a numpy array after this point
original_scalar_name = None
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
# enable rgb if the scalars name ends with rgb or rgba
if rgb is None:
if scalars.endswith('_rgb') or scalars.endswith('_rgba'):
rgb = True
original_scalar_name = scalars
scalars = get_array(mesh, scalars,
preference=preference, err=True)
scalar_bar_args.setdefault('title', original_scalar_name)
if texture is True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (_vtk.vtkTexture, _vtk.vtkOpenGLTexture)):
raise TypeError(f'Invalid texture type ({type(texture)})')
if mesh.GetPointData().GetTCoords() is None:
raise ValueError('Input mesh does not have texture coordinates to support the texture.')
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# see https://github.com/pyvista/pyvista/issues/950
mesh.set_active_scalars(None)
# Handle making opacity array =========================================
_custom_opac = False
if isinstance(opacity, str):
try:
# Get array from mesh
opacity = get_array(mesh, opacity,
preference=preference, err=True)
if np.any(opacity > 1):
warnings.warn("Opacity scalars contain values over 1")
if np.any(opacity < 0):
warnings.warn("Opacity scalars contain values less than 0")
_custom_opac = True
except:
# Or get opacity transfer function
opacity = opacity_transfer_function(opacity, n_colors)
else:
if scalars.shape[0] != opacity.shape[0]:
raise ValueError('Opacity array and scalars array must have the same number of elements.')
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
if scalars.shape[0] == opacity.shape[0]:
# User could pass an array of opacities for every point/cell
_custom_opac = True
else:
opacity = opacity_transfer_function(opacity, n_colors)
if use_transparency and np.max(opacity) <= 1.0:
opacity = 1 - opacity
elif use_transparency and isinstance(opacity, np.ndarray):
opacity = 255 - opacity
# Scalars formatting ==================================================
if cmap is None: # Set default map if matplotlib is available
if _has_matplotlib():
cmap = self._theme.cmap
# Set the array title for when it is added back to the mesh
if _custom_opac:
title = '__custom_rgba'
else:
title = scalar_bar_args.get('title', 'Data')
if scalars is not None:
# if scalars is a string, then get the first array found with that name
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
_using_labels = False
if not np.issubdtype(scalars.dtype, np.number):
# raise TypeError('Non-numeric scalars are currently not supported for plotting.')
# TODO: If str array, digitize and annotate
cats, scalars = np.unique(scalars.astype('|S'), return_inverse=True)
values = np.unique(scalars)
clim = [np.min(values) - 0.5, np.max(values) + 0.5]
title = f'{title}-digitized'
n_colors = len(cats)
scalar_bar_args.setdefault('n_labels', 0)
_using_labels = True
if rgb:
show_scalar_bar = False
if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4:
raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.')
if scalars.ndim != 1:
if rgb:
pass
elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells):
if not isinstance(component, (int, type(None))):
raise TypeError('component must be either None or an integer')
if component is None:
scalars = np.linalg.norm(scalars.copy(), axis=1)
title = '{}-normed'.format(title)
elif component < scalars.shape[1] and component >= 0:
scalars = scalars[:, component].copy()
title = '{}-{}'.format(title, component)
else:
raise ValueError(
('component must be nonnegative and less than the '
'dimensionality of the scalars array: {}').format(
scalars.shape[1]
)
)
else:
scalars = scalars.ravel()
if scalars.dtype == np.bool_:
scalars = scalars.astype(np.float_)
def prepare_mapper(scalars):
if (scalars.shape[0] == mesh.n_points and
scalars.shape[0] == mesh.n_cells):
use_points = preference == 'point'
use_cells = not use_points
else:
use_points = scalars.shape[0] == mesh.n_points
use_cells = scalars.shape[0] == mesh.n_cells
# Scalars interpolation approach
if use_points:
self.mesh.point_data.set_array(scalars, title, True)
self.mesh.active_scalars_name = title
self.mapper.SetScalarModeToUsePointData()
elif use_cells:
self.mesh.cell_data.set_array(scalars, title, True)
self.mesh.active_scalars_name = title
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, mesh)
# Common tasks
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
if rgb or _custom_opac:
self.mapper.SetColorModeToDirectScalars()
else:
self.mapper.SetColorModeToMapScalars()
return
prepare_mapper(scalars)
table = self.mapper.GetLookupTable()
if _using_labels:
table.SetAnnotations(convert_array(values), convert_string_array(cats))
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, (int, float)):
clim = [-clim, clim]
if log_scale:
if clim[0] <= 0:
clim = [sys.float_info.min, clim[1]]
table.SetScaleToLog10()
if np.any(clim) and not rgb:
self.mapper.scalar_range = clim[0], clim[1]
table.SetNanColor(nan_color)
if above_color:
table.SetUseAboveRangeColor(True)
table.SetAboveRangeColor(*parse_color(above_color, opacity=1))
scalar_bar_args.setdefault('above_label', 'Above')
if below_color:
table.SetUseBelowRangeColor(True)
table.SetBelowRangeColor(*parse_color(below_color, opacity=1))
scalar_bar_args.setdefault('below_label', 'Below')
if cmap is not None:
# have to add the attribute to pass it onward to some classes
if isinstance(cmap, str):
self.mapper.cmap = cmap
# ipygany uses different colormaps
if self._theme.jupyter_backend == 'ipygany':
from ..jupyter.pv_ipygany import check_colormap
check_colormap(cmap)
else:
if not _has_matplotlib():
cmap = None
logging.warning('Please install matplotlib for color maps.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
ctable = cmap(np.linspace(0, 1, n_colors))*255
ctable = ctable.astype(np.uint8)
# Set opacities
if isinstance(opacity, np.ndarray) and not _custom_opac:
ctable[:, -1] = opacity
if flip_scalars:
ctable = np.ascontiguousarray(ctable[::-1])
table.SetTable(_vtk.numpy_to_vtk(ctable))
if _custom_opac:
# need to round the colors here since we're
# directly displaying the colors
hue = normalize(scalars, minimum=clim[0], maximum=clim[1])
scalars = np.round(hue*n_colors)/n_colors
scalars = cmap(scalars)*255
scalars[:, -1] *= opacity
scalars = scalars.astype(np.uint8)
prepare_mapper(scalars)
else: # no cmap specified
if flip_scalars:
table.SetHueRange(0.0, 0.66667)
else:
table.SetHueRange(0.66667, 0.0)
else:
self.mapper.SetScalarModeToUseFieldData()
# Set actor properties ================================================
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = self._theme.outline_color
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise ValueError('Invalid style. Must be one of the following:\n'
'\t"surface"\n'
'\t"wireframe"\n'
'\t"points"\n')
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
if pbr:
if not _vtk.VTK9: # pragma: no cover
raise RuntimeError('Physically based rendering requires VTK 9 '
'or newer')
prop.SetInterpolationToPBR()
prop.SetMetallic(metallic)
prop.SetRoughness(roughness)
elif smooth_shading:
prop.SetInterpolationToPhong()
else:
prop.SetInterpolationToFlat()
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = parse_color(color, default_color=self._theme.color)
prop.SetColor(rgb_color)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(parse_color(edge_color))
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise TypeError('Label must be a string')
geom = pyvista.Triangle()
if scalars is not None:
geom = pyvista.Box()
rgb_color = parse_color('black')
geom.points -= geom.center
addr = actor.GetAddressAsString("")
self.renderer._labels[addr] = [geom, label, rgb_color]
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
self.add_actor(actor, reset_camera=reset_camera, name=name, culling=culling,
pickable=pickable, render=render)
# hide scalar bar if using special scalars
if scalar_bar_args.get('title') == '__custom_rgba':
show_scalar_bar = False
# Only show scalar bar if there are scalars
if show_scalar_bar and scalars is not None:
self.add_scalar_bar(**scalar_bar_args)
self.renderer.Modified()
return actor
def add_volume(self, volume, scalars=None, clim=None, resolution=None,
opacity='linear', n_colors=256, cmap=None, flip_scalars=False,
reset_camera=None, name=None, ambient=0.0, categories=False,
culling=False, multi_colors=False,
blending='composite', mapper=None,
scalar_bar_args=None, show_scalar_bar=None,
annotations=None, pickable=True, preference="point",
opacity_unit_distance=None, shade=False,
diffuse=0.7, specular=0.2, specular_power=10.0,
render=True, **kwargs):
"""Add a volume, rendered using a smart mapper by default.
Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`.
Parameters
----------
volume : 3D numpy.ndarray or pyvista.UniformGrid
The input volume to visualize. 3D numpy arrays are accepted.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If ``scalars`` is
``None``, then the active scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
resolution : list, optional
Block resolution.
opacity : str or numpy.ndarray, optional
Opacity mapping for the scalars array.
A string can also be specified to map the scalars range to a
predefined opacity transfer function (options include: 'linear',
'linear_r', 'geom', 'geom_r'). Or you can pass a custom made
transfer function that is an array either ``n_colors`` in length or
shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene.
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults ``False``.
multi_colors : bool, optional
Whether or not to use multiple colors when plotting MultiBlock
object. Blocks will be colored sequentially as ``'Reds'``,
``'Greens'``, ``'Blues'``, ``'Greys'``, ``'Oranges'``, and ``'Purples'``.
blending : str, optional
Blending mode for visualization of the input object(s). Can be
one of ``'additive'``, ``'maximum'``, ``'minimum'``, ``'composite'``,
or ``'average'``. Defaults to ``'composite'``.
mapper : str, optional
Volume mapper to use given by name. Options include:
``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and
``'smart'``. If ``None`` the ``"volume_mapper"`` in the
``self._theme`` is used.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the
scalar bar to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If ``False``, a scalar bar will not be added to the
scene. Defaults to ``True``.
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float
values in the scalars range to annotate on the scalar bar
and the values are the string annotations.
pickable : bool, optional
Set whether this mesh is pickable.
preference : str, optional
When ``mesh.n_points == mesh.n_cells`` and setting
scalars, this parameter sets how the scalars will be
mapped to the mesh. The default ``'points'`` causes the
scalars to be associated with the mesh points. Can be
either ``'points'`` or ``'cells'``.
opacity_unit_distance : float
Set/Get the unit distance on which the scalar opacity
transfer function is defined. Meaning that over that
distance, a given opacity (from the transfer function) is
accumulated. This is adjusted for the actual sampling
distance during rendering. By default, this is the length
of the diagonal of the bounding box of the volume divided
by the mean of its dimensions.
shade : bool
Default off. If shading is turned on, the mapper may
perform shading calculations - in some cases shading does
not apply (for example, in a maximum intensity projection)
and therefore shading will not be performed even if this
flag is on.
diffuse : float, optional
The diffuse lighting coefficient. Default ``0.7``.
specular : float, optional
The specular lighting coefficient. Default ``0.2``.
specular_power : float, optional
The specular power. Between ``0.0`` and ``128.0``. Default ``10.0``.
render : bool, optional
Force a render when True. Default ``True``.
**kwargs : dict, optional
Optional keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the volume.
Examples
--------
Show a built-in volume example with the coolwarm colormap.
>>> from pyvista import examples
>>> import pyvista as pv
>>> bolt_nut = examples.download_bolt_nut()
>>> pl = pv.Plotter()
>>> _ = pl.add_volume(bolt_nut, cmap="coolwarm")
>>> pl.show()
"""
# Handle default arguments
# Supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
# Avoid mutating input
if scalar_bar_args is None:
scalar_bar_args = {}
else:
scalar_bar_args = scalar_bar_args.copy()
# account for legacy behavior
if 'stitle' in kwargs: # pragma: no cover
warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning)
scalar_bar_args.setdefault('title', kwargs.pop('stitle'))
if show_scalar_bar is None:
show_scalar_bar = self._theme.show_scalar_bar
if culling is True:
culling = 'backface'
if mapper is None:
mapper = self._theme.volume_mapper
# only render when the plotter has already been shown
if render is None:
render = not self._first_time
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(volume):
if isinstance(volume, np.ndarray):
volume = wrap(volume)
if resolution is None:
resolution = [1, 1, 1]
elif len(resolution) != 3:
raise ValueError('Invalid resolution dimensions.')
volume.spacing = resolution
else:
volume = wrap(volume)
if not is_pyvista_dataset(volume):
raise TypeError(f'Object type ({type(volume)}) not supported for plotting in PyVista.')
else:
# HACK: Make a copy so the original object is not altered.
# Also, place all data on the nodes as issues arise when
# volume rendering on the cells.
volume = volume.cell_data_to_point_data()
if name is None:
name = f'{type(volume).__name__}({volume.memory_address})'
if isinstance(volume, pyvista.MultiBlock):
from itertools import cycle
cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples'])
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(volume.GetNumberOfBlocks()):
if volume[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
block = wrap(volume.GetBlock(idx))
if resolution is None:
try:
block_resolution = block.GetSpacing()
except AttributeError:
block_resolution = resolution
else:
block_resolution = resolution
if multi_colors:
color = next(cycler)
else:
color = cmap
a = self.add_volume(block, resolution=block_resolution, opacity=opacity,
n_colors=n_colors, cmap=color, flip_scalars=flip_scalars,
reset_camera=reset_camera, name=next_name,
ambient=ambient, categories=categories,
culling=culling, clim=clim,
mapper=mapper, pickable=pickable,
opacity_unit_distance=opacity_unit_distance,
shade=shade, diffuse=diffuse, specular=specular,
specular_power=specular_power, render=render)
actors.append(a)
return actors
if not isinstance(volume, pyvista.UniformGrid):
raise TypeError(f'Type {type(volume)} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.')
if opacity_unit_distance is None:
opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1)
if scalars is None:
# Make sure scalars components are not vectors/tuples
scalars = volume.active_scalars
# Don't allow plotting of string arrays by default
if scalars is not None and np.issubdtype(scalars.dtype, np.number):
scalar_bar_args.setdefault('title', volume.active_scalars_info[1])
else:
raise ValueError('No scalars to use for volume rendering.')
elif isinstance(scalars, str):
pass
##############
title = 'Data'
if isinstance(scalars, str):
title = scalars
scalars = get_array(volume, scalars,
preference=preference, err=True)
scalar_bar_args.setdefault('title', title)
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if not np.issubdtype(scalars.dtype, np.number):
raise TypeError('Non-numeric scalars are currently not supported for volume rendering.')
if scalars.ndim != 1:
scalars = scalars.ravel()
if scalars.dtype == np.bool_ or scalars.dtype == np.uint8:
scalars = scalars.astype(np.float_)
# Define mapper, volume, and add the correct properties
mappers = {
'fixed_point': _vtk.vtkFixedPointVolumeRayCastMapper,
'gpu': _vtk.vtkGPUVolumeRayCastMapper,
'open_gl': _vtk.vtkOpenGLGPUVolumeRayCastMapper,
'smart': _vtk.vtkSmartVolumeMapper,
}
if not isinstance(mapper, str) or mapper not in mappers.keys():
raise TypeError(f"Mapper ({mapper}) unknown. Available volume mappers include: {', '.join(mappers.keys())}")
self.mapper = make_mapper(mappers[mapper])
# Scalars interpolation approach
if scalars.shape[0] == volume.n_points:
volume.point_data.set_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == volume.n_cells:
volume.cell_data.set_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, volume)
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
###############
scalars = scalars.astype(np.float_)
with np.errstate(invalid='ignore'):
idxs0 = scalars < clim[0]
idxs1 = scalars > clim[1]
scalars[idxs0] = clim[0]
scalars[idxs1] = clim[1]
scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255
# scalars = scalars.astype(np.uint8)
volume[title] = scalars
self.mapper.scalar_range = clim
# Set colormap and build lookup table
table = _vtk.vtkLookupTable()
# table.SetNanColor(nan_color) # NaN's are chopped out with current implementation
# above/below colors not supported with volume rendering
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
if cmap is None: # Set default map if matplotlib is available
if _has_matplotlib():
cmap = self._theme.cmap
if cmap is not None:
if not _has_matplotlib():
raise ImportError('Please install matplotlib for volume rendering.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
if flip_scalars:
cmap = cmap.reversed()
color_tf = _vtk.vtkColorTransferFunction()
for ii in range(n_colors):
color_tf.AddRGBPoint(ii, *cmap(ii)[:-1])
# Set opacities
if isinstance(opacity, (float, int)):
opacity_values = [opacity] * n_colors
elif isinstance(opacity, str):
opacity_values = pyvista.opacity_transfer_function(opacity, n_colors)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
opacity_values = opacity_transfer_function(opacity, n_colors)
opacity_tf = _vtk.vtkPiecewiseFunction()
for ii in range(n_colors):
opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors)
# Now put color tf and opacity tf into a lookup table for the scalar bar
table.SetNumberOfTableValues(n_colors)
lut = cmap(np.array(range(n_colors))) * 255
lut[:,3] = opacity_values
lut = lut.astype(np.uint8)
table.SetTable(_vtk.numpy_to_vtk(lut))
table.SetRange(*clim)
self.mapper.lookup_table = table
self.mapper.SetInputData(volume)
blending = blending.lower()
if blending in ['additive', 'add', 'sum']:
self.mapper.SetBlendModeToAdditive()
elif blending in ['average', 'avg', 'average_intensity']:
self.mapper.SetBlendModeToAverageIntensity()
elif blending in ['composite', 'comp']:
self.mapper.SetBlendModeToComposite()
elif blending in ['maximum', 'max', 'maximum_intensity']:
self.mapper.SetBlendModeToMaximumIntensity()
elif blending in ['minimum', 'min', 'minimum_intensity']:
self.mapper.SetBlendModeToMinimumIntensity()
else:
raise ValueError(f"Blending mode '{blending}' invalid. "
"Please choose one of 'additive', 'average', "
"'composite', 'minimum' or 'maximum'.")
self.mapper.Update()
self.volume = _vtk.vtkVolume()
self.volume.SetMapper(self.mapper)
prop = _vtk.vtkVolumeProperty()
prop.SetColor(color_tf)
prop.SetScalarOpacity(opacity_tf)
prop.SetAmbient(ambient)
prop.SetScalarOpacityUnitDistance(opacity_unit_distance)
prop.SetShade(shade)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
self.volume.SetProperty(prop)
actor, prop = self.add_actor(self.volume, reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable, render=render)
# Add scalar bar if scalars are available
if show_scalar_bar and scalars is not None:
self.add_scalar_bar(**scalar_bar_args)
self.renderer.Modified()
return actor
def add_silhouette(self, mesh, params=None):
"""Add a silhouette of a PyVista or VTK dataset to the scene.
A silhouette can also be generated directly in
:func:`add_mesh <pyvista.Plotter.add_mesh>`. See also
:ref:`silhouette_example`.
Parameters
----------
mesh : pyvista.PolyData
Mesh for generating silhouette to plot.
params : dict, optional
* If not supplied, the default theme values will be used.
* ``color``: ``str`` or 3-item ``list``, color of the silhouette
* ``line_width``: ``float``, edge width
* ``opacity``: ``float`` between 0 and 1, edge transparency
* ``feature_angle``: If a ``float``, display sharp edges
exceeding that angle in degrees.
* ``decimate``: ``float`` between 0 and 1, level of decimation
Returns
-------
vtk.vtkActor
VTK actor of the silhouette.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> bunny = examples.download_bunny()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(bunny, color='tan')
>>> _ = plotter.add_silhouette(bunny,
... params={'color': 'red', 'line_width': 8.0})
>>> plotter.view_xy()
>>> plotter.show()
"""
silhouette_params = self._theme.silhouette.to_dict()
if params:
silhouette_params.update(params)
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not isinstance(mesh, pyvista.PolyData):
raise TypeError(f"Expected type is `PolyData` but {type(mesh)} was given.")
if isinstance(silhouette_params["decimate"], float):
silhouette_mesh = mesh.decimate(silhouette_params["decimate"])
else:
silhouette_mesh = mesh
alg = _vtk.vtkPolyDataSilhouette()
alg.SetInputData(silhouette_mesh)
alg.SetCamera(self.renderer.camera)
if silhouette_params["feature_angle"] is not None:
alg.SetEnableFeatureAngle(True)
alg.SetFeatureAngle(silhouette_params["feature_angle"])
else:
alg.SetEnableFeatureAngle(False)
mapper = make_mapper(_vtk.vtkDataSetMapper)
mapper.SetInputConnection(alg.GetOutputPort())
actor, prop = self.add_actor(mapper)
prop.SetColor(parse_color(silhouette_params["color"]))
prop.SetOpacity(silhouette_params["opacity"])
prop.SetLineWidth(silhouette_params["line_width"])
return actor
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : sequence
The new range of scalar bar. Two item list (e.g. ``[-1, 2]``).
name : str, optional
The title of the scalar bar to update.
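Examples
--------
A minimal sketch; the scalar bar is looked up by its title, so
``name`` must match the title used when the mesh was added (here
the scalars name ``'Data'``).
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(sphere, scalars='Data')
>>> pl.update_scalar_bar_range([-0.5, 0.5], name='Data')
>>> pl.show()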
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise AttributeError('This plotter does not have an active mapper.')
self.mapper.scalar_range = clim
return
# Use the name to find the desired actor
def update_mapper(mapper_helper):
mapper_helper.scalar_range = clim
return
try:
for mh in self._scalar_bar_mappers[name]:
update_mapper(mh)
except KeyError:
raise KeyError(f'Name ({name}) not valid/not found in this plotter.')
return
def clear(self):
"""Clear plot by removing all actors and properties.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.clear()
>>> plotter.renderer.actors
{}
"""
self.renderers.clear()
self.scalar_bars.clear()
self.mesh = None
def link_views(self, views=0):
"""Link the views' cameras.
Parameters
----------
views : int | tuple or list
If ``views`` is int, link the views to the given view
index or if ``views`` is a tuple or a list, link the given
views cameras.
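Examples
--------
A sketch linking the cameras of two side-by-side subplots; it
assumes the multi-renderer ``shape``/``subplot`` interface.
>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> pl.subplot(0, 0)
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.subplot(0, 1)
>>> _ = pl.add_mesh(pyvista.Cube())
>>> pl.link_views()
>>> pl.show()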
"""
if isinstance(views, (int, np.integer)):
for renderer in self.renderers:
renderer.camera = self.renderers[views].camera
return
views = np.asarray(views)
if np.issubdtype(views.dtype, np.integer):
for view_index in views:
self.renderers[view_index].camera = \
self.renderers[views[0]].camera
else:
raise TypeError('Expected type is int, list or tuple: '
f'{type(views)} is given')
def unlink_views(self, views=None):
"""Unlink the views' cameras.
Parameters
----------
views : None, int, tuple or list
If ``views`` is None unlink all the views, if ``views``
is int unlink the selected view's camera or if ``views``
is a tuple or a list, unlink the given views cameras.
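Examples
--------
A short sketch: link two subplots, then give each renderer an
independent camera again.
>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> pl.link_views()
>>> pl.unlink_views()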
"""
if views is None:
for renderer in self.renderers:
renderer.camera = Camera()
renderer.reset_camera()
elif isinstance(views, int):
self.renderers[views].camera = Camera()
self.renderers[views].reset_camera()
elif isinstance(views, collections.abc.Iterable):
for view_index in views:
self.renderers[view_index].camera = Camera()
self.renderers[view_index].reset_camera()
else:
raise TypeError('Expected type is None, int, list or tuple: '
f'{type(views)} is given')
@wraps(ScalarBars.add_scalar_bar)
def add_scalar_bar(self, *args, **kwargs):
"""Wrap for ``ScalarBars.add_scalar_bar``."""
# only render when the plotter has already been shown
render = kwargs.get('render', None)
if render is None:
kwargs['render'] = not self._first_time
# check if mapper exists
mapper = kwargs.get('mapper', None)
if mapper is None:
if not hasattr(self, 'mapper') or self.mapper is None:
raise AttributeError('Mapper does not exist. '
'Add a mesh with scalars first.')
kwargs['mapper'] = self.mapper
# title can be the first and only arg
if len(args):
title = args[0]
else:
title = kwargs.get('title', '')
if title is None:
title = ''
kwargs['title'] = title
interactive = kwargs.get('interactive', None)
if interactive is None:
interactive = self._theme.interactive
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
raise ValueError('Interactive scalar bars disabled for multi-renderer plots')
# by default, use the plotter local theme
kwargs.setdefault('theme', self._theme)
return self.scalar_bars.add_scalar_bar(**kwargs)
def update_scalars(self, scalars, mesh=None, render=True):
"""Update scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Force a render when True. Default ``True``.
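Examples
--------
An animation-style sketch; it assumes the window is kept open
with ``auto_close=False`` so the scalars can be swapped in place
after ``show``.
>>> import pyvista
>>> import numpy as np
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(sphere, scalars='Data')
>>> pl.show(auto_close=False)  # doctest:+SKIP
>>> pl.update_scalars(np.random.random(sphere.n_points))  # doctest:+SKIP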
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.abc.Iterable, pyvista.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.render()
return
if isinstance(scalars, str):
# Grab scalars array if name given
scalars = get_array(mesh, scalars)
if scalars is None:
if render:
self.render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise ValueError('No active scalars')
s = convert_array(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalars array is modified by this function...
mesh.GetPoints().Modified()
except:
pass
if render:
self.render()
def update_coordinates(self, points, mesh=None, render=True):
"""Update the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Force a render when True. Default ``True``.
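Examples
--------
A small sketch that shifts the last added mesh along z; as with
``update_scalars`` it assumes a window kept open with
``auto_close=False``.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(sphere)
>>> pl.show(auto_close=False)  # doctest:+SKIP
>>> pl.update_coordinates(sphere.points + [0, 0, 0.1])  # doctest:+SKIP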
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
# only render when the plotter has already been shown
if render is None:
render = not self._first_time
if render:
self.render()
def _clear_ren_win(self):
"""Clear the render window."""
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
def close(self, render=False):
"""Close the render window.
Parameters
----------
render : bool
Unused argument.
"""
# optionally run just prior to exiting the plotter
if self._before_close_callback is not None:
self._before_close_callback(self)
self._before_close_callback = None
# must close out widgets first
super().close()
# Renderer has an axes widget, so close it
self.renderers.close()
self.renderers.remove_all_lights()
# Grab screenshots of last render
if self._store_image:
self.last_image = self.screenshot(None, return_img=True)
self.last_image_depth = self.get_image_depth()
# reset scalar bars
self.clear()
# grab the display id before clearing the window
# this is an experimental feature
if KILL_DISPLAY: # pragma: no cover
disp_id = None
if hasattr(self, 'ren_win'):
disp_id = self.ren_win.GetGenericDisplayId()
self._clear_ren_win()
if self.iren is not None:
self.iren.remove_observers()
self.iren.terminate_app()
if KILL_DISPLAY: # pragma: no cover
_kill_display(disp_id)
self.iren = None
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
# this helps managing closed plotters
self._closed = True
def deep_clean(self):
"""Clean the plotter of the memory."""
if hasattr(self, 'renderers'):
self.renderers.deep_clean()
if getattr(self, 'mesh', None) is not None:
self.mesh.point_data = None
self.mesh.cell_data = None
self.mesh = None
if getattr(self, 'mapper', None) is not None:
self.mapper.lookup_table = None
self.mapper = None
self.volume = None
self.textActor = None
def add_text(self, text, position='upper_left', font_size=18, color=None,
font=None, shadow=False, name=None, viewport=False):
"""Add text to plot object in the top left corner by default.
Parameters
----------
text : str
The text to add the rendering.
position : str, tuple(float), optional
Position to place the bottom left corner of the text box.
If tuple is used, the position of the text uses the pixel
coordinate system (default). In this case,
it returns a more general `vtkOpenGLTextActor`.
If string name is used, it returns a `vtkCornerAnnotation`
object normally used for fixed labels (like title or xlabel).
Default is to find the top left corner of the rendering window
and place text box up there. Available position: ``'lower_left'``,
``'lower_right'``, ``'upper_left'``, ``'upper_right'``,
``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and
``'left_edge'``.
font_size : float, optional
Sets the size of the title font. Defaults to 18.
color : str or sequence, optional
Either a string, RGB list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
Defaults to :attr:`pyvista.global_theme.font.color <pyvista.themes._Font.color>`.
font : str, optional
Font name may be ``'courier'``, ``'times'``, or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
viewport : bool, optional
If ``True`` and position is a tuple of float, uses the
normalized viewport coordinate system (values between 0.0
and 1.0 and support for HiDPI).
Returns
-------
vtk.vtkTextActor
Text actor added to plot.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> actor = pl.add_text('Sample Text', position='upper_right', color='blue',
... shadow=True, font_size=26)
>>> pl.show()
"""
if font is None:
font = self._theme.font.family
if font_size is None:
font_size = self._theme.font.size
if color is None:
color = self._theme.font.color
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.85) / self.shape[0]
position = [x, y]
corner_mappings = {
'lower_left': _vtk.vtkCornerAnnotation.LowerLeft,
'lower_right': _vtk.vtkCornerAnnotation.LowerRight,
'upper_left': _vtk.vtkCornerAnnotation.UpperLeft,
'upper_right': _vtk.vtkCornerAnnotation.UpperRight,
'lower_edge': _vtk.vtkCornerAnnotation.LowerEdge,
'upper_edge': _vtk.vtkCornerAnnotation.UpperEdge,
'left_edge': _vtk.vtkCornerAnnotation.LeftEdge,
'right_edge': _vtk.vtkCornerAnnotation.RightEdge,
}
corner_mappings['ll'] = corner_mappings['lower_left']
corner_mappings['lr'] = corner_mappings['lower_right']
corner_mappings['ul'] = corner_mappings['upper_left']
corner_mappings['ur'] = corner_mappings['upper_right']
corner_mappings['top'] = corner_mappings['upper_edge']
corner_mappings['bottom'] = corner_mappings['lower_edge']
corner_mappings['right'] = corner_mappings['right_edge']
corner_mappings['r'] = corner_mappings['right_edge']
corner_mappings['left'] = corner_mappings['left_edge']
corner_mappings['l'] = corner_mappings['left_edge']
if isinstance(position, (int, str, bool)):
if isinstance(position, str):
position = corner_mappings[position]
elif position is True:
position = corner_mappings['upper_left']
self.textActor = _vtk.vtkCornerAnnotation()
# This is how you set the font size with this actor
self.textActor.SetLinearFontScaleFactor(font_size // 2)
self.textActor.SetText(position, text)
else:
self.textActor = _vtk.vtkTextActor()
self.textActor.SetInput(text)
self.textActor.SetPosition(position)
if viewport:
self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetTextProperty().SetFontSize(int(font_size * 2))
self.textActor.GetTextProperty().SetColor(parse_color(color))
self.textActor.GetTextProperty().SetFontFamily(FONTS[font].value)
self.textActor.GetTextProperty().SetShadow(shadow)
self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False)
return self.textActor
def open_movie(self, filename, framerate=24, quality=5, **kwargs):
"""Establish a connection to the ffmpeg writer.
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See ``imageio.get_writer``.
framerate : int, optional
Frames per second.
quality : int, optional
Quality 10 is the top possible quality for any codec. The
range is ``0 - 10``. Higher quality leads to a larger file.
**kwargs : dict, optional
See the documentation for ``imageio.get_writer`` for additional kwargs.
Notes
-----
See the documentation for `imageio.get_writer
<https://imageio.readthedocs.io/en/stable/userapi.html#imageio.get_writer>`_
Examples
--------
Open an MP4 movie and set the quality to maximum.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.open_movie('movie.mp4', quality=10)  # doctest:+SKIP
"""
from imageio import get_writer
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self.mwriter = get_writer(filename, fps=framerate, quality=quality, **kwargs)
def open_gif(self, filename):
"""Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in ``"gif"``.
Examples
--------
Open a gif file.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.open_gif('movie.gif') # doctest:+SKIP
"""
from imageio import get_writer
if filename[-3:] != 'gif':
raise ValueError('Unsupported filetype. Must end in .gif')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = get_writer(filename, mode='I')
def write_frame(self):
"""Write a single frame to the movie file.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> plotter.open_movie(filename) # doctest:+SKIP
>>> plotter.add_mesh(pyvista.Sphere()) # doctest:+SKIP
>>> plotter.write_frame() # doctest:+SKIP
See :ref:`movie_example` for a full example using this method.
"""
# if off screen, show has not been called and we must render
# before extracting an image
if self._first_time:
self._on_first_render_request()
self.render()
if not hasattr(self, 'mwriter'):
raise RuntimeError('This plotter has not opened a movie or GIF file.')
self.update()
self.mwriter.append_data(self.image)
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = pyvista.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = pyvista.utilities.point_array(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
def get_image_depth(self,
fill_value=np.nan,
reset_camera_clipping_range=True):
"""Return a depth image representing current render window.
Parameters
----------
fill_value : float, optional
Fill value for points in image that do not include objects
in scene. To not use a fill value, pass ``None``.
reset_camera_clipping_range : bool, optional
Reset the camera clipping range to include data in view.
Returns
-------
numpy.ndarray
Image of depth values from camera orthogonal to image
plane.
Notes
-----
Values in image_depth are negative to adhere to a
right-handed coordinate system.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.store_image = True
>>> plotter.show()
>>> zval = plotter.get_image_depth()
"""
# allow no render window
if not hasattr(self, 'ren_win') and self.last_image_depth is not None:
zval = self.last_image_depth.copy()
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
self._check_rendered()
self._check_has_ren_win()
# Ensure points in view are within clipping range of renderer?
if reset_camera_clipping_range:
self.renderer.ResetCameraClippingRange()
# Get the z-buffer image
ifilter = _vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
zbuff = self._run_image_filter(ifilter)[:, :, 0]
# Convert z-buffer values to depth from camera
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
near, far = self.camera.clipping_range
if self.camera.parallel_projection:
zval = (zbuff - near) / (far - near)
else:
zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far)
# Consider image values outside clipping range as nans
self._image_depth_null = np.logical_or(zval < -far, np.isclose(zval, -far))
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
"""Add lines to the plotting object.
Parameters
----------
lines : np.ndarray or pyvista.PolyData
Points representing line segments. For example, two line
segments would be represented as ``np.array([[0, 0, 0],
[1, 0, 0], [1, 0, 0], [1, 1, 0]])``.
color : str or sequence, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
width : float, optional
Thickness of lines.
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
vtk.vtkActor
Lines actor.
Examples
--------
>>> import numpy as np
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> points = np.array([[0, 1, 0], [1, 0, 0], [1, 1, 0], [2, 0, 0]])
>>> actor = pl.add_lines(points, color='yellow', width=3)
>>> pl.camera_position = 'xy'
>>> pl.show()
"""
if not isinstance(lines, np.ndarray):
raise TypeError('Input should be an array of point segments')
lines = pyvista.lines_from_points(lines)
# Create mapper and add lines
mapper = _vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = parse_color(color)
# Create actor
actor = _vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(rgb_color)
actor.GetProperty().SetColor(rgb_color)
actor.GetProperty().LightingOff()
# legend label
if label:
if not isinstance(label, str):
raise TypeError('Label must be a string')
addr = actor.GetAddressAsString("")
self.renderer._labels[addr] = [lines, label, rgb_color]
# Add to renderer
self.add_actor(actor, reset_camera=False, name=name, pickable=False)
return actor
@wraps(ScalarBars.remove_scalar_bar)
def remove_scalar_bar(self, *args, **kwargs):
"""Remove the active scalar bar."""
self.scalar_bars.remove_scalar_bar(*args, **kwargs)
def add_point_labels(self, points, labels, italic=False, bold=True,
font_size=None, text_color=None,
font_family=None, shadow=False,
show_points=True, point_color=None, point_size=5,
name=None, shape_color='grey', shape='rounded_rect',
fill_shape=True, margin=3, shape_opacity=1.0,
pickable=False, render_points_as_spheres=False,
tolerance=0.001, reset_camera=None, always_visible=False,
render=True):
"""Create a point actor with one label from list labels assigned to each point.
Parameters
----------
points : sequence or pyvista.DataSet
An ``n x 3`` sequence points or pyvista dataset with points.
labels : list or str
List of labels. Must be the same length as points. If a
string name is given with a :class:`pyvista.DataSet` input for
points, then these are fetched.
italic : bool, optional
Italicises title and bar labels. Default ``False``.
bold : bool, optional
Bolds title and bar labels. Default ``True``.
font_size : float, optional
Sets the size of the title font. Defaults to 16.
text_color : str or 3 item list, optional
Color of text. Either a string, RGB sequence, or hex color string.
* ``text_color='white'``
* ``text_color='w'``
* ``text_color=[1, 1, 1]``
* ``text_color='#FFFFFF'``
font_family : str, optional
Font family. Must be either ``'courier'``, ``'times'``,
or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
show_points : bool, optional
Controls if points are visible. Default ``True``.
point_color : str or sequence, optional
Either a string, rgb list, or hex color string. One of
the following.
* ``point_color='white'``
* ``point_color='w'``
* ``point_color=[1, 1, 1]``
* ``point_color='#FFFFFF'``
point_size : float, optional
Size of points if visible.
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
shape_color : str or sequence, optional
Color of points (if visible). Either a string, rgb
sequence, or hex color string.
shape : str, optional
The string name of the shape to use. Options are ``'rect'`` or
``'rounded_rect'``. If you want no shape, pass ``None``.
fill_shape : bool, optional
Fill the shape with the ``shape_color``. Outlines if ``False``.
margin : int, optional
The size of the margin on the label background shape. Default is 3.
shape_opacity : float, optional
The opacity of the shape in the range of ``[0, 1]``.
pickable : bool, optional
Set whether this actor is pickable.
render_points_as_spheres : bool, optional
Render points as spheres rather than dots.
tolerance : float, optional
A tolerance to use to determine whether a point label is
visible. A tolerance is usually required because the
conversion from world space to display space during
rendering introduces numerical round-off.
reset_camera : bool, optional
Reset the camera after adding the points to the scene.
always_visible : bool, optional
Skip adding the visibility filter. Default False.
render : bool, optional
Force a render when ``True`` (default).
Returns
-------
vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
Examples
--------
>>> import numpy as np
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> points = np.array([[0, 0, 0],
... [1, 1, 0],
... [2, 0, 0]])
>>> labels = ['Point A', 'Point B', 'Point C']
>>> actor = pl.add_point_labels(points, labels, italic=True, font_size=20,
... point_color='red', point_size=20, render_points_as_spheres=True,
... always_visible=True, shadow=True)
>>> pl.camera_position = 'xy'
>>> pl.show()
"""
if font_family is None:
font_family = self._theme.font.family
if font_size is None:
font_size = self._theme.font.size
if point_color is None:
point_color = self._theme.color
if text_color is None:
text_color = self._theme.font.color
if isinstance(points, (list, tuple)):
points = np.array(points)
if isinstance(points, np.ndarray):
vtkpoints = pyvista.PolyData(points) # Cast to poly data
elif is_pyvista_dataset(points):
vtkpoints = pyvista.PolyData(points.points)
if isinstance(labels, str):
labels = points.point_data[labels]
else:
raise TypeError(f'Points type not usable: {type(points)}')
if len(vtkpoints.points) != len(labels):
raise ValueError('There must be one label for each point')
if name is None:
name = f'{type(vtkpoints).__name__}({vtkpoints.memory_address})'
vtklabels = _vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# Create hierarchy
hier = _vtk.vtkPointSetToLabelHierarchy()
hier.SetLabelArrayName('labels')
if always_visible:
hier.SetInputData(vtkpoints)
else:
# Only show visible points
vis_points = _vtk.vtkSelectVisiblePoints()
vis_points.SetInputData(vtkpoints)
vis_points.SetRenderer(self.renderer)
vis_points.SetTolerance(tolerance)
hier.SetInputConnection(vis_points.GetOutputPort())
# create label mapper
labelMapper = _vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(hier.GetOutputPort())
if not isinstance(shape, str):
labelMapper.SetShapeToNone()
elif shape.lower() in 'rect':
labelMapper.SetShapeToRect()
elif shape.lower() in 'rounded_rect':
labelMapper.SetShapeToRoundedRect()
else:
raise ValueError(f'Shape ({shape}) not understood')
if fill_shape:
labelMapper.SetStyleToFilled()
else:
labelMapper.SetStyleToOutline()
labelMapper.SetBackgroundColor(parse_color(shape_color))
labelMapper.SetBackgroundOpacity(shape_opacity)
labelMapper.SetMargin(margin)
textprop = hier.GetTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(parse_color(text_color))
textprop.SetShadow(shadow)
self.remove_actor(f'{name}-points', reset_camera=False)
self.remove_actor(f'{name}-labels', reset_camera=False)
# add points
if show_points:
self.add_mesh(vtkpoints, color=point_color, point_size=point_size,
name=f'{name}-points', pickable=pickable,
render_points_as_spheres=render_points_as_spheres,
reset_camera=reset_camera, render=render)
label_actor = _vtk.vtkActor2D()
label_actor.SetMapper(labelMapper)
self.add_actor(label_actor, reset_camera=False,
name=f'{name}-labels', pickable=False)
return label_actor
def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs):
"""Label the points from a dataset with the values of their scalars.
Wrapper for :func:`pyvista.BasePlotter.add_point_labels`.
Parameters
----------
points : numpy.ndarray or pyvista.DataSet
An ``n x 3`` numpy.ndarray or pyvista dataset with points.
labels : str, optional
String name of the point data array to use.
fmt : str, optional
String formatter used to format numerical data.
preamble : str, optional
Text before the start of each label.
**kwargs : dict, optional
Keyword arguments passed to
:func:`pyvista.BasePlotter.add_point_labels`.
Returns
-------
vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
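Examples
--------
A minimal sketch; the point-data array name ``'values'`` is an arbitrary
placeholder, not something defined by this method.
>>> import pyvista
>>> mesh = pyvista.Sphere()
>>> mesh['values'] = mesh.points[:, 2]
>>> pl = pyvista.Plotter()
>>> _ = pl.add_point_scalar_labels(mesh, 'values', point_size=20, font_size=36)
>>> pl.show()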
"""
if not is_pyvista_dataset(points):
raise TypeError(f'input points must be a pyvista dataset, not: {type(points)}')
if not isinstance(labels, str):
raise TypeError('labels must be a string name of the scalars array to use')
if fmt is None:
fmt = self._theme.font.fmt
if fmt is None:
fmt = '%.6e'
scalars = points.point_data[labels]
phrase = f'{preamble} {fmt}'  # apply the requested format string
labels = [phrase % val for val in scalars]
return self.add_point_labels(points, labels, **kwargs)
def add_points(self, points, **kwargs):
"""Add points to a mesh.
Parameters
----------
points : numpy.ndarray or pyvista.DataSet
Array of points or the points from a pyvista object.
**kwargs : dict, optional
See :func:`pyvista.BasePlotter.add_mesh` for optional
keyword arguments.
Returns
-------
vtk.vtkActor
Actor of the mesh.
Examples
--------
Add a numpy array of points to a mesh.
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> pl = pyvista.Plotter()
>>> actor = pl.add_points(points, render_points_as_spheres=True,
... point_size=100.0)
>>> pl.show()
"""
kwargs['style'] = 'points'
return self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
"""Add arrows to the plotter.
Parameters
----------
cent : np.ndarray
Array of centers.
direction : np.ndarray
Array of direction vectors.
mag : float, optional
Amount to scale the direction vectors.
**kwargs : dict, optional
See :func:`pyvista.BasePlotter.add_mesh` for optional
keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the arrows.
Examples
--------
Plot a random field of vectors and save a screenshot of it.
>>> import numpy as np
>>> import pyvista
>>> cent = np.random.random((10, 3))
>>> direction = np.random.random((10, 3))
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_arrows(cent, direction, mag=2)
>>> plotter.show()
"""
if cent.shape != direction.shape: # pragma: no cover
raise ValueError('center and direction arrays must have the same shape')
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
if mag != 1:
direction = direction*mag
pdata = pyvista.vector_poly_data(cent, direction)
# Create arrow object
arrow = _vtk.vtkArrowSource()
arrow.Update()
glyph3D = _vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
@staticmethod
def _save_image(image, filename, return_img):
"""Save to file and/or return a NumPy image array.
This is an internal helper.
"""
if not image.size:
raise ValueError('Empty image. Have you run plot() first?')
# write screenshot to file if requested
if isinstance(filename, (str, pathlib.Path)):
from PIL import Image
filename = pathlib.Path(filename)
if isinstance(pyvista.FIGURE_PATH, str) and not filename.is_absolute():
filename = pathlib.Path(os.path.join(pyvista.FIGURE_PATH, filename))
if not filename.suffix:
filename = filename.with_suffix('.png')
elif filename.suffix not in SUPPORTED_FORMATS:
raise ValueError(f'Unsupported extension {filename.suffix}\n' +
f'Must be one of the following: {SUPPORTED_FORMATS}')
image_path = os.path.abspath(os.path.expanduser(str(filename)))
Image.fromarray(image).save(image_path)
# return image array if requested
if return_img:
return image
def save_graphic(self, filename, title='PyVista Export',
raster=True, painter=True):
"""Save a screenshot of the rendering window as a graphic file.
This can be helpful for publication documents.
The supported formats are:
* ``'.svg'``
* ``'.eps'``
* ``'.ps'``
* ``'.pdf'``
* ``'.tex'``
Parameters
----------
filename : str
Path to save the graphic file to.
title : str, optional
Title to use within the file properties.
raster : bool, optional
Attempt to write 3D properties as a raster image.
painter : bool, optional
Configure the exporter to expect a painter-ordered 2D
rendering, that is, a rendering at a fixed depth where
primitives are drawn from the bottom up.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(examples.load_airplane(), smooth_shading=True)
>>> _ = pl.add_background_image(examples.mapfile)
>>> pl.save_graphic("img.svg") # doctest:+SKIP
"""
if not hasattr(self, 'ren_win'):
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
filename = os.path.abspath(os.path.expanduser(filename))
extension = pyvista.fileio.get_ext(filename)
writer = _vtk.lazy_vtkGL2PSExporter()
modes = {
'.svg': writer.SetFileFormatToSVG,
'.eps': writer.SetFileFormatToEPS,
'.ps': writer.SetFileFormatToPS,
'.pdf': writer.SetFileFormatToPDF,
'.tex': writer.SetFileFormatToTeX,
}
if extension not in modes:
raise ValueError(f"Extension ({extension}) is an invalid choice.\n\n"
f"Valid options include: {', '.join(modes.keys())}")
writer.CompressOff()
writer.SetFilePrefix(filename.replace(extension, ''))
writer.SetInput(self.ren_win)
modes[extension]()
writer.SetTitle(title)
writer.SetWrite3DPropsAsRasterImage(raster)
if painter:
writer.UsePainterSettings()
writer.Update()
def screenshot(self, filename=None, transparent_background=None,
return_img=True, window_size=None):
"""Take screenshot at current camera position.
Parameters
----------
filename : str, optional
Location to write image to. If ``None``, no image is written.
transparent_background : bool, optional
Whether to make the background transparent. The default is
looked up on the plotter's theme.
return_img : bool, optional
If ``True`` (the default), a NumPy array of the image will
be returned.
window_size : 2-length tuple, optional
Set the plotter's size to this ``(width, height)`` before
taking the screenshot.
Returns
-------
numpy.ndarray
Array containing pixel RGB and alpha. Sized:
* [Window height x Window width x 3] if
``transparent_background`` is set to ``False``.
* [Window height x Window width x 4] if
``transparent_background`` is set to ``True``.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
if transparent_background is None:
transparent_background = self._theme.transparent_background
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery to work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if self.last_image is not None:
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise RuntimeError('This plotter is closed and unable to save a screenshot.')
if self._first_time and not self.off_screen:
raise RuntimeError("Nothing to screenshot - call .show first or "
"use the off_screen argument")
# if off screen, show has not been called and we must render
# before extracting an image
if self._first_time:
self._on_first_render_request()
self.render()
return self._save_image(self.image, filename, return_img)
@wraps(Renderers.set_background)
def set_background(self, *args, **kwargs):
"""Wrap ``Renderers.set_background``."""
self.renderers.set_background(*args, **kwargs)
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, shift=0.0):
"""Generate an orbital path around the data scene.
Parameters
----------
factor : float, optional
A scaling factor when building the orbital extent.
n_points : int, optional
Number of points on the orbital path.
viewup : list(float), optional
The normal to the orbital plane.
shift : float, optional
Shift the plane up/down from the center of the scene by
this amount.
Returns
-------
pyvista.PolyData
PolyData containing the orbital path.
Examples
--------
Generate an orbital path around a sphere.
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> viewup = [0, 0, 1]
>>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=50,
... shift=0.0, viewup=viewup)
See :ref:`orbiting_example` for a full example using this method.
"""
if viewup is None:
viewup = self._theme.camera['viewup']
center = np.array(self.center)
bnds = np.array(self.bounds)
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
center += np.array(viewup) * shift
return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Move the current camera's focal point to a position point.
The movement is animated over the number of frames specified in
NumberOfFlyFrames. The LOD desired frame rate is used.
Parameters
----------
point : sequence
Point to fly to in the form of ``(x, y, z)``.
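Examples
--------
A minimal sketch, flying the camera's focal point to an arbitrary position.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.fly_to((0.0, 0.0, 0.4))  # doctest:+SKIP
>>> pl.show()  # doctest:+SKIP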
"""
self.iren.fly_to(self.renderer, point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None,
write_frames=False, threaded=False, progress_bar=False):
"""Orbit on the given path focusing on the focus point.
Parameters
----------
path : pyvista.PolyData
Path of orbital points. The order in the points is the order of
travel.
focus : list(float) of length 3, optional
The point on which to focus the camera.
step : float, optional
The timestep between flying to each camera position.
viewup : list(float), optional
The normal to the orbital plane.
write_frames : bool, optional
Assume a file is open and write a frame on each camera
view during the orbit.
threaded : bool, optional
Run this as a background thread. Generally used within a
GUI (i.e. PyQt).
progress_bar : bool, optional
Show the progress bar when proceeding through the path.
This can be helpful to show progress when generating
movies with ``off_screen=True``.
Examples
--------
Plot an orbit around the earth. Save the gif as a temporary file.
>>> import tempfile
>>> import os
>>> import pyvista
>>> filename = os.path.join(tempfile._get_default_tempdir(),
... next(tempfile._get_candidate_names()) + '.gif')
>>> from pyvista import examples
>>> plotter = pyvista.Plotter(window_size=[300, 300])
>>> _ = plotter.add_mesh(examples.load_globe(), smooth_shading=True)
>>> plotter.open_gif(filename)
>>> viewup = [0, 0, 1]
>>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=24,
... shift=0.0, viewup=viewup)
>>> plotter.orbit_on_path(orbit, write_frames=True, viewup=viewup,
... step=0.02)
See :ref:`orbiting_example` for a full example using this method.
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = self._theme.camera['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_pyvista_dataset(path):
path = pyvista.PolyData(path)
points = path.points
# Make sure the whole scene is visible
self.camera.thickness = path.length
if progress_bar:
try: # pragma: no cover
from tqdm import tqdm
except ImportError:
raise ImportError("Please install `tqdm` to use ``progress_bar=True``")
def orbit():
"""Define the internal thread for running the orbit."""
if progress_bar: # pragma: no cover
points_seq = tqdm(points)
else:
points_seq = points
for point in points_seq:
tstart = time.time() # include the render time in the step time
self.set_position(point)
self.set_focus(focus)
self.set_viewup(viewup)
self.renderer.ResetCameraClippingRange()
if write_frames:
self.write_frame()
else:
self.render()
sleep_time = step - (time.time() - tstart)
if sleep_time > 0:
time.sleep(sleep_time)
if threaded:
thread = Thread(target=orbit)
thread.start()
else:
orbit()
def export_vtkjs(self, filename, compress_arrays=False):
"""Export the current rendering scene as a VTKjs scene.
It can be used for rendering in a web browser.
Parameters
----------
filename : str
Filename to export the scene to. A filename extension of
``'.vtkjs'`` will be added.
compress_arrays : bool, optional
Enable array compression.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(examples.load_hexbeam())
>>> pl.export_vtkjs("sample") # doctest:+SKIP
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
def export_obj(self, filename):
"""Export scene to OBJ format.
Parameters
----------
filename : str
Filename to export the scene to. Should end in ``'.obj'``.
Returns
-------
vtkOBJExporter
Object exporter.
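Examples
--------
A minimal sketch; ``'scene.obj'`` is an arbitrary output path.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Cube())
>>> pl.export_obj('scene.obj')  # doctest:+SKIP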
"""
# lazy import vtkOBJExporter here as it takes a long time to
# load and is not always used
try:
from vtkmodules.vtkIOExport import vtkOBJExporter
except ImportError:
from vtk import vtkOBJExporter
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter must still have a render window open.")
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
exporter = vtkOBJExporter()
exporter.SetFilePrefix(filename)
exporter.SetRenderWindow(self.ren_win)
return exporter.Write()
def __del__(self):
"""Delete the plotter."""
# We have to check here if it has the closed attribute as it
# may not exist should the plotter have failed to initialize.
if hasattr(self, '_closed'):
if not self._closed:
self.close()
self.deep_clean()
if hasattr(self, 'renderers'):
del self.renderers
def add_background_image(self, image_path, scale=1, auto_resize=True,
as_global=True):
"""Add a background image to a plot.
Parameters
----------
image_path : str
Path to an image file.
scale : float, optional
Scale the image larger or smaller relative to the size of
the window. For example, a scale size of 2 will make the
largest dimension of the image twice as large as the
largest dimension of the render window. Defaults to 1.
auto_resize : bool, optional
Resize the background when the render window changes size.
as_global : bool, optional
When multiple render windows are present, setting
``as_global=False`` will cause the background to only
appear in one window.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.add_background_image(examples.mapfile)
>>> plotter.show()
"""
if self.renderers.has_active_background_renderer:
raise RuntimeError('A background image already exists. '
'Remove it with ``remove_background_image`` '
'before adding one')
# Need to change the number of layers to support an additional
# background layer
if not self._has_background_layer:
self.ren_win.SetNumberOfLayers(3)
renderer = self.renderers.add_background_renderer(image_path, scale, as_global)
self.ren_win.AddRenderer(renderer)
# setup autoscaling of the image
if auto_resize: # pragma: no cover
self.iren.add_observer('ModifiedEvent', renderer.resize)
@wraps(Renderers.remove_background_image)
def remove_background_image(self):
"""Wrap ``Renderers.remove_background_image``."""
self.renderers.remove_background_image()
# return the active renderer to the top, otherwise flat background
# will not be rendered
self.renderer.layer = 0
def _on_first_render_request(self, cpos=None):
"""Once an image or render is officially requested, run this routine.
For example on the show call or any screenshot producing code.
"""
# reset the camera on the first render unless the camera is already set
if self._first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set and cpos is None:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
elif cpos is not None:
renderer.camera_position = cpos
self._first_time = False
def reset_camera_clipping_range(self):
"""Reset camera clipping planes."""
self.renderer.ResetCameraClippingRange()
def add_light(self, light, only_active=False):
"""Add a Light to the scene.
Parameters
----------
light : Light or vtkLight
The light to be added.
only_active : bool, optional
If ``True``, only add the light to the active
renderer. The default is that every renderer adds the
light. To add the light to an arbitrary renderer, see
:func:`pyvista.plotting.renderer.Renderer.add_light`.
Examples
--------
Create a plotter that we initialize with no lights, and add a
cube and a single headlight to it.
>>> import pyvista as pv
>>> plotter = pv.Plotter(lighting='none')
>>> _ = plotter.add_mesh(pv.Cube())
>>> light = pv.Light(color='cyan', light_type='headlight')
>>> plotter.add_light(light)
>>> plotter.show()
"""
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.add_light(light)
def remove_all_lights(self, only_active=False):
"""Remove all lights from the scene.
Parameters
----------
only_active : bool
If ``True``, only remove lights from the active
renderer. The default is that lights are stripped from
every renderer.
Examples
--------
Create a plotter and remove all lights after initialization.
Note how the mesh rendered is completely flat
>>> import pyvista as pv
>>> plotter = pv.Plotter()
>>> plotter.remove_all_lights()
>>> plotter.renderer.lights
[]
>>> _ = plotter.add_mesh(pv.Sphere(), show_edges=True)
>>> plotter.show()
Note how this differs from a plot with default lighting
>>> pv.Sphere().plot(show_edges=True, lighting=True)
"""
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.remove_all_lights()
def where_is(self, name):
"""Return the subplot coordinates of a given actor.
Parameters
----------
name : str
Actor's name.
Returns
-------
list(tuple(int))
A list with the subplot coordinates of the actor.
Examples
--------
>>> import pyvista as pv
>>> plotter = pv.Plotter(shape=(2, 2))
>>> plotter.subplot(0, 0)
>>> _ = plotter.add_mesh(pv.Box(), name='box')
>>> plotter.subplot(0, 1)
>>> _ = plotter.add_mesh(pv.Sphere(), name='sphere')
>>> plotter.subplot(1, 0)
>>> _ = plotter.add_mesh(pv.Box(), name='box')
>>> plotter.subplot(1, 1)
>>> _ = plotter.add_mesh(pv.Cone(), name='cone')
>>> plotter.where_is('box')
[(0, 0), (1, 0)]
>>> plotter.show()
"""
places = []
for index in range(len(self.renderers)):
if name in self.renderers[index]._actors:
places.append(tuple(self.renderers.index_to_loc(index)))
return places
class Plotter(BasePlotter):
"""Plotting object to display vtk meshes or numpy arrays.
Parameters
----------
off_screen : bool, optional
Renders off screen when ``True``. Useful for automated
screenshots.
notebook : bool, optional
When ``True``, the resulting plot is placed inline a jupyter
notebook. Assumes a jupyter console is active. Automatically
enables ``off_screen``.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render
window. Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default ``False``.
border_color : str or 3 item list, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
window_size : list, optional
Window size in pixels. Defaults to ``[1024, 768]``, unless
set differently in the relevant theme's ``window_size``
property.
multi_samples : int, optional
The number of multi-samples used to mitigate aliasing. 4 is a
good default but 8 will have better results with a potential
impact on performance.
line_smoothing : bool, optional
If ``True``, enable line smoothing.
polygon_smoothing : bool, optional
If ``True``, enable polygon smoothing.
lighting : str, optional
What lighting to set up for the plotter.
Accepted options:
* ``'light_kit'``: a vtk Light Kit composed of 5 lights.
* ``'three lights'``: illumination using 3 lights.
* ``'none'``: no light sources at instantiation.
The default is a ``'light_kit'`` (to be precise, 5 separate
lights that act like a Light Kit).
theme : pyvista.themes.DefaultTheme, optional
Plot-specific theme.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(mesh, color='red')
>>> actor = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show()
"""
last_update_time = 0.0
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
groups=None, row_weights=None, col_weights=None,
border=None, border_color='k', border_width=2.0,
window_size=None, multi_samples=None, line_smoothing=False,
point_smoothing=False, polygon_smoothing=False,
splitting_position=None, title=None, lighting='light kit',
theme=None):
"""Initialize a vtk plotting object."""
super().__init__(shape=shape, border=border,
border_color=border_color,
border_width=border_width,
groups=groups, row_weights=row_weights,
col_weights=col_weights,
splitting_position=splitting_position,
title=title, lighting=lighting, theme=theme)
log.debug('Plotter init start')
# check if a plotting backend is enabled
_warn_xserver()
def on_timer(iren, event_id):
"""Exit application if interactive renderer stops."""
if event_id == 'TimerEvent':
self.iren.terminate_app()
if off_screen is None:
off_screen = pyvista.OFF_SCREEN
if notebook is None:
if self._theme.notebook is not None:
notebook = self._theme.notebook
else:
notebook = scooby.in_ipykernel()
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
self._window_size_unset = False
if window_size is None:
self._window_size_unset = True
window_size = self._theme.window_size
self.__prior_window_size = window_size
if multi_samples is None:
multi_samples = self._theme.multi_samples
# initialize render window
self.ren_win = _vtk.vtkRenderWindow()
self.ren_win.SetMultiSamples(multi_samples)
self.ren_win.SetBorders(True)
if line_smoothing:
self.ren_win.LineSmoothingOn()
if point_smoothing:
self.ren_win.PointSmoothingOn()
if polygon_smoothing:
self.ren_win.PolygonSmoothingOn()
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
# Add the shadow renderer to allow us to capture interactions within
# a given viewport
# https://vtk.org/pipermail/vtkusers/2018-June/102030.html
number_of_layers = self.ren_win.GetNumberOfLayers()
current_layer = self.renderer.GetLayer()
self.ren_win.SetNumberOfLayers(number_of_layers + 1)
self.ren_win.AddRenderer(self.renderers.shadow_renderer)
self.renderers.shadow_renderer.SetLayer(current_layer + 1)
self.renderers.shadow_renderer.SetInteractive(False) # never needs to capture
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
# vtkGenericRenderWindowInteractor has no event loop and
# allows the display client to close on Linux when
# off_screen. We still want an interactor for off screen
# plotting since there are some widgets (like the axes
# widget) that need an interactor
interactor = _vtk.vtkGenericRenderWindowInteractor()
else:
interactor = None
# Add ren win and interactor
self.iren = RenderWindowInteractor(self, light_follow_camera=False,
interactor=interactor)
self.iren.set_render_window(self.ren_win)
self.enable_trackball_style() # internally calls update_style()
self.iren.add_observer("KeyPressEvent", self.key_press_event)
# Set background
self.set_background(self._theme.background)
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
self.iren.add_observer(_vtk.vtkCommand.TimerEvent, on_timer)
if self._theme.depth_peeling.enabled:
if self.enable_depth_peeling():
for renderer in self.renderers:
renderer.enable_depth_peeling()
log.debug('Plotter init stop')
def show(self, title=None, window_size=None, interactive=True,
auto_close=None, interactive_update=False, full_screen=None,
screenshot=False, return_img=False, cpos=None, use_ipyvtk=None,
jupyter_backend=None, return_viewer=False, return_cpos=None,
**kwargs):
"""Display the plotting window.
Parameters
----------
title : str, optional
Title of plotting window. Defaults to
:attr:`pyvista.global_theme.title <pyvista.themes.DefaultTheme.title>`.
window_size : list, optional
Window size in pixels. Defaults to
:attr:`pyvista.global_theme.window_size <pyvista.themes.DefaultTheme.window_size>`.
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
Defaults to
:attr:`pyvista.global_theme.interactive <pyvista.themes.DefaultTheme.interactive>`.
auto_close : bool, optional
Exits plotting session when user closes the window when
interactive is ``True``. Defaults to
:attr:`pyvista.global_theme.auto_close <pyvista.themes.DefaultTheme.auto_close>`.
interactive_update : bool, optional
Disabled by default. Allows non-blocking drawing; the user should
call :func:`BasePlotter.update` in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
``window_size``. Defaults to
:attr:`pyvista.global_theme.full_screen <pyvista.themes.DefaultTheme.full_screen>`.
screenshot : str or bool, optional
Take a screenshot of the initial state of the plot.
If a string, it specifies the path to which the screenshot
is saved. If ``True``, the screenshot is returned as an
array. Defaults to ``False``. For interactive screenshots
it's recommended to first call ``show()`` with
``auto_close=False`` to set the scene, then save the
screenshot in a separate call to ``show()`` or
:func:`Plotter.screenshot`.
return_img : bool
Returns a numpy array representing the last image along
with the camera position.
cpos : list(tuple(floats))
The camera position. You can also set this with
:attr:`Plotter.camera_position`.
use_ipyvtk : bool, optional
Deprecated. Instead, set the backend either globally with
``pyvista.set_jupyter_backend('ipyvtklink')`` or with
``backend='ipyvtklink'``.
jupyter_backend : str, optional
Jupyter notebook plotting backend to use. One of the
following:
* ``'none'`` : Do not display in the notebook.
* ``'pythreejs'`` : Show a ``pythreejs`` widget
* ``'static'`` : Display a static figure.
* ``'ipygany'`` : Show a ``ipygany`` widget
* ``'panel'`` : Show a ``panel`` widget.
This can also be set globally with
:func:`pyvista.set_jupyter_backend`.
return_viewer : bool, optional
Return the jupyterlab viewer, scene, or display object
when plotting with jupyter notebook.
return_cpos : bool, optional
Return the last camera position from the render window
when enabled. Default based on theme setting. See
:attr:`pyvista.themes.DefaultTheme.return_cpos`.
**kwargs : dict, optional
Developer keyword arguments.
Returns
-------
cpos : list
List of camera position, focal point, and view up.
Returned only when ``return_cpos=True`` or set in the
default global or plot theme. Not returned when in a
jupyter notebook and ``return_viewer=True``.
image : np.ndarray
Numpy array of the last image when either ``return_img=True``
or ``screenshot=True`` is set. Not returned when in a
jupyter notebook with ``return_viewer=True``. Optionally
contains alpha values. Sized:
* [Window height x Window width x 3] if the theme sets
``transparent_background=False``.
* [Window height x Window width x 4] if the theme sets
``transparent_background=True``.
widget
IPython widget when ``return_viewer=True``.
Notes
-----
Please use the ``q``-key to close the plotter as some
operating systems (namely Windows) will experience issues
saving a screenshot if the exit button in the GUI is pressed.
Examples
--------
Simply show the plot of a mesh.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.show()
Take a screenshot interactively. Screenshot will be of the
first image shown, so use the first call with
``auto_close=False`` to set the scene before taking the
screenshot.
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.show(auto_close=False) # doctest:+SKIP
>>> pl.show(screenshot='my_image.png') # doctest:+SKIP
Display a ``pythreejs`` scene within a jupyter notebook
>>> pl.show(jupyter_backend='pythreejs') # doctest:+SKIP
Return a ``pythreejs`` scene.
>>> pl.show(jupyter_backend='pythreejs', return_viewer=True) # doctest:+SKIP
Obtain the camera position when using ``show``.
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Sphere())
>>> pl.show(return_cpos=True) # doctest:+SKIP
[(2.223005211686484, -0.3126909484828709, 2.4686209867735065),
(0.0, 0.0, 0.0),
(-0.6839951597283509, -0.47207319712073137, 0.5561452310578585)]
"""
# developer keyword argument: runs a function immediately prior to ``close``
self._before_close_callback = kwargs.pop('before_close_callback', None)
jupyter_kwargs = kwargs.pop('jupyter_kwargs', {})
assert_empty_kwargs(**kwargs)
if interactive_update and auto_close is None:
auto_close = False
elif interactive_update and auto_close:
warnings.warn(textwrap.dedent("""\
The plotter will close immediately automatically since ``auto_close=True``.
Either, do not specify ``auto_close``, or set it to ``False`` if you want to
interact with the plotter interactively.\
""")
)
elif auto_close is None:
auto_close = self._theme.auto_close
if use_ipyvtk:
txt = textwrap.dedent("""\
use_ipyvtk is deprecated. Set the backend
globally with ``pyvista.set_jupyter_backend("ipyvtklink")``
or with ``backend="ipyvtklink"``.
""")
from pyvista.core.errors import DeprecationError
raise DeprecationError(txt)
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter has been closed and cannot be shown.")
if full_screen is None:
full_screen = self._theme.full_screen
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
else:
self._window_size_unset = False
self.ren_win.SetSize(window_size[0], window_size[1])
# reset the camera on the first render unless the camera is already set
self._on_first_render_request(cpos)
# handle plotter notebook
if jupyter_backend and not self.notebook:
warnings.warn('Not within a jupyter notebook environment.\n'
'Ignoring ``jupyter_backend``.')
if self.notebook:
from ..jupyter.notebook import handle_plotter
if jupyter_backend is None:
jupyter_backend = self._theme.jupyter_backend
if jupyter_backend != 'none':
disp = handle_plotter(self, backend=jupyter_backend,
return_viewer=return_viewer,
**jupyter_kwargs)
return disp
self.render()
# This has to be after the first render for some reason
if title is None:
title = self.title
if title:
self.ren_win.SetWindowName(title)
self.title = title
# Keep track of image for sphinx-gallery
if pyvista.BUILDING_GALLERY or screenshot:
# always save screenshots for sphinx_gallery
self.last_image = self.screenshot(screenshot, return_img=True)
self.last_image_depth = self.get_image_depth()
# See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270
if interactive and not self.off_screen:
try: # interrupts will be caught here
log.debug('Starting iren')
self.iren.update_style()
if not interactive_update:
# Resolves #1260
if os.name == 'nt':
if _vtk.VTK9:
self.iren.process_events()
else:
global VERY_FIRST_RENDER
if not VERY_FIRST_RENDER:
self.iren.start()
VERY_FIRST_RENDER = False
self.iren.start()
self.iren.initialize()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
# In the event that the user hits the exit-button on the GUI (on
# Windows OS) then it must be finalized and deleted as accessing it
# will kill the kernel.
# Here we check for that and clean it up before moving on to any of
# the closing routines that might try to still access that
# render window.
if not self.ren_win.IsCurrent():
self._clear_ren_win() # The ren_win is deleted
# proper screenshots cannot be saved if this happens
if not auto_close:
warnings.warn("`auto_close` ignored: by clicking the exit button, "
"you have destroyed the render window and we have to "
"close it out.")
auto_close = True
# NOTE: after this point, nothing from the render window can be accessed,
# because if a user pressed the close button, the render view has been
# destroyed and a stream of errors will kill the Python kernel if code
# here tries to access that renderer.
# See issues #135 and #186 for insight before editing the
# remainder of this function.
# Close the render window if requested
if auto_close:
self.close()
# If user asked for screenshot, return as numpy array after camera
# position
if return_img or screenshot is True:
    if return_cpos:
        return self.camera_position, self.last_image
    return self.last_image
if return_cpos:
    return self.camera_position
def add_title(self, title, font_size=18, color=None, font=None,
shadow=False):
"""Add text to the top center of the plot.
This is merely a convenience method that calls ``add_text``
with ``position='upper_edge'``.
Parameters
----------
title : str
The text to add the rendering.
font_size : float, optional
Sets the size of the title font. Defaults to 16 or the
value of the global theme if set.
color : str or 3 item list, optional,
Either a string, rgb list, or hex color string. Defaults
to white or the value of the global theme if set. For
example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
font : str, optional
Font name may be ``'courier'``, ``'times'``, or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
Returns
-------
vtk.vtkTextActor
Text actor added to plot.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.background_color = 'grey'
>>> actor = pl.add_title('Plot Title', font='courier', color='k',
... font_size=40)
>>> pl.show()
"""
# add additional spacing from the top of the figure by default
title = '\n' + title
return self.add_text(title, position='upper_edge',
font_size=font_size, color=color, font=font,
shadow=shadow, name='title', viewport=False)
def add_cursor(
self,
bounds=(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0),
focal_point=(0.0, 0.0, 0.0),
color=None,
):
"""Add a cursor of a PyVista or VTK dataset to the scene.
Parameters
----------
bounds : length 6 sequence
Specify the bounds in the format of:
- ``(xmin, xmax, ymin, ymax, zmin, zmax)``
Defaults to ``(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)``.
focal_point : list or tuple, optional
The focal point of the cursor.
Defaults to ``(0.0, 0.0, 0.0)``.
color : str or sequence, optional
Either a string, RGB sequence, or hex color string. For one
of the following.
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
Returns
-------
vtk.vtkActor
VTK actor of the 2D cursor.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere)
>>> _ = plotter.add_cursor()
>>> plotter.show()
"""
alg = _vtk.vtkCursor3D()
alg.SetModelBounds(bounds)
alg.SetFocalPoint(focal_point)
alg.AllOn()
mapper = make_mapper(_vtk.vtkDataSetMapper)
mapper.SetInputConnection(alg.GetOutputPort())
actor, prop = self.add_actor(mapper)
prop.SetColor(parse_color(color))
return actor
# Tracks created plotters. At the end of the file as we need to
# define ``BasePlotter`` before including it in the type definition.
_ALL_PLOTTERS: Dict[str, BasePlotter] = {}
def _kill_display(disp_id): # pragma: no cover
"""Forcibly close the display on Linux.
See: https://gitlab.kitware.com/vtk/vtk/-/issues/17917#note_783584
And more details into why...
https://stackoverflow.com/questions/64811503
Notes
-----
This is to be used experimentally and is known to cause issues
on `pyvistaqt`.
"""
if platform.system() != 'Linux':
raise OSError('This method only works on Linux')
if disp_id:
cdisp_id = int(disp_id[1:].split('_')[0], 16)
# this is unsafe as events might be queued, but sometimes the
# window fails to close if we don't just close it
Thread(target=X11.XCloseDisplay, args=(cdisp_id, )).start()
|
WebGUI.py
|
import threading
import time
import cv2 as cv
from flask import Flask
from flask import render_template
from flask import Response
from .utils import visualization_utils as vis_util
category_index = {0: {
    "id": 0,
    "name": "Pedestrian",
}}  # TODO: json file for detector config
class WebGUI:
"""
The WebGUI object implements a Flask application and acts as an interface for users.
Once created, it serves as the central application for viewing the processed outputs.
:param config: configuration object exposing ``get_section_dict`` (the ``App`` and ``Detector`` sections are used)
:param engine_instance: detection engine whose ``process_video`` method produces the frames passed to ``update``
"""
def __init__(self, config, engine_instance):
self.config = config
self.__ENGINE_INSTANCE = engine_instance
self._output_frame = None
self._lock = threading.Lock()
self._host = self.config.get_section_dict("App")["Host"]
self._port = int(self.config.get_section_dict("App")["Port"])
self.app = self.create_flask_app()
self._dist_threshold = float(self.config.get_section_dict("Detector")["DistThreshold"])
def update(self, input_frame, nn_out, distances):
"""
Args:
input_frame: uint8 numpy array with shape (img_height, img_width, 3)
nn_out: a list of dictionaries, one per detection, containing normalized bounding-box
numbers, e.g. {'id': '0-0', 'bbox': [x0, y0, x1, y1], 'score': 0.99 (optional)}; overall shape [N, 3] or [N, 2]
distances: a symmetric matrix of normalized distances
Returns:
None. Draws the bounding boxes onto the input frame and stores a copy as the output frame.
"""
output_dict = vis_util.visualization_preparation(nn_out, distances, self._dist_threshold)
vis_util.visualize_boxes_and_labels_on_image_array(
input_frame,
output_dict["detection_boxes"],
output_dict["detection_classes"],
output_dict["detection_scores"],
output_dict["detection_colors"],
category_index,
instance_masks=output_dict.get("detection_masks"),
use_normalized_coordinates=True,
line_thickness=3,
)
with self._lock:
self._output_frame = input_frame.copy()
def create_flask_app(self):
app = Flask(__name__)
@app.route("/")
def _index():
return render_template("index.html")
@app.route("/video_feed")
def video_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(
self._generate(), mimetype="multipart/x-mixed-replace; boundary=frame"
)
return app
def _generate(self):
while True:
with self._lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if self._output_frame is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv.imencode(".jpeg", self._output_frame)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n" + bytearray(encodedImage) + b"\r\n"
)
def _run(self):
self.app.run(
host=self._host, port=self._port, debug=True, threaded=True, use_reloader=False,
)
def start(self):
threading.Thread(target=self._run).start()
time.sleep(1)
video_path = self.config.get_section_dict("App")["VideoPath"]
self.__ENGINE_INSTANCE.process_video(video_path)
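# A minimal, self-contained wiring sketch. _DictConfig and _FakeEngine are
# stand-ins invented for illustration only; the real project supplies its own
# config and engine classes.
class _DictConfig:
    def __init__(self, sections):
        self._sections = sections
    def get_section_dict(self, name):
        return self._sections[name]
class _FakeEngine:
    def process_video(self, video_path):
        print("would process", video_path)
if __name__ == "__main__":
    cfg = _DictConfig({
        "App": {"Host": "0.0.0.0", "Port": "8000", "VideoPath": "sample.mp4"},
        "Detector": {"DistThreshold": "1.5"},
    })
    WebGUI(cfg, _FakeEngine()).start()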
|
tcp_server.py
|
import socket
import threading
import struct
import time
class TcpServer(object):
def __init__(self, host, port):
self.host_ = host
self.port_ = port
self.sock_ = None
self.quit_event_ = threading.Event()
def launch(self):
print("Server launched at {}:{}".format(self.host_, self.port_))
self.sock_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock_.bind( (self.host_, self.port_) )
self.sock_.listen(1)
self.quit_event_.clear()
self.start_server_()
def stop(self):
self.quit_event_.set()
if self.sock_ is not None:
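# connect a throwaway client socket so the blocking accept() in start_server_ returns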
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect( (self.host_, self.port_))
self.sock_.close()
self.sock_ = None
def start_server_(self):
# listen to connection request
while not self.quit_event_.is_set():
# blocked for next connection
conn, addr = self.sock_.accept()
thread = threading.Thread(target=self.handle_connection_, args=(conn, addr))
thread.start()
# This function needs to be overridden by a child class
def handle_connection_(self, conn, addr):
pass
def recv_all_(self, sock, msg_length):
data = b""
size_left = msg_length
while len(data) < msg_length and not self.quit_event_.is_set():
    recv_data = sock.recv(size_left)
    if not recv_data:
        # the peer closed the connection; stop instead of looping forever
        break
    size_left -= len(recv_data)
    data += recv_data
return data
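# A minimal sketch of the intended usage: subclass TcpServer and override
# handle_connection_. EchoServer is a hypothetical example, not part of the
# original module; it assumes each message is framed by a 4-byte big-endian
# length header.
class EchoServer(TcpServer):
    def handle_connection_(self, conn, addr):
        header = self.recv_all_(conn, 4)
        if len(header) == 4:
            (length,) = struct.unpack('>I', header)
            payload = self.recv_all_(conn, length)
            # echo the framed message back to the client
            conn.sendall(header + payload)
        conn.close()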
|
peer.py
|
import sys
import datetime
import time
from concurrent import futures
import threading
import socket
import grpc
import p2p_msg_pb2
import p2p_msg_pb2_grpc
class PeerServicer(p2p_msg_pb2_grpc.PeerServicer):
"""This class implements p2p_msg_pb2_grpc.PeerServicer interface that was generated from .proto."""
def __init__(self, username):
"""Constructor."""
self.username = username
def Msg(self, requestIterator, context):
Prints messages that were sent from the other peer.
Returns 'stream' of Empty to maintain connection."""
for newMsg in requestIterator:
printMsg(newMsg)
yield p2p_msg_pb2.Empty()
def SubscribeMsg(self, request, context):
"""Starts listening server's user input and sends it to subscribers."""
print('User connected.')
return listenInput(self.username)
def printMsg(msg):
"""Prints formatted PeerMessage."""
print('[' + msg.time + '] ' + msg.name + ': ' + msg.text)
def listenInput(username):
"""Listens user input and returns formatted messages (PeerMessage) using generators."""
print('Starting listening user input...')
while (True):
msgToSend = input()
if (len(msgToSend) == 0):
continue
timeStr = datetime.datetime.now().strftime('%H:%M:%S')
yield p2p_msg_pb2.PeerMessage(
name = username,
time = timeStr,
text = msgToSend)
def startServer(port, username):
"""Starts server using PeerServicer class to service RPCs."""
try:
server = grpc.server(futures.ThreadPoolExecutor(max_workers=16))
p2p_msg_pb2_grpc.add_PeerServicer_to_server(PeerServicer(username), server)
server.add_insecure_port('[::]:' + port)
server.start()
print('Server started. Waiting for client to connect...')
server.wait_for_termination()
    except Exception as e:
        print('Server error occurred:', e)
def listenServer(stub):
"""Starts listening server by subcribing to PeerServicer.
Prints all received messages."""
try:
rs = stub.SubscribeMsg(p2p_msg_pb2.Empty())
for r in rs:
printMsg(r)
    except Exception as e:
        print('Error occurred while listening to the server:', e)
def startSending(serverip, port, username):
"""Listens server using IP and port and handles user input."""
with grpc.insecure_channel(serverip + ':' + port) as channel:
stub = p2p_msg_pb2_grpc.PeerStub(channel)
print('Starting listening server...')
ls = threading.Thread(target = listenServer, args = (stub,))
ls.start()
ers = stub.Msg(listenInput(username))
for r in ers:
continue
ls.join()
# Main
if len(sys.argv) < 4:
print('Please specify peer\'s IP, port and your name to be a client')
print('or type \'--server\' instead of IP to be a server and a client at the same time.')
sys.exit(0)
isFirst = sys.argv[1] == '--server'
port = sys.argv[2]
username = sys.argv[3]
try:
if isFirst:
startServer(port, username)
else:
ip = sys.argv[1]
startSending(ip, port, username)
except Exception as e:
    print('Error occurred:', e)
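# Example invocations (illustrative; the IP address below is a placeholder):
#     python peer.py --server 50051 alice     # act as the server (and a client) on port 50051
#     python peer.py 192.168.1.10 50051 bob   # connect to the peer serving at 192.168.1.10:50051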
|
startup.py
|
#startup.py v4 - by Mark Harris with contribution from Stuart. Thank you for your help.
# Used to start 3 scripts: one to run the METAR LEDs, one to update an LCD or OLED display, and a 3rd as a watchdog.
# Taken from https://raspberrypi.stackexchange.com/questions/39108/how-do-i-start-two-different-python-scripts-with-rc-local
import time
import threading
import os
import sys
import logging
import logzero #had to manually install logzero. https://logzero.readthedocs.io/en/latest/
from logzero import logger
import config #Config.py holds user settings used by the various scripts
import admin
# Setup rotating logfile with 3 rotations, each with a maximum filesize of 1MB:
version = admin.version #Software version
loglevel = config.loglevel
loglevels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR]
logzero.loglevel(loglevels[loglevel]) #Choices in order; DEBUG, INFO, WARNING, ERROR
logzero.logfile("/NeoSectional/logfile.log", maxBytes=1e6, backupCount=3)
logger.info("\n\nStartup of startup.py Script, Version " + version)
logger.info("Log Level Set To: " + str(loglevels[loglevel]))
#misc settings
displayused = config.displayused #0 = no, 1 = yes. If no, then only the metar.py script will be run. Otherwise both scripts will be threaded.
autorun = config.autorun #0 = no, 1 = yes. If yes, live sectional will run on boot up. No, must run from cmd line.
title1 = "metar-v4.py" #define the filename for the metar.py file
prog1 = "sudo python3 /NeoSectional/metar-v4.py"
title2 = "metar-display-v4.py" #define the filename for the display.py file
prog2 = "sudo python3 /NeoSectional/metar-display-v4.py"
title3 = "check-display.py" #define the filename for the check-display.py file
prog3 = "sudo python3 /NeoSectional/check-display.py"
def startprgm(i):
logger.info("Running thread %d" % i)
if (i == 0): #Run first program prog1
time.sleep(1)
logger.info(title1) #display filename being run
os.system(prog1) #execute filename
if (i == 1) and (displayused): #Run second program prog2 if display is being used.
logger.info(title2) #display filename being run
time.sleep(1)
os.system(prog2) #execute filename
    if (i == 2) and (displayused): #Run third program prog3 if display is being used (watchdog for displays).
logger.info(title3) #display filename being run
time.sleep(1)
os.system(prog3) #execute filename
pass
if len(sys.argv) > 1 or autorun == 1:
# print (sys.argv[1] + " from cmd line") #debug
for i in range(3):
t = threading.Thread(target=startprgm, args=(i,))
t.start()
|
utils.py
|
import asyncio
import functools
import html
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import shutil
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
from typing import Any as AnyType
from typing import Dict, List
import click
import tblib.pickling_support
try:
import resource
except ImportError:
resource = None
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
# Import config serialization functions here for backward compatibility
from dask.config import deserialize as deserialize_for_cli # noqa
from dask.config import serialize as serialize_for_cli # noqa
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa: F401
format_bytes,
format_time,
funcname,
parse_bytes,
parse_timedelta,
)
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import optional_packages, required_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
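# Illustrative usage of has_arg (a sketch, not part of the original module):
#     has_arg(lambda x, y: None, "x")   # -> True
#     has_arg(len, "obj")               # -> False (builtins expose no argspec)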
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
ValueError is raised if the interface does no have an IPv4 address
associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
callback_timeout = parse_timedelta(callback_timeout, "s")
# Tornado's PollIOLoop doesn't raise when using closed, do it ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
# We flag the thread state asynchronous, which will make sync() call
# within `func` use async semantic. In order to support concurrent
# calls to sync(), `asynchronous` is used as a ref counter.
thread_state.asynchronous = getattr(thread_state, "asynchronous", 0)
thread_state.asynchronous += 1
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception:
error[0] = sys.exc_info()
finally:
assert thread_state.asynchronous > 0
thread_state.asynchronous -= 1
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
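# Illustrative usage of sync (a sketch, not part of the original module): run a coroutine
# on a loop owned by a LoopRunner (defined below) that runs in another thread, blocking
# the calling thread until the coroutine completes.
#     runner = LoopRunner()
#     runner.start()
#     sync(runner.loop, asyncio.sleep, 0.5)   # returns the coroutine's result (here None)
#     runner.stop()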
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
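# Illustrative usage of set_thread_state (a sketch, not part of the original module):
# temporarily set attributes on thread_state; previous values (or their absence) are
# restored when the block exits.
#     with set_thread_state(asynchronous=True):
#         assert getattr(thread_state, "asynchronous", False)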
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return key_split(x)
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
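# Illustrative usage of log_errors (a sketch, not part of the original module): any
# exception raised inside the block is logged via logger.exception and then re-raised.
#     with log_errors():
#         do_risky_work()   # hypothetical call; failures are logged before propagating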
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
"""Truncate exception to be about a certain length"""
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
"""Possibly contains a nested task"""
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
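# Illustrative usage of tmpfile (a sketch, not part of the original module): the yielded
# path does not exist yet, and anything created at it is removed when the block exits.
#     with tmpfile(".json") as fn:
#         with open(fn, "w") as f:
#             json.dump({"a": 1}, f)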
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
The object to be converted. Will correctly handled
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
"""Loads modules for a file (.py, .zip, .egg)"""
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
"""Number of bytes of a frame or memoryview"""
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def json_load_robust(fn, load=json.load):
"""Reads a JSON file from disk that may be being written as we read"""
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
"""A logging.Handler that records records into a deque"""
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
asyncio.get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
# TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below,
# once tornado > 6.0.3 is available.
if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
# https://github.com/tornadoweb/tornado/issues/2608
BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy
else:
BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(BaseEventLoopPolicy):
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = set(
[
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
]
)
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
@functools.lru_cache(None)
def iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
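# Illustrative usage of warn_on_duration (a sketch, not part of the original module):
# emit a warning if the body of the block runs longer than the given duration.
#     with warn_on_duration("1s", "This block was slower than expected"):
#         sleep(2)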
def typename(typ):
"""Return name of type
Examples
--------
>>> from distributed import Scheduler
>>> typename(Scheduler)
'distributed.scheduler.Scheduler'
"""
try:
return typ.__module__ + "." + typ.__name__
except AttributeError:
return str(typ)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
>>> parse_ports(8787)
[8787]
or a string:
>>> parse_ports("8787")
[8787]
A sequential range of ports can be specified by a string which indicates
the first and last ports which should be included in the sequence of ports:
>>> parse_ports("8787:8790")
[8787, 8788, 8789, 8790]
An input of ``None`` is also valid and can be used to indicate that no port
has been specified:
>>> parse_ports(None)
[None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"port_start={port_start} and port_stop={port_stop}"
)
ports = list(range(port_start, port_stop + 1))
return ports
is_coroutine_function = iscoroutinefunction
class Log(str):
"""A container for newline-delimited string of log entries"""
level_styles = {
"WARNING": "font-weight: bold; color: orange;",
"CRITICAL": "font-weight: bold; color: orangered;",
"ERROR": "font-weight: bold; color: crimson;",
}
def _repr_html_(self):
logs_html = []
for message in self.split("\n"):
style = "font-family: monospace; margin: 0;"
for level in self.level_styles:
if level in message:
style += self.level_styles[level]
break
logs_html.append(
'<p style="{style}">{message}</p>'.format(
style=html.escape(style),
message=html.escape(message),
)
)
return "\n".join(logs_html)
class Logs(dict):
"""A container for a dict mapping names to strings of log entries"""
def _repr_html_(self):
summaries = [
"<details>\n"
"<summary style='display:list-item'>{title}</summary>\n"
"{log}\n"
"</details>".format(title=title, log=log._repr_html_())
for title, log in sorted(self.items())
]
return "\n".join(summaries)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d : dict
The keywords to convert
cls : callable
The callable that consumes these terms to check them for validity
cmd : string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (i.e., arguments that
are not part of Worker class), such as nprocs from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
"Class %s does not support keyword %s" % (typename(cls), k)
)
else:
raise ValueError(
"Module %s does not support keyword %s" % (typename(cmd), k)
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
[["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin") # doctest: +SKIP
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
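# Illustrative usage of offload (a sketch, not part of the original module): run a
# blocking callable on the single "Dask-Offload" thread without blocking the event loop.
#     async def example():
#         return await offload(sum, range(1_000_000))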
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
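# Illustrative usage of LRU (a sketch, not part of the original module):
#     cache = LRU(maxsize=2)
#     cache["a"], cache["b"] = 1, 2
#     cache["a"]        # looking up "a" makes "b" the least recently used entry
#     cache["c"] = 3    # inserting "c" evicts "b"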
def clean_dashboard_address(addrs: AnyType, default_listen_ip: str = "") -> List[Dict]:
"""
Examples
--------
>>> clean_dashboard_address(8787)
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address(":8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("foo:8787")
[{'address': 'foo', 'port': 8787}]
>>> clean_dashboard_address([8787, 8887])
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
>>> clean_dashboard_address(":8787,:8887")
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
if isinstance(addrs, str):
addrs = addrs.split(",")
if not isinstance(addrs, list):
addrs = [addrs]
addresses = []
for addr in addrs:
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
addresses.append({"address": host, "port": port})
return addresses
|
upnp.py
|
import logging
import threading
from queue import Queue
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
def __init__(self):
self.queue = Queue()
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
self.upnp.deleteportmapping(port, "TCP")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "kale", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
self.upnp.deleteportmapping(port, "TCP")
log.info(f"Port {port} closed with UPnP")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run kale, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
self.queue.put(("shutdown",))
self.thread.join()
def __del__(self):
self.shutdown()
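# Illustrative usage (a sketch; it assumes a UPnP-capable gateway is reachable and the
# optional miniupnpc dependency is installed):
#     upnp = UPnP()        # starts the background worker thread
#     upnp.remap(8444)     # ask the gateway to forward TCP port 8444 to this host
#     upnp.release(8444)   # remove the port mapping
#     upnp.shutdown()      # stop the worker thread (also invoked from __del__)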
|
webserver_context.py
|
"""Provides context manager that runs local webserver."""
import contextlib
import os
import posixpath
import requests
import time
from multiprocessing import Process
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote
class MyHTTPRequestHandler(SimpleHTTPRequestHandler):
"""Copy of SimpleHTTPRequestHandler with base_dir."""
base_dir = None # Must be set before use
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
**Based on code in http.server.SimpleHTTPRequestHandler with
modification of base path to come from self.base_dir**
"""
# abandon query parameters
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
path = unquote(path)
path = posixpath.normpath(path)
words = path.split('/')
words = filter(None, words)
# Now make path from self.base_dir and words
tpath = self.base_dir
for word in words:
if word != '..':
tpath = os.path.join(tpath, word)
if trailing_slash:
tpath += '/'
return tpath
def run_webserver(dir, host, port):
"""Run webserver.
dir - base directory for files to serve
host & port - hostname and port of server
"""
    # Up until Python 3.7 it worked to set .base_dir in webserver(...) before
    # creating the Process(...). This changed in 3.8, after which the value was
    # not seen when running the server. Setting the class variable here instead
    # works across different versions.
MyHTTPRequestHandler.base_dir = dir
server_address = (host, port)
httpd = HTTPServer(server_address, MyHTTPRequestHandler)
httpd.serve_forever()
@contextlib.contextmanager
def webserver(dir='/tmp/htdocs', host='localhost', port=9999):
"""Context Manager that provides a webserver serving files from dir."""
p = Process(target=run_webserver, args=(dir, host, port))
p.start()
# Wait for the server to be launched
base_url = 'http://%s:%d/' % (host, port)
for j in range(0, 10):
try:
requests.get(base_url, timeout=0.1)
break
except requests.exceptions.ConnectionError:
pass
time.sleep(0.1)
else:
print("Failed to start test webserver from %s at host=%s port=%d" % (dir, host, port))
# Setup complete, yield to execute with clause
try:
yield
finally:
# Close the server
p.terminate()
time.sleep(0.1)
if __name__ == '__main__':
print('Will start webserver at localhost:9999 serving from /tmp/htdocs for 10s')
with webserver():
print('Started...')
time.sleep(10)
# Things with server go in here
print('Exited server')
|