session_test.py
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.session.Session.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading import time import tensorflow.python.platform import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import config_pb2 from tensorflow.core.lib.core import error_codes_pb2 from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.ops import array_ops from tensorflow.python.ops import constant_op from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.util import compat # NOTE(mrry): Dummy shape registration for op used in the tests. ops.RegisterShape('ConstructionFails')(None) class SessionTest(test_util.TensorFlowTestCase): def testUseExistingGraph(self): with ops.Graph().as_default() as g, ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(graph=g): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testUseDefaultGraph(self): with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testCreate(self): with session.Session(): inp = constant_op.constant(10.0, name='W1') copy = array_ops.identity(inp) # Test with feed. # TODO(mrry): Investigate why order='F' didn't work. arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C') copy_val = copy.eval({'W1:0': arr}) self.assertAllEqual(arr, copy_val) # Test without feed. copy_val = copy.eval() self.assertAllEqual(np.asarray(10.0, dtype=np.float32), copy_val) def testManyCPUs(self): # TODO(keveman): Implement ListDevices and test for the number of # devices returned by ListDevices. 
with session.Session( config=config_pb2.ConfigProto(device_count={'CPU': 2})): inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) def testErrorsReported(self): with session.Session() as s: constant_op.constant(10.0, name='W1') with self.assertRaises(ValueError): s.run('foo:0') def testErrorPayload(self): with session.Session(): a = array_ops.placeholder(dtypes.float32) with self.assertRaisesOpError(lambda e: e.op == a.op): a.eval() def testOpConstructionErrorPayload(self): with session.Session(): failing_op = ops.get_default_graph().create_op( 'ConstructionFails', [], [], name='f') def exc_predicate(e): return (e.op == failing_op and e.error_code == error_codes_pb2.INVALID_ARGUMENT) with self.assertRaisesOpError(exc_predicate): failing_op.run() def testErrorBasedOn(self): with session.Session() as sess: a = constant_op.constant(0.0, shape=[2, 3]) # NOTE(mrry): The original_op is nonsense, but used here to test that the # errors are reported correctly. # pylint: disable=protected-access with sess.graph._original_op(a.op): b = array_ops.identity(a, name='id') with sess.graph._original_op(b.op): c = array_ops.placeholder(dtypes.float32) # pylint: enable=protected-access def exc_predicate(e): return (e.op == c.op and e.op._original_op == b.op and e.op._original_op._original_op == a.op) with self.assertRaisesOpError(exc_predicate): c.eval() def testFetchTensorObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) results_with_list = s.run([c]) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0]) results_with_single = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single) results_with_get = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get) a_val, b_val = s.run([a, b]) # Test multiple fetches. 
self.assertAllEqual([[1.0, 1.0]], a_val) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val) def testFetchScalar(self): with session.Session() as s: for scalar in np.int32, np.int64, np.float32, np.float64: x = scalar(7) y = scalar(8) tf_x = constant_op.constant(x, shape=[]) tf_y = constant_op.constant(y) tf_xy = math_ops.add(tf_x, tf_y) # Single fetch xy = s.run(tf_xy) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # List fetch xy, = s.run([tf_xy]) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) def testFetchOperationObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) v = variables.Variable(a, name='testFetchOperationObject_v') s.run(v.initializer) v_val = s.run(v) self.assertAllEqual([[1.0, 1.0]], v_val) def testFetchSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = ops.SparseTensor( constant_op.constant(indices), constant_op.constant(values), constant_op.constant(shape)) # Single fetch, use as tuple sp_out = s.run(sp) indices_out, values_out, shape_out = sp_out self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Single fetch, use as SparseTensorValue sp_out = s.run(sp) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.shape, shape) # Tuple fetch, use as tuple indices_out, values_out, shape_out = s.run(sp) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as tuple (indices_out, values_out, shape_out), = s.run([sp]) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as SparseTensorValue sp_out, = s.run([sp]) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.shape, shape) def testFeedSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = ops.SparseTensor( array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(3,)),) sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.shape) sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: ops.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.shape, shape) def testExtendWithStatelessOperations(self): with session.Session() as s: a = 
constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) # Extend will happen here. e_val = s.run(e) self.assertAllEqual([[24.0]], e_val) def testExtendWithStatefulOperations(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) v = variables.Variable(c, name='testExtendWithStatefulOperations_v') v.initializer.run() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) # Extend will happen here. e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) def testExtendWithGroupBy(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) p = variables.Variable(a, name='testExtendWithGroupBy_p') a_val = a.eval() # Force an Extend after this op. self.assertAllEqual([[1.0, 1.0]], a_val) b = constant_op.constant(2.0, shape=[1, 2]) q = variables.Variable(b, name='testExtendWithGroupBy_q') # Extend will happen here. init = control_flow_ops.group(p.initializer, q.initializer) s.run(init) p_val, q_val = s.run([p, q]) self.assertAllEqual([[1.0, 1.0]], p_val) self.assertAllEqual([[2.0, 2.0]], q_val) def testTensorGetMethod(self): with session.Session(): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]}) self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val) def testOperationRunMethod(self): with session.Session(): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[1, 2], name='b') v = variables.Variable(a, a.dtype) assign_a_to_v = state_ops.assign(v, a) assign_a_to_v.eval() v_val = v.eval() self.assertAllEqual([[1.0, 1.0]], v_val) assign_b_to_v = state_ops.assign(v, b) assign_b_to_v.eval() v_val = v.eval() self.assertAllEqual([[2.0, 2.0]], v_val) assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]}) v_val = v.eval() self.assertAllEqual([[3.0, 3.0]], v_val) def testDefaultGraph(self): with session.Session() as s: self.assertEqual(ops.get_default_graph(), s.graph) a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) self.assertEqual(ops.get_default_graph(), a.graph) self.assertEqual(ops.get_default_graph(), b.graph) c = math_ops.matmul(a, b) v = variables.Variable(c, name='testDefaultGraph_v') v.initializer.run() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) self.assertEqual(ops.get_default_graph(), s.graph) def _testDefaultGraphInThread(self, constructed_event, continue_event, i): with session.Session() as s: self.assertEqual(ops.get_default_graph(), s.graph) a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = 
math_ops.matmul(a, b) v = variables.Variable(c, name='var_%d' % i) # Block here until all threads have constructed their graph. constructed_event.set() continue_event.wait() assign_c_to_v = state_ops.assign(v, c) v.initializer.run() assign_c_to_v.eval() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) self.assertEqual(ops.get_default_graph(), s.graph) def testDefaultGraphWithThreads(self): # Fork ten threads that use their thread-local default graph. threads = [] constructed_events = [threading.Event() for _ in range(10)] continue_event = threading.Event() for i, constructed_event in enumerate(constructed_events): t = self.checkedThread(target=self._testDefaultGraphInThread, args=(constructed_event, continue_event, i)) threads.append(t) for t in threads: t.start() for constructed_event in constructed_events: constructed_event.wait() continue_event.set() for t in threads: t.join() def testParallelRun(self): with session.Session() as sess: c = constant_op.constant(5.0) ev = threading.Event() def run_step(): ev.wait() val = c.eval(session=sess) self.assertEqual(val, 5.0) threads = [self.checkedThread(target=run_step) for _ in range(100)] for t in threads: t.start() ev.set() for t in threads: t.join() def testRunFeedDict(self): with session.Session() as s: x = array_ops.zeros([2]) y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)}) self.assertAllEqual(y, 2 * np.ones(2)) y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)}) self.assertAllEqual(y, 2 * np.ones(2)) y = s.run(2 * x, feed_dict={x: [1, 1]}) assert (y == 2 * np.ones(2)).all() def testGraphDef(self): with session.Session() as sess: self.assertProtoEquals('version: %d' % versions.GRAPH_DEF_VERSION, sess.graph_def) c = constant_op.constant(5.0, name='c') self.assertEquals(len(sess.graph_def.node), 1) d = constant_op.constant(6.0, name='d') self.assertEquals(len(sess.graph_def.node), 2) self.assertAllEqual(c.eval(), 5.0) self.assertAllEqual(d.eval(), 6.0) e = constant_op.constant(7.0, name='e') self.assertEquals(len(sess.graph_def.node), 3) self.assertAllEqual(e.eval(), 7.0) def testUseAfterClose(self): with session.Session() as sess: c = constant_op.constant(5.0) self.assertAllEqual(sess.run(c), 5.0) with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)): sess.run(c) def testUseAfterCloseConcurrent(self): with session.Session() as sess: c = constant_op.constant(5.0) self.assertAllEqual(sess.run(c), 5.0) def update_thread(): with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)): while True: sess.run(c) t = threading.Thread(target=update_thread) t.start() time.sleep(0.1) sess.close() t.join() def testUseEmptyGraph(self): with session.Session() as sess: with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'The Session graph is empty.' 
in str(e)): sess.run([]) def testNotEntered(self): # pylint: disable=protected-access self.assertEqual(ops._default_session_stack.get_default(), None) # pylint: enable=protected-access with ops.device('/cpu:0'): sess = session.Session() c_1 = constant_op.constant(5.0) with sess.graph.as_default(): c_2 = constant_op.constant(5.0) self.assertEqual(c_1.graph, c_2.graph) self.assertEqual(sess.run(c_2), 5.0) with self.assertRaisesWithPredicateMatch( ValueError, lambda e: 'No default session is registered.' in str(e)): c_2.eval() def testInteractive(self): with ops.device('/cpu:0'): sess = session.InteractiveSession() a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval()) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) self.assertAllEqual([[24.0]], e.eval()) sess.close() def testSharedGraph(self): with ops.Graph().as_default() as g, ops.device('/cpu:0'): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) with session.Session(graph=g) as sess1: with session.Session(graph=g) as sess2: self.assertAllEqual(sess1.run(c), sess2.run(c)) def testDuplicatedInputs(self): with session.Session() as sess: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[1, 3]) a_val, b_val, a2_val = sess.run([a, b, a]) self.assertAllEqual(a_val, [[1.0, 1.0]]) self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]]) self.assertAllEqual(a2_val, [[1.0, 1.0]]) def testFeedAndFetch(self): with session.Session(): for dtype in [dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool, dtypes.complex64]: for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: np_dtype = dtype.as_numpy_dtype feed_t = array_ops.placeholder(dtype=dtype, shape=shape) out_t = array_ops.identity(feed_t) np_array = np.random.randint(-10, 10, shape) if dtype == dtypes.bool: np_array = np_array > 0 elif dtype == dtypes.complex64: np_array = np.sqrt(np_array.astype(np_dtype)) else: np_array = np_array.astype(np_dtype) self.assertAllEqual(np_array, out_t.eval(feed_dict={feed_t: np_array})) def testStringFetch(self): with session.Session(): for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: size = 1 for s in shape: size *= s c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)], dtype=np.object).reshape(shape) if size > 0 else [] c = constant_op.constant(c_list) self.assertAllEqual(c.eval(), c_list) def testStringFeed(self): with session.Session(): for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: size = 1 for s in shape: size *= s c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)], dtype=np.object).reshape(shape) feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape) c = array_ops.identity(feed_t) self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list) def testStringFeedWithNullCharacters(self): with session.Session(): c_list = [b'\n\x01\x00', b'\n\x00\x01'] feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2]) c = array_ops.identity(feed_t) out = c.eval(feed_dict={feed_t: c_list}) self.assertEqual(c_list[0], out[0]) self.assertEqual(c_list[1], out[1]) def testStringFeedWithUnicode(self): with session.Session(): c_list = [u'\n\x01\x00', u'\n\x00\x01'] feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2]) c = array_ops.identity(feed_t) out = c.eval(feed_dict={feed_t: c_list}) self.assertEqual(c_list[0], 
out[0].decode('utf-8')) self.assertEqual(c_list[1], out[1].decode('utf-8')) out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)}) self.assertEqual(c_list[0], out[0].decode('utf-8')) self.assertEqual(c_list[1], out[1].decode('utf-8')) def testInvalidTargetFails(self): with self.assertRaises(RuntimeError): session.Session('INVALID_TARGET') def testFetchByNameDifferentStringTypes(self): with session.Session() as sess: c = constant_op.constant(42.0, name='c') d = constant_op.constant(43.0, name=u'd') e = constant_op.constant(44.0, name=b'e') f = constant_op.constant(45.0, name=r'f') self.assertTrue(isinstance(c.name, six.text_type)) self.assertTrue(isinstance(d.name, six.text_type)) self.assertTrue(isinstance(e.name, six.text_type)) self.assertTrue(isinstance(f.name, six.text_type)) self.assertEqual(42.0, sess.run('c:0')) self.assertEqual(42.0, sess.run(u'c:0')) self.assertEqual(42.0, sess.run(b'c:0')) self.assertEqual(42.0, sess.run(r'c:0')) self.assertEqual(43.0, sess.run('d:0')) self.assertEqual(43.0, sess.run(u'd:0')) self.assertEqual(43.0, sess.run(b'd:0')) self.assertEqual(43.0, sess.run(r'd:0')) self.assertEqual(44.0, sess.run('e:0')) self.assertEqual(44.0, sess.run(u'e:0')) self.assertEqual(44.0, sess.run(b'e:0')) self.assertEqual(44.0, sess.run(r'e:0')) self.assertEqual(45.0, sess.run('f:0')) self.assertEqual(45.0, sess.run(u'f:0')) self.assertEqual(45.0, sess.run(b'f:0')) self.assertEqual(45.0, sess.run(r'f:0')) def testIncorrectGraph(self): with ops.Graph().as_default() as g_1: c_1 = constant_op.constant(1.0, name='c') with ops.Graph().as_default() as g_2: c_2 = constant_op.constant(2.0, name='c') self.assertEqual('c', c_1.op.name) self.assertEqual('c', c_2.op.name) with session.Session(graph=g_1) as sess_1: self.assertEqual(1.0, sess_1.run(c_1)) with self.assertRaises(ValueError): sess_1.run(c_2) with self.assertRaises(ValueError): sess_1.run(c_2.op) with session.Session(graph=g_2) as sess_2: with self.assertRaises(ValueError): sess_2.run(c_1) with self.assertRaises(ValueError): sess_2.run(c_1.op) self.assertEqual(2.0, sess_2.run(c_2)) if __name__ == '__main__': googletest.main()
util.py
import os
import re
import shutil
import sys
import ctypes
from pathlib import Path
from colorama import Fore, Back, Style
from .settings import *

if sys.version_info < (3, 6):
    raise RuntimeError("\nPlease restart with Python 3.6+\n"
                       "Current Python version: " + str(sys.version_info))

ti_core = None


def in_docker():
    if os.environ.get("TI_IN_DOCKER", "") == "":
        return False
    else:
        return True


def import_ti_core(tmp_dir=None):
    global ti_core
    if get_os_name() != 'win':
        old_flags = sys.getdlopenflags()
        sys.setdlopenflags(2 | 8)  # RTLD_NOW | RTLD_DEEPBIND
    else:
        pyddir = os.path.join(package_root(), 'lib')
        os.environ['PATH'] += ';' + pyddir
    try:
        import taichi_core as core
    except Exception as e:
        if isinstance(e, ImportError):
            print(Fore.YELLOW + "Shared object taichi_core import failed, "
                  "check this page for possible solutions:\n"
                  "https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
                  + Fore.RESET)
            if get_os_name() == 'win':
                e.msg += '\nConsider installing Microsoft Visual C++ Redistributable: https://aka.ms/vs/16/release/vc_redist.x64.exe'
            elif get_os_name() == 'linux':
                e.msg += '\nConsider installing libtinfo5: sudo apt-get install libtinfo5'
        raise e from None
    ti_core = core
    if get_os_name() != 'win':
        sys.setdlopenflags(old_flags)
    lib_dir = os.path.join(package_root(), 'lib')
    core.set_lib_dir(locale_encode(lib_dir))
    if tmp_dir is not None:
        core.set_tmp_dir(locale_encode(tmp_dir))


def locale_encode(path):
    try:
        import locale
        return path.encode(locale.getdefaultlocale()[1])
    except:
        try:
            import sys
            return path.encode(sys.getfilesystemencoding())
        except:
            try:
                return path.encode()
            except:
                return path


def is_ci():
    return os.environ.get('TI_CI', '') == '1'


def package_root():
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')


def is_release():
    return os.environ.get('TAICHI_REPO_DIR', '') == ''


def get_core_shared_object():
    if is_release():
        directory = os.path.join(package_root(), 'lib')
    else:
        directory = get_bin_directory()
    return os.path.join(directory, 'libtaichi_core.so')


def get_repo():
    from git import Repo
    repo = Repo(get_repo_directory())
    return repo


def print_red_bold(*args, **kwargs):
    print(Fore.RED + Style.BRIGHT, end='')
    print(*args, **kwargs)
    print(Style.RESET_ALL, end='')


create_sand_box_on_windows = True


def build():
    tmp_cwd = os.getcwd()
    bin_dir = get_build_directory()
    try:
        os.mkdir(bin_dir)
    except:
        pass
    os.chdir(bin_dir)
    import multiprocessing
    print('Building taichi...')
    num_make_threads = min(20, multiprocessing.cpu_count())
    if get_os_name() == 'win':
        make_ret = os.system(
            "msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln")
    else:
        make_ret = os.system('make -j {}'.format(num_make_threads))
    if make_ret != 0:
        print(' Error: Build failed.')
        exit(-1)
    os.chdir(tmp_cwd)


def check_exists(src):
    if not os.path.exists(src):
        raise FileNotFoundError(
            f'File "{src}" does not exist. Installation corrupted or build incomplete?'
        )


def prepare_sandbox():
    '''
    Returns a temporary directory, which will be automatically deleted on exit.
    It may contain the taichi_core shared object or some misc. files.
    '''
    import atexit
    import shutil
    from tempfile import mkdtemp
    tmp_dir = mkdtemp(prefix='taichi-')
    atexit.register(shutil.rmtree, tmp_dir)
    print(f'[Taichi] preparing sandbox at {tmp_dir}')
    os.mkdir(os.path.join(tmp_dir, 'runtime/'))
    return tmp_dir


def get_unique_task_id():
    import datetime
    import random
    return datetime.datetime.now().strftime('task-%Y-%m-%d-%H-%M-%S-r') + (
        '%05d' % random.randint(0, 10000))


if is_release():
    print("[Taichi] mode=release")
    sys.path.append(os.path.join(package_root(), 'lib'))
    if get_os_name() != 'win':
        link_src = os.path.join(package_root(), 'lib', 'taichi_core.so')
        link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so')
        # For llvm jit to find the runtime symbols
        if not os.path.exists(link_dst):
            os.symlink(link_src, link_dst)
    import_ti_core()
    if get_os_name() != 'win':
        dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_LOCAL)

    # The C backend needs a temporary directory for the generated .c and compiled .so files:
    ti_core.set_tmp_dir(locale_encode(
        prepare_sandbox()))  # TODO: always allocate a tmp_dir for all situations

    ti_core.set_python_package_dir(package_root())
    os.makedirs(ti_core.get_repo_dir(), exist_ok=True)
else:
    print("[Taichi] mode=development")
    if get_os_name() == 'osx':
        bin_dir = get_bin_directory()
        os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory()
        lib_path = os.path.join(bin_dir, 'libtaichi_core.dylib')
        tmp_cwd = os.getcwd()
        tmp_dir = prepare_sandbox()
        check_exists(lib_path)
        shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
        os.chdir(tmp_dir)
        sys.path.append(tmp_dir)
        import taichi_core as ti_core
        os.chdir(tmp_cwd)

    # TODO: unify importing infrastructure:
    elif get_os_name() == 'linux':
        bin_dir = get_bin_directory()
        if 'LD_LIBRARY_PATH' in os.environ:
            os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/'
        else:
            os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/'
        lib_path = os.path.join(bin_dir, 'libtaichi_core.so')
        check_exists(lib_path)
        tmp_cwd = os.getcwd()
        tmp_dir = prepare_sandbox()
        check_exists(lib_path)
        shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
        os.chdir(tmp_dir)
        sys.path.append(tmp_dir)
        try:
            import_ti_core(tmp_dir)
        except Exception as e:
            print_red_bold("Taichi core import failed: ", end='')
            print(e)
            print(Fore.YELLOW + "check this page for possible solutions:\n"
                  "https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
                  + Fore.RESET)
            raise e from None
        os.chdir(tmp_cwd)
    elif get_os_name() == 'win':
        bin_dir = get_bin_directory()
        dll_path_invalid = os.path.join(bin_dir, 'libtaichi_core.dll')
        assert not os.path.exists(dll_path_invalid)
        possible_folders = ['Debug', 'RelWithDebInfo', 'Release']
        detected_dlls = []
        for folder in possible_folders:
            dll_path = os.path.join(bin_dir, folder, 'taichi_core.dll')
            if os.path.exists(dll_path):
                detected_dlls.append(dll_path)
        if len(detected_dlls) == 0:
            raise FileNotFoundError(
                f'Cannot find Taichi core dll under {get_bin_directory()}/{possible_folders}'
            )
        elif len(detected_dlls) != 1:
            print('Warning: multiple Taichi core dlls found:')
            for dll in detected_dlls:
                print(' ', dll)
            print(f'Using {detected_dlls[0]}')
        dll_path = detected_dlls[0]

        # On Windows, once a dll/pyd is loaded, we cannot write to it any more
        old_wd = os.getcwd()
        os.chdir(bin_dir)

        if create_sand_box_on_windows:
            # Create a sandbox for separated core lib development and loading
            folder = os.path.join(get_output_directory(), 'tmp',
                                  get_unique_task_id())
            lib_dir = os.path.join(get_repo_directory(), 'external', 'lib')
            os.environ['PATH'] += ';' + lib_dir
            os.makedirs(folder)
            shutil.copy(dll_path, os.path.join(folder, 'taichi_core.pyd'))
            os.environ['PATH'] += ';' + folder
            sys.path.append(folder)
        else:
            shutil.copy(dll_path, os.path.join(bin_dir, 'taichi_core.pyd'))
            sys.path.append(bin_dir)
        try:
            import taichi_core as ti_core
        except Exception as e:
            print(e)
            print()
            print(
                'Hint: please make sure the major and minor versions of the Python executable are correct.'
            )
            print()
            raise e
        os.chdir(old_wd)

log_level = os.environ.get('TI_LOG_LEVEL', '')
if log_level:
    ti_core.set_logging_level(log_level)


def get_dll_name(name):
    if get_os_name() == 'linux':
        return 'libtaichi_%s.so' % name
    elif get_os_name() == 'osx':
        return 'libtaichi_%s.dylib' % name
    elif get_os_name() == 'win':
        return 'taichi_%s.dll' % name
    else:
        raise Exception(f"Unknown OS: {get_os_name()}")


def load_module(name, verbose=True):
    if verbose:
        print('Loading module', name)
    try:
        if get_os_name() == 'osx':
            mode = ctypes.RTLD_LOCAL
        else:
            mode = ctypes.RTLD_GLOBAL
        if '.so' in name:
            ctypes.PyDLL(name, mode=mode)
        else:
            ctypes.PyDLL(os.path.join(get_repo_directory(), 'build',
                                      get_dll_name(name)), mode=mode)
    except Exception as e:
        print(Fore.YELLOW +
              "Warning: module [{}] loading failed: {}".format(name, e) +
              Style.RESET_ALL)


def at_startup():
    if not is_release():
        output_dir = get_output_directory()
        if not os.path.exists(output_dir):
            print('Making output directory')
            os.mkdir(output_dir)
    ti_core.set_core_state_python_imported(True)


def start_memory_monitoring(output_fn, pid=-1, interval=1):
    # removing dependency on psutil
    return
    import os, psutil, time
    if pid == -1:
        pid = os.getpid()
    import multiprocessing

    def task():
        with open(output_fn, 'w') as f:
            process = psutil.Process(pid)
            while True:
                try:
                    mem = process.memory_info().rss
                except:
                    mem = -1
                time.sleep(interval)
                print(time.time(), mem, file=f)
                f.flush()

    proc = multiprocessing.Process(target=task, daemon=True)
    proc.start()


def require_version(major, minor=None, patch=None):
    versions = [
        int(ti_core.get_version_major()),
        int(ti_core.get_version_minor()),
        int(ti_core.get_version_patch()),
    ]
    match = major == versions[0] and (
        minor < versions[1] or minor == versions[1] and patch <= versions[2])
    if match:
        return
    else:
        print("Taichi version mismatch. Required >= {}.{}.{}".format(
            major, minor, patch))
        print("Installed =", ti_core.get_version_string())
        raise Exception("Taichi version mismatch")


at_startup()


def _print_taichi_header():
    dev_mode = not is_release()
    header = '[Taichi] '
    if dev_mode:
        header += '<dev mode>, '
    else:
        header += f'version {ti_core.get_version_string()}, '
    llvm_version = ti_core.get_llvm_version_string()
    header += f'llvm {llvm_version}, '
    commit_hash = ti_core.get_commit_hash()
    commit_hash = commit_hash[:8]
    header += f'commit {commit_hash}, '
    header += f'{get_os_name()}, '
    py_ver = '.'.join(str(x) for x in sys.version_info[:3])
    header += f'python {py_ver}'
    print(header)


_print_taichi_header()

__all__ = [
    'ti_core',
    'build',
    'load_module',
    'start_memory_monitoring',
    'is_release',
    'package_root',
    'require_version',
]
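A minimal usage sketch of the require_version guard defined above (not part of util.py); the import path taichi.core.util and the version numbers are illustrative assumptions only:

# Hypothetical caller of require_version from the module above.
# The module path and the minimum version are assumptions for illustration.
from taichi.core.util import require_version

require_version(0, 7, 0)  # raises if the installed taichi_core reports an older version

Note that minor and patch are best passed explicitly: the comparison inside require_version does not handle their None defaults.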
ensembler.py
#!/usr/bin/env python3.6
# Copyright (c) 2019 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys
import time
import string
import random
import argparse
import multiprocessing

from multiprocessing import Process
from collections import defaultdict

from deepstate.core.fuzz import FuzzerFrontend
from deepstate.executors.fuzz.afl import AFL
from deepstate.executors.fuzz.honggfuzz import Honggfuzz
from deepstate.executors.fuzz.angora import Angora
from deepstate.executors.fuzz.eclipser import Eclipser

L = logging.getLogger(__name__)


class Ensembler(FuzzerFrontend):
  """
  Ensembler is the ensemble-based fuzzer that orchestrates and invokes fuzzer
  frontends, while also supporting seed synchronization between those frontends.
  It initializes a set of global input args for each frontend, performs an
  "ensemble compile", and spawns fuzzers in parallel while maintaining seed
  synchronization between them.
  """

  EXECUTABLES = {"FUZZER": "deepstate-ensembler"}

  @classmethod
  def parse_args(cls):
    parser = argparse.ArgumentParser(description="Ensemble-based fuzzer executor for DeepState")

    test_group = parser.add_mutually_exclusive_group(required=True)

    # Mutually exclusive target options
    test_group.add_argument("--test", type=str, \
      help="Path to test case harness for compilation and instrumentation.")
    test_group.add_argument("--test_dir", type=str, \
      help="Path to existing workspace directory with compiled and instrumented binaries.")

    # Compilation options
    parser.add_argument("-a", "--compiler_args", type=str, \
      help="Compiler linker arguments for test harness, if provided as argument.")
    parser.add_argument("--ignore_calls", type=str, \
      help="Path to static/shared libraries (colon separated) to blackbox for taint analysis.")
    parser.add_argument("-w", "--workspace", type=str, default="ensemble_bins", \
      help="Path to workspace to store compiled and instrumented binaries (default is `ensemble_bins`).")

    # Ensembler execution options
    parser.add_argument("-n", "--num_cores", type=int, default=multiprocessing.cpu_count(), \
      help="Override number of cores to use.")
    parser.add_argument("--no_global", action="store_true", \
      help="If set, disable global ensembler output, and instead report individual fuzzer stats.")

    # TODO(alan): other execution options
    #parser.add_argument("--fuzzers", type=str, \
    #  help="Comma-separated string of fuzzers to ensemble with (overrides default ensemble).")
    #parser.add_argument("--abort_on_crash", action="store_true", \
    #  help="Stop ensembler when any base fuzzer returns a crash.")

    cls.parser = parser
    super(Ensembler, cls).parse_args()

  def pre_exec(self):
    """
    Implements pre_exec method from frontend superclass, and does sanity-checking
    on parsed arguments before we can go ahead and provision an environment for
    ensemble fuzzing.
    """

    # `--fuzzer_help` equivalent to `--help`
    if self.fuzzer_help:
      self.parser.print_help()

    # ignore compiler-related arguments if not necessary
    if self.test_dir and (self.ignore_calls or self.compiler_args):
      L.info("Ignoring --ignore_calls and/or --compiler_args arguments passed")

    # initial path check
    _test = self.test if not self.test_dir else self.test_dir
    if not os.path.exists(_test):
      L.error("Target path `%s` does not exist. Exiting.", _test)
      sys.exit(1)

    if not os.path.isdir(self.input_seeds):
      L.error("Input seeds directory `%s` does not exist. Exiting.", self.input_seeds)
      sys.exit(1)

    if not os.path.isdir(self.output_test_dir):
      L.warn("Output directory does not exist. Creating.")
      os.mkdir(self.output_test_dir)

    sync_dir = self.output_test_dir + "/" + self.sync_dir
    if not os.path.isdir(sync_dir):
      L.warn("Sync directory does not exist. Creating.")
      os.mkdir(sync_dir)
    elif os.path.isdir(sync_dir) and len([f for f in os.listdir(sync_dir)]) != 0:
      L.error("Sync directory exists and is not empty. Exiting.")
      sys.exit(1)

  @staticmethod
  def _init_fuzzers(ret_all=False):
    """
    Initialize a pre-defined ensemble of fuzzer objects. Return all subclasses
    if param is set.

    Default fuzzer ensemble (four cores): afl,honggfuzz,angora,eclipser
    """
    if ret_all:
      return [subclass() for subclass in FuzzerFrontend.__subclasses__()]
    else:
      return [AFL(), Honggfuzz(), Angora(), Eclipser()]

  def _get_tests(self, tests):
    """
    Given a workspace path, retrieve testcases and map to specific fuzzer. We
    map based on the condition that the generated test binary contains an
    extension denoting the fuzzer name.

    :param tests: list of paths to workspace with already-compiled target binaries
    """

    def _get_fuzzer(test):
      ext = test.split(".")[-1]
      if ext in ["fast", "taint"]:
        return "angora"
      elif ext == "hfuzz":
        return "honggfuzz"
      return ext.lower()

    fuzz_map = defaultdict(list)
    for test in tests:
      for fuzzer in self.fuzzers:
        if str(fuzzer).lower() == _get_fuzzer(test):
          fuzz_map[fuzzer].append(test)

    L.debug("Fuzzer and corresponding test cases: %s", fuzz_map)
    return fuzz_map

  def provision(self):
    """
    Initializes our ensemble of fuzzers, and creates a workspace with
    instrumented harness binaries, if necessary.
    """

    # manually call pre_exec (we don't use frontend's runner routine) before provisioning
    self.pre_exec()

    # initialize target - test str if user specified a harness, or a list to already-compiled binaries
    target = self.test if not self.test_dir else list([f for f in os.listdir(self.test_dir)])
    L.info("Provisioning environment with target `%s`", target)

    self.fuzzers = list(self._init_fuzzers())
    L.debug("Fuzzers for ensembling: %s", self.fuzzers)

    # given a path to a DeepState harness, provision/compile, and retrieve test bins
    if isinstance(target, str):
      L.info("Detected source target. Compiling and then retrieving harnesses from workspace.")
      self.targets = self._get_tests(self._provision_workspace(target))

    # given a list of paths from a workspace, instantiate normally
    elif isinstance(target, list):
      L.info("Detected workspace target. Retrieving harnesses from workspace.")
      self.targets = self._get_tests(target)

    L.debug("Target for analysis: %s", self.targets)

  def _provision_workspace(self, test_case):
    """
    Given a testcase source, provision a workspace with appropriate target binaries.

    :param test_case: path to uncompiled test case directory
    """

    if not os.path.isdir(self.workspace):
      L.info("Workspace doesn't exist. Creating.")
      os.mkdir(self.workspace)

    L.info("Provisioning test case into workspace with instrumented binaries")
    for fuzzer in self.fuzzers:
      test_name = self.workspace + "/" + test_case.split(".")[0]
      L.debug("Compiling `%s` for fuzzer `%s`", test_name, fuzzer)

      cmd_map = {
        "compile_test": test_case,
        "out_test_name": test_name,
        "compiler_args": self.compiler_args if self.compiler_args else None
      }

      if isinstance(fuzzer, Angora):
        cmd_map["mode"] = "llvm"
        cmd_map["ignore_calls"] = self.ignore_calls

      fuzzer.init_from_dict(cmd_map)

      L.info("Compiling test case %s as `%s` with %s", test_case, test_name, fuzzer)
      fuzzer.compile()

    return [test for test in os.listdir(self.workspace)]

  def report(self):
    """
    Global status reporter for ensemble fuzzing. We store and parse each
    individual fuzzer's reporter and provide a global output during fuzzer
    execution.
    """
    while True:
      global_stats = dict()
      for fuzzer in self.fuzzers:
        time.sleep(self.sync_cycle)
        stats = fuzzer.reporter()
        global_stats.update(stats)

      print("\n\n[\tEnsemble Fuzzer Status\t\t]\n")
      for head, stat in global_stats.items():
        print(f"Total {head}\t:\t{stat}")

  def run_ensembler(self):
    """
    Bootstraps all fuzzers for ensembling with appropriate arguments, and runs
    the fuzzers in parallel.

    TODO(alan): exit_crash arg to kill fuzzer and report when one crash is found
    """

    def _rand_id():
      return "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4))

    #pool = multiprocessing.Pool(processes=self.num_cores)
    procs = []

    L.info("Initializing fuzzers for ensembling.")

    # for each fuzzer, instantiate fuzzer arguments manually using () rather than
    # the parse_args() interface in each frontend. Specific fuzzers need specific
    # options, so we also set those.
    # TODO(alan): migrate instantiation to provision or _provision_workspace
    for fuzzer, binary in self.targets.items():

      fuzzer_args = {
        # default fuzzer execution related options
        "timeout": self.timeout,
        "binary": self.workspace + "/" + binary[0],
        "input_seeds": self.input_seeds,
        "output_test_dir": "{}/{}_{}_out".format(self.output_test_dir, str(fuzzer), _rand_id()),
        "dictionary": None,
        "max_input_size": self.max_input_size if self.max_input_size else 8192,
        "mem_limit": 50,
        "which_test": self.which_test,
        "target_args": self.target_args,

        # set sync options for all fuzzers (TODO: configurable exec cycle)
        # set sync_out to output global fuzzer stats, set as default
        "enable_sync": True,
        "sync_cycle": self.sync_cycle,
        "sync_dir": self.sync_dir,
        "sync_out": not self.no_global
      }

      # TODO(alan): store default dict in each fuzzer's _ARGS such that we don't need to
      # manually instantiate fuzzer-specific attributes

      # manually set and override options for Angora, due to the requirement of two binaries
      if isinstance(fuzzer, Angora):
        fuzzer_args.update({
          "binary": next((self.workspace + "/" + b for b in binary if ".fast" in b), None),
          "taint_binary": next((self.workspace + "/" + b for b in binary if ".taint" in b), None),
          "no_afl": False,
          "mode": "llvm",
          "no_exploration": False
        })

      # manually set and override "AFL modes" that are configured during execution
      elif isinstance(fuzzer, AFL):
        fuzzer_args.update({
          "parallel_mode": False,
          "dirty_mode": False,
          "dumb_mode": False,
          "qemu_mode": False,
          "crash_explore": False,
          "file": None
        })

      # manually set Honggfuzz options
      elif isinstance(fuzzer, Honggfuzz):
        fuzzer_args.update({
          "iterations": None,
          "persistent": False,
          "no_inst": False,
          "keep_output": False,
          "sanitizers": False,
          "clear_env": False,
          "save_all": True,
          "keep_aslr": False,
          "perf_instr": False,
          "perf_branch": False
        })

      fuzzer.init_from_dict(fuzzer_args)

      # sets compiler and no_exec params before execution
      # Eclipser requires `dotnet` to be invoked before fuzzer executable.
      if isinstance(fuzzer, Eclipser):
        args = ("dotnet", True)
      else:
        args = (None, True)

      L.info("Initialized %s for ensemble-fuzzing and spinning up child proc.", fuzzer)

      # initialize concurrent process and add to process pool
      proc = Process(target=fuzzer.run, args=args)
      procs.append(proc)

    # TODO(alan): fix up delayed reporter; try not to have an individual proc run for reporting
    if not self.no_global:
      L.info("Starting up child proc for global stats reporting.")
      report_proc = Process(target=self.report, args=())
      procs.append(report_proc)

    for proc in procs:
      proc.start()

    # sleep until fuzzers finalize initialization, approx 5 seconds
    time.sleep(5)

    for proc in procs:
      proc.join()


def main():
  ensembler = Ensembler()

  # parse arguments and provision ensembler
  ensembler.parse_args()
  ensembler.init_from_dict()
  ensembler.provision()

  # call ensembler routine
  ensembler.run_ensembler()
  return 0


if __name__ == "__main__":
  exit(main())
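A small self-contained sketch (not part of ensembler.py) of the extension-to-fuzzer mapping rule that _get_tests applies above; the helper name fuzzer_for_binary is hypothetical:

# Standalone illustration of the mapping used by Ensembler._get_tests:
# a compiled harness's file extension identifies the fuzzer it was built for.
def fuzzer_for_binary(test_path):
  ext = test_path.split(".")[-1]
  if ext in ["fast", "taint"]:  # Angora produces two instrumented binaries
    return "angora"
  elif ext == "hfuzz":          # Honggfuzz-instrumented binary
    return "honggfuzz"
  return ext.lower()            # e.g. "afl", "eclipser"

assert fuzzer_for_binary("harness.taint") == "angora"
assert fuzzer_for_binary("harness.hfuzz") == "honggfuzz"
assert fuzzer_for_binary("harness.afl") == "afl"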
xla_client_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Backend-dependent tests for the Python XLA client.""" import functools import itertools import re import threading import unittest from absl import flags from absl.testing import absltest from absl.testing import parameterized import numpy as np from tensorflow.compiler.xla.python import xla_client # pylint: disable=g-import-not-at-top try: from tensorflow.compiler.xla.python import custom_call_for_test except ImportError: custom_call_for_test = None bfloat16 = xla_client.bfloat16 ops = xla_client.ops FLAGS = flags.FLAGS # We choose to ignore pylint's complaints about complex comprehensions, which we # use widely for parameterizing tests. # pylint: disable=g-complex-comprehension def TestFactory(xla_backend, cloud_tpu=False): tests = [] if not cloud_tpu: int_dtypes = [np.int32, np.int64, np.uint32, np.uint64] # TODO(phawkins): test np.float16, where supported. float_dtypes = [bfloat16, np.float32, np.float64] complex_dtypes = [np.complex64, np.complex128] standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_] else: int_dtypes = [np.int32, np.uint32] float_dtypes = [np.float32] complex_dtypes = [np.complex64] standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_] dlpack_dtypes = int_dtypes + float_dtypes + [np.bool_] class ComputationTest(parameterized.TestCase): """Base class for running an XLA Computation through the local client.""" def setUp(self): super(ComputationTest, self).setUp() self.backend = xla_backend() def _NewComputation(self, name=None): if name is None: name = self.id() return xla_client.XlaBuilder(name) def _Execute(self, c, arguments): compiled_c = self.backend.compile(c.build()) return xla_client.execute_with_python_values( compiled_c, arguments, backend=self.backend) def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected): assert expected is not None results = self._Execute(c, arguments) self.assertLen(results, len(expected)) for result, e in zip(results, expected): # Numpy's comparison methods are a bit too lenient by treating inputs as # "array-like", meaning that scalar 4 will be happily compared equal to # [[4]]. We'd like to be more strict so assert shapes as well. 
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape) assert_func(result, e) def _ExecuteAndCompareExact(self, c, arguments=(), expected=None): self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected) def _ExecuteAndCompareClose(self, c, arguments=(), expected=None, rtol=1e-4, atol=0): self._ExecuteAndAssertWith( functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c, arguments, expected) def NumpyArrayF32(*args, **kwargs): """Convenience wrapper to create Numpy arrays with a np.float32 dtype.""" return np.array(*args, dtype=np.float32, **kwargs) def NumpyArrayF64(*args, **kwargs): """Convenience wrapper to create Numpy arrays with a np.float64 dtype.""" return np.array(*args, dtype=np.float64, **kwargs) def NumpyArrayS32(*args, **kwargs): """Convenience wrapper to create Numpy arrays with a np.int32 dtype.""" return np.array(*args, dtype=np.int32, **kwargs) def NumpyArrayBool(*args, **kwargs): """Convenience wrapper to create Numpy arrays with a np.bool dtype.""" return np.array(*args, dtype=np.bool, **kwargs) class ComputationPrinting(absltest.TestCase): def setUp(self): super(ComputationPrinting, self).setUp() self.backend = xla_backend() def ExampleComputation(self): builder = xla_client.XlaBuilder("acomputation") p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0))) p1 = ops.Parameter( builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32))) x = ops.Mul(p0, p1) ops.Add(x, x) return builder.build() @unittest.skipIf(cloud_tpu, "not implemented") def testCompiledHloModuleToHloText(self): computation = self.ExampleComputation() executable = self.backend.compile(computation) hlo_modules = executable.hlo_modules() self.assertLen(hlo_modules, 1) hlo_text = hlo_modules[0].to_string() self.assertTrue(hlo_text.startswith("HloModule acomputation")) self.assertIn("fusion", hlo_text) @unittest.skipIf(cloud_tpu, "not implemented") def testFlopEstimate(self): computation = self.ExampleComputation() properties = xla_client._xla.hlo_module_cost_analysis( self.backend, computation.as_hlo_module()) self.assertEqual(properties["flops"], 8.0) tests.append(ComputationPrinting) class ComputationsWithConstantsTest(ComputationTest): """Tests focusing on Constant ops.""" @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in int_dtypes + float_dtypes) def testConstantScalarSum(self, dtype): if dtype == np.int8 and self.backend.platform == "tpu": self.skipTest("TPU doesn't support int8") c = self._NewComputation() ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14))) self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConstantVectorMul(self, dtype): c = self._NewComputation() ops.Mul( ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)), ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype))) self._ExecuteAndCompareClose( c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConstantVectorScalarDiv(self, dtype): c = self._NewComputation() ops.Div( ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)), ops.Constant(c, dtype(2.0))) self._ExecuteAndCompareClose( c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3) @parameterized.named_parameters({ "testcase_name": 
"_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConstantVectorScalarPow(self, dtype): c = self._NewComputation() ops.Pow( ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)), ops.Constant(c, dtype(2.))) self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]]) def testIota(self): c = self._NewComputation() ops.Iota(c, xla_client.PrimitiveType.F32, 10) self._ExecuteAndCompareExact( c, expected=[np.arange(10, dtype=np.float32)]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in int_dtypes) def testBroadcastedIota(self, dtype): c = self._NewComputation() shape = xla_client.Shape.array_shape( xla_client.dtype_to_etype(dtype), (2, 3)) ops.Iota(c, shape, 1) expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype) self._ExecuteAndCompareExact(c, expected=[expected]) def testBooleanAnd(self): c = self._NewComputation() ops.And( ops.Constant(c, NumpyArrayBool([True, False, True, False])), ops.Constant(c, NumpyArrayBool([True, True, False, False]))) self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]]) def testBooleanOr(self): c = self._NewComputation() ops.Or( ops.Constant(c, NumpyArrayBool([True, False, True, False])), ops.Constant(c, NumpyArrayBool([True, True, False, False]))) self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]]) def testBooleanXor(self): c = self._NewComputation() ops.Xor( ops.Constant(c, NumpyArrayBool([True, False, True, False])), ops.Constant(c, NumpyArrayBool([True, True, False, False]))) self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testSum2D(self, dtype): c = self._NewComputation() ops.Add( ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)), ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype))) self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]]) def testShiftLeft(self): c = self._NewComputation() ops.ShiftLeft( ops.Constant(c, NumpyArrayS32([3])), ops.Constant(c, NumpyArrayS32([2]))) self._ExecuteAndCompareClose(c, expected=[[12]]) def testShiftRightArithmetic(self): c = self._NewComputation() ops.ShiftRightArithmetic( ops.Constant(c, NumpyArrayS32([-2])), ops.Constant(c, NumpyArrayS32([1]))) self._ExecuteAndCompareClose(c, expected=[[-1]]) def testShiftRightLogical(self): c = self._NewComputation() ops.ShiftRightLogical( ops.Constant(c, NumpyArrayS32([-1])), ops.Constant(c, NumpyArrayS32([1]))) self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testSum2DWith1DBroadcastDim0(self, dtype): # sum of a 2D array with a 1D array where the latter is replicated across # dimension 0 to match the former's shape. c = self._NewComputation() ops.Add( ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)), ops.Constant(c, np.array([10, 20, 30], dtype=dtype)), broadcast_dimensions=(0,)) self._ExecuteAndCompareClose( c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testSum2DWith1DBroadcastDim1(self, dtype): # sum of a 2D array with a 1D array where the latter is replicated across # dimension 1 to match the former's shape. 
c = self._NewComputation() ops.Add( ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)), ops.Constant(c, np.array([10, 20, 30], dtype=dtype)), broadcast_dimensions=(1,)) self._ExecuteAndCompareClose( c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConstantAxpy(self, dtype): c = self._NewComputation() ops.Add( ops.Mul( ops.Constant(c, dtype(2)), ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))), ops.Constant(c, np.array([100, -100, 200, -200], dtype))) self._ExecuteAndCompareClose( c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3) def testCustomCall(self): if self.backend.platform != "cpu": self.skipTest("Test requires cpu platform") c = self._NewComputation() for name, fn in custom_call_for_test.cpu_custom_call_targets.items(): xla_client.register_custom_call_target(name, fn, platform="cpu") ops.CustomCallWithLayout( c, b"test_subtract_f32", operands=[ ops.Constant(c, np.float32(1.25)), ops.Constant(c, np.float32(0.5)) ], shape_with_layout=xla_client.Shape.array_shape( np.dtype(np.float32), (), ()), operand_shapes_with_layout=[ xla_client.Shape.array_shape(np.dtype(np.float32), (), ()), xla_client.Shape.array_shape(np.dtype(np.float32), (), ()), ]) self._ExecuteAndCompareClose(c, expected=[0.75]) tests.append(ComputationsWithConstantsTest) class ComputationFromProtoTest(absltest.TestCase): """Test computation execution from HLO proto.""" def setUp(self): super(ComputationFromProtoTest, self).setUp() self.backend = xla_backend() def testExecuteFromProto(self): # Build the HLO proto b = xla_client.XlaBuilder("computation") ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2))) serialized_proto = b.build().as_serialized_hlo_module_proto() # Load and execute the proto c = xla_client.XlaComputation(serialized_proto) ans, = xla_client.execute_with_python_values( self.backend.compile(c), (), backend=self.backend) np.testing.assert_equal(ans, np.int32(3)) tests.append(ComputationFromProtoTest) class ParametersTest(ComputationTest): """Tests focusing on Parameter ops and argument-passing.""" @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in int_dtypes) def testScalarTimesVector(self, dtype): c = self._NewComputation() arg0 = np.array(3, dtype=dtype) arg1 = np.array([10, 15, -2, 7], dtype=dtype) p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0)) p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1)) ops.Mul(p0, p1) self._ExecuteAndCompareExact( c, arguments=[arg0, arg1], expected=[arg0 * arg1]) # TODO(phawkins): test comparison harness doesn't support bfloat16 @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes if dtype != bfloat16) def testScalarMinusVectorExplicitNumbering(self, dtype): # Use explicit numbering and pass parameter_num first. Sub is used since # it's not commutative and can help catch parameter reversal within the # computation. 
c = self._NewComputation() arg0 = np.array(2.0, dtype=dtype) arg1 = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype) p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1)) p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0)) ops.Sub(p1, p0) self._ExecuteAndCompareClose( c, arguments=[arg0, arg1], expected=[arg1 - arg0]) tests.append(ParametersTest) class BufferTest(ComputationTest): """Tests focusing on execution with Buffers.""" def testConstantSum(self): c = self._NewComputation() ops.Add( ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14))) self._ExecuteAndCompareClose(c, expected=[4.25]) def testOneParameterSum(self): c = self._NewComputation() ops.Add( ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))), ops.Constant(c, np.float32(3.14))) self._ExecuteAndCompareClose( c, arguments=[NumpyArrayF32(1.11)], expected=[4.25]) def testTwoParameterSum(self): c = self._NewComputation() ops.Add( ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))), ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.)))) self._ExecuteAndCompareClose( c, arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)], expected=[4.25]) @unittest.skipIf(cloud_tpu, "not implemented") def testCannotCallWithDeletedBuffers(self): c = self._NewComputation() ops.Add( ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))), ops.Constant(c, np.float32(3.14))) arg = NumpyArrayF32(1.11) compiled_c = self.backend.compile(c.build()) arg_buffer = self.backend.buffer_from_pyval(arg) arg_buffer.delete() with self.assertRaises(RuntimeError): compiled_c.execute([arg_buffer]) def testXlaShape(self): pyval = np.array([[1., 2.]], np.float32) local_buffer = self.backend.buffer_from_pyval(pyval) xla_shape = local_buffer.xla_shape() self.assertEqual(xla_shape.dimensions(), (1, 2)) self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32)) def testBlockHostUntilReadyWorks(self): arg = np.array([[1., 2.]], np.float32) arg_buffer = self.backend.buffer_from_pyval(arg) arg_buffer.block_host_until_ready() # This test merely checks that nothing goes awry when we call # block_host_until_ready(); it's difficult to test anything else. def testBlockHostUntilReadyRaisesOnDeletedBuffer(self): arg = np.array([[1., 2.]], np.float32) buffer = self.backend.buffer_from_pyval(arg) buffer.delete() with self.assertRaisesRegex( RuntimeError, re.escape( "BlockHostUntilReady() called on deleted or donated buffer")): buffer.block_host_until_ready() def testDeviceArrayBaseSignatures(self): # When extending `DeviceArrayBase`, the object behaves as a `DeviceArray` # and thus needs to correctly implement the following methods. 
      arg = np.array([[1., 2., 3.]], np.float32)
      buffer = self.backend.buffer_from_pyval(arg)
      if not isinstance(buffer, xla_client.DeviceArrayBase):
        raise unittest.SkipTest(
            "The object of type {} does not extend DeviceArrayBase".format(
                type(buffer)))

      self.assertEqual(buffer.__array_priority__, 100)
      self.assertEqual(buffer.shape, (1, 3))
      self.assertEqual(buffer.dtype, np.float32)
      self.assertEqual(buffer.size, 3)
      self.assertEqual(buffer.ndim, 2)

      self.assertIs(buffer, buffer.block_until_ready())
      buffer.delete()
      with self.assertRaises(RuntimeError):
        buffer.block_until_ready()

    def testOnDeviceSizeInBytes(self):
      if not isinstance(self.backend, xla_client.Client):
        self.skipTest("TPU Driver doesn't support OnDeviceSizeInBytes.")
      arg0 = np.array([])
      arg1 = np.array([[0., 1., 2.]], np.float32)
      arg2 = np.array([[3., 4., 5.]], bfloat16)
      arg0_buffer = self.backend.buffer_from_pyval(arg0)
      arg1_buffer = self.backend.buffer_from_pyval(arg1)
      arg2_buffer = self.backend.buffer_from_pyval(arg2)
      self.assertEqual(arg0_buffer.on_device_size_in_bytes(), 0)
      # OnDeviceSizeInBytes varies depending on the platform. Confirm there's
      # a reasonable value.
      self.assertGreater(arg1_buffer.on_device_size_in_bytes(), 0)
      self.assertGreater(arg2_buffer.on_device_size_in_bytes(), 0)

    def testLiveBuffers(self):
      if not isinstance(self.backend, xla_client.Client):
        self.skipTest("TPU Driver doesn't support LiveBuffers().")
      self.assertEmpty(self.backend.live_buffers())
      arg0 = np.array([])
      arg1 = np.array([[0., 1., 2.]], np.float32)
      arg2 = np.array([[3., 4., 5.]], bfloat16)
      arg0_buffer = self.backend.buffer_from_pyval(arg0)
      arg1_buffer = self.backend.buffer_from_pyval(arg1)
      arg2_buffer = self.backend.buffer_from_pyval(arg2)
      self.assertLen(self.backend.live_buffers(), 3)
      self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
      self.assertIs(self.backend.live_buffers()[1], arg1_buffer)
      self.assertIs(self.backend.live_buffers()[2], arg0_buffer)

      arg1_buffer.delete()
      self.assertLen(self.backend.live_buffers(), 2)
      self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
      self.assertIs(self.backend.live_buffers()[1], arg0_buffer)

      arg0_buffer.delete()
      arg2_buffer.delete()
      self.assertEmpty(self.backend.live_buffers())

    def testCopyToHost(self):
      arg0 = np.array([[1., 2.]], np.float32)
      arg1 = np.array([[3., 4.]], np.float32)
      arg0_buffer = self.backend.buffer_from_pyval(arg0)
      arg1_buffer = self.backend.buffer_from_pyval(arg1)
      # Prefetch two buffers using copy_to_host_async, and then retrieve their
      # values using to_py.
      arg0_buffer.copy_to_host_async()
      arg0_buffer.copy_to_host_async()  # Duplicate calls don't do anything.
      arg1_buffer.copy_to_host_async()
      np.testing.assert_equal(arg0, arg0_buffer.to_py())
      np.testing.assert_equal(arg1, arg1_buffer.to_py())
      # copy_to_host_async does nothing after to_py is called.
      arg0_buffer.copy_to_host_async()
      np.testing.assert_equal(arg0, arg0_buffer.to_py())

    def testDevice(self):
      x = np.arange(8, dtype=np.int32)
      for device in self.backend.local_devices():
        buf = self.backend.buffer_from_pyval(x, device=device)
        self.assertEqual(buf.device(), device)
        np.testing.assert_equal(x, buf.to_py())

    def testStandardTypes(self):
      for dtype in standard_dtypes:
        if dtype == bfloat16 or dtype == np.complex128:
          continue
        arr = self.backend.buffer_from_pyval(np.array([0, 1], dtype))
        arr = arr.to_py()
        self.assertEqual(dtype, type(arr[0]))

    def testUnsafeBufferPointer(self):
      if not isinstance(self.backend, xla_client.Client):
        self.skipTest("TPU Driver doesn't support UnsafeBufferPointer().")
      arg0 = np.array([])
      arg1 = np.array([[0., 1., 2.]], np.float32)
      arg2 = np.array([[3., 4., 5.]], bfloat16)
      arg0_buffer = self.backend.buffer_from_pyval(arg0)
      arg1_buffer = self.backend.buffer_from_pyval(arg1)
      arg2_buffer = self.backend.buffer_from_pyval(arg2)
      self.assertGreaterEqual(arg0_buffer.unsafe_buffer_pointer(), 0)
      self.assertGreaterEqual(arg1_buffer.unsafe_buffer_pointer(), 0)
      self.assertGreaterEqual(arg2_buffer.unsafe_buffer_pointer(), 0)

    @unittest.skipIf(cloud_tpu, "not implemented")
    def testClone(self):
      x = np.array([[3., 4., 5.]], np.float32)
      y = self.backend.buffer_from_pyval(x)
      z = y.clone()
      self.assertNotEqual(id(x), id(y))
      np.testing.assert_array_equal(y.to_py(), z.to_py())
      self.assertEqual(y.unsafe_buffer_pointer(), z.unsafe_buffer_pointer())

    @unittest.skipIf(cloud_tpu, "not implemented")
    def testJaxAttributesHaveCorrectDefaults(self):
      x = np.array([[3., 4., 5.]], np.float32)
      y = self.backend.buffer_from_pyval(x)
      self.assertIsNone(y.aval)
      self.assertIsNone(y._device)

  tests.append(BufferTest)

  class SingleOpTest(ComputationTest):
    """Tests for single ops.

    The goal here is smoke testing - to exercise the most basic functionality
    of single XLA ops. As few additional ops as possible are added around the
    op being tested.
""" @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConcatenate(self, dtype): c = self._NewComputation() args = ( ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)), ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)), ) ops.ConcatInDim(c, args, dimension=0) self._ExecuteAndCompareExact( c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)]) # pyformat: disable @parameterized.named_parameters({ "testcase_name": "_{}_{}".format(src_dtype.__name__, dst_dtype.__name__), "src_dtype": src_dtype, "dst_dtype": dst_dtype, } for src_dtype, dst_dtype in itertools.permutations( [np.bool, np.int32, np.int64, np.float32, np.float64], 2)) # pyformat: enable def testConvertElementType(self, src_dtype, dst_dtype): if ((src_dtype in [np.int64, np.float64] or dst_dtype in [np.int64, np.float64]) and self.backend.platform == "tpu"): self.skipTest("TPU doesn't support float64") c = self._NewComputation() x = np.array([0, 1, 0, 0, 1], dtype=src_dtype) ops.ConvertElementType( ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype)) result = xla_client.execute_with_python_values( self.backend.compile(c.build()), (), backend=self.backend) self.assertLen(result, 1) expected = np.array(x, dtype=dst_dtype) self.assertEqual(result[0].shape, expected.shape) self.assertEqual(result[0].dtype, expected.dtype) np.testing.assert_equal(result[0], expected) # pyformat: disable @parameterized.named_parameters( { "testcase_name": "_{}_{}".format(src_dtype.__name__, dst_dtype.__name__), "src_dtype": src_dtype, "dst_dtype": dst_dtype, } for dtypes in [[np.int32, np.float32], [np.int64, np.float64]] for src_dtype, dst_dtype in itertools.permutations(dtypes, 2)) # pyformat: enable def testBitcastConvertType(self, src_dtype, dst_dtype): if (np.float64 in (src_dtype, dst_dtype) and self.backend.platform == "tpu"): self.skipTest("TPU doesn't support float64") c = self._NewComputation() x = np.array([0, 1, 0, 0, 1], dtype=src_dtype) ops.BitcastConvertType( ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype)) result = xla_client.execute_with_python_values( self.backend.compile(c.build()), (), backend=self.backend) self.assertLen(result, 1) expected = x.view(dst_dtype) self.assertEqual(result[0].shape, expected.shape) self.assertEqual(result[0].dtype, expected.dtype) np.testing.assert_equal(result[0], expected) # TODO(b/123523486) implement AllToAll on CPU def DISABLED_testAllToAllOneReplica(self): samples = [ NumpyArrayF32([97.0]), NumpyArrayF32([64.0, 117.0]), NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]), ] for lhs in samples[:1]: c = self._NewComputation() ops.AllToAll(ops.Constant(c, lhs), 0, 0) self._ExecuteAndCompareExact(c, expected=[lhs]) def testCrossReplicaSumOneReplica(self): samples = [ NumpyArrayF32(42.0), NumpyArrayF32([97.0]), NumpyArrayF32([64.0, 117.0]), NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]), ] for lhs in samples: c = self._NewComputation() ops.CrossReplicaSum(ops.Constant(c, lhs)) self._ExecuteAndCompareExact(c, expected=[lhs]) def testReplicaId(self): c = self._NewComputation() _ = ops.ReplicaId(c) self._ExecuteAndCompareExact(c, expected=[0]) def testCrossReplicaSumOneReplicaWithSingletonGroup(self): samples = [ NumpyArrayF32(42.0), NumpyArrayF32([97.0]), NumpyArrayF32([64.0, 117.0]), NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]), ] for lhs in samples: c = self._NewComputation() ops.CrossReplicaSum( ops.Constant(c, lhs), xla_client.make_replica_groups([[0]])) self._ExecuteAndCompareExact(c, expected=[lhs]) # 
TODO(phawkins): np.dot implementation doesn't support bfloat16 @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes if dtype != bfloat16) def testDotMatrixVector(self, dtype): c = self._NewComputation() lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype) rhs = np.array([[10.0], [20.0]], dtype=dtype) ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs)) self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)]) # TODO(phawkins): np.dot implementation doesn't support bfloat16 @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes if dtype != bfloat16) def testDotMatrixMatrix(self, dtype): c = self._NewComputation() lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype) rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype) ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs)) self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)]) def testDotGeneral(self): c = self._NewComputation() rng = np.random.RandomState(0) lhs = NumpyArrayF32(rng.randn(10, 3, 4)) rhs = NumpyArrayF32(rng.randn(10, 4, 5)) dimension_numbers = xla_client.make_dot_dimension_numbers( (([2], [1]), ([0], [0]))) ops.DotGeneral( ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers) self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6) def testDotGeneralWithDotDimensionNumbersProto(self): c = self._NewComputation() rng = np.random.RandomState(0) lhs = NumpyArrayF32(rng.randn(10, 3, 4)) rhs = NumpyArrayF32(rng.randn(10, 4, 5)) dimension_numbers = xla_client.DotDimensionNumbers() dimension_numbers.lhs_contracting_dimensions.append(2) dimension_numbers.rhs_contracting_dimensions.append(1) dimension_numbers.lhs_batch_dimensions.append(0) dimension_numbers.rhs_batch_dimensions.append(0) ops.DotGeneral( ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers) self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6) def testDotGeneralWithPrecisionConfig(self): c = self._NewComputation() rng = np.random.RandomState(0) lhs = NumpyArrayF32(rng.randn(10, 3, 4)) rhs = NumpyArrayF32(rng.randn(10, 4, 5)) dimension_numbers = xla_client.make_dot_dimension_numbers( (([2], [1]), ([0], [0]))) config = xla_client.PrecisionConfig() config.operand_precision.append(config.Precision.HIGH) config.operand_precision.append(config.Precision.HIGHEST) ops.DotGeneral( ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers, precision_config=config) self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6) def testConvGeneralDilatedF32(self): c = self._NewComputation() a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32") lhs = a(1, 1, 2, 3) rhs = a(1, 1, 1, 2) * 10 strides = [1, 1] pads = [(1, 0), (0, 1)] lhs_dilation = (2, 1) rhs_dilation = (1, 1) dimension_numbers = xla_client.make_convolution_dimension_numbers( ("NCHW", "OIHW", "NCHW"), 2) ops.ConvGeneralDilated( ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads, lhs_dilation, rhs_dilation, dimension_numbers) result = np.array([[[ [0., 0., 0.], [10., 20., 0.], [0., 0., 0.], [40., 50., 0.], ]]]) self._ExecuteAndCompareClose(c, expected=[result]) def testConvGeneralDilatedF32WithPrecisionConfig(self): c = self._NewComputation() a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32") lhs = a(1, 1, 2, 3) rhs = a(1, 1, 1, 2) * 10 strides = [1, 1] pads = [(1, 0), (0, 1)] lhs_dilation = (2, 1) rhs_dilation = (1, 1) dimension_numbers = 
xla_client.make_convolution_dimension_numbers( ("NCHW", "OIHW", "NCHW"), 2) config = xla_client.PrecisionConfig() config.operand_precision.append(config.Precision.HIGHEST) config.operand_precision.append(config.Precision.DEFAULT) ops.ConvGeneralDilated( ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads, lhs_dilation, rhs_dilation, dimension_numbers, precision_config=config) result = np.array([[[ [0., 0., 0.], [10., 20., 0.], [0., 0., 0.], [40., 50., 0.], ]]]) self._ExecuteAndCompareClose(c, expected=[result]) def testConvGeneralDilatedPermutedF32(self): c = self._NewComputation() a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32") lhs = a(1, 1, 2, 3) rhs = a(1, 1, 1, 2) * 10 strides = [1, 1] pads = [(1, 0), (0, 1)] lhs_dilation = (2, 1) rhs_dilation = (1, 1) dimension_numbers = xla_client.make_convolution_dimension_numbers( ("NHWC", "OIHW", "CWNH"), 2) ops.ConvGeneralDilated( ops.Constant(c, np.transpose(lhs, (0, 2, 3, 1))), ops.Constant(c, rhs), strides, pads, lhs_dilation, rhs_dilation, dimension_numbers) result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.], [40., 50., 0.]]]]) self._ExecuteAndCompareClose( c, expected=[np.transpose(result, (1, 3, 0, 2))]) def testConvGeneralDilatedGroupedConvolutionF32(self): c = self._NewComputation() a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32") lhs = a(1, 2, 2, 3) rhs = a(2, 1, 1, 2) * 10 strides = [1, 1] pads = [(1, 0), (0, 1)] lhs_dilation = (2, 1) rhs_dilation = (1, 1) dimension_numbers = xla_client.make_convolution_dimension_numbers( ("NCHW", "OIHW", "NCHW"), 2) feature_group_count = 2 ops.ConvGeneralDilated( ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads, lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count) result = np.array([[[ [0., 0., 0.], [10., 20., 0.], [0., 0., 0.], [40., 50., 0.], ], [ [0., 0., 0.], [330., 380., 160.], [0., 0., 0.], [480., 530., 220.], ]]]) self._ExecuteAndCompareClose(c, expected=[result]) def testBooleanNot(self): c = self._NewComputation() arr = NumpyArrayBool([True, False, True]) ops.Not(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[~arr]) def testPopulationCount(self): c = self._NewComputation() arr = NumpyArrayS32([3, 0, 1]) ops.PopulationCount(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])]) def testCountLeadingZeros(self): c = self._NewComputation() arr = NumpyArrayS32([0x7FFF, 0x12345678]) ops.Clz(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[[17, 3]]) def testExp(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, 12.1]) ops.Exp(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.exp(arr)]) def testExpm1(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, 12.1]) ops.Expm1(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)]) def testRound(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, 12.1]) ops.Round(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.round(arr)]) def testLog(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, 12.1]) ops.Log(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.log(arr)]) def testLog1p(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, 12.1]) ops.Log1p(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)]) def testNeg(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, 12.1]) ops.Neg(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[-arr]) def 
testFloor(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, 12.1]) ops.Floor(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.floor(arr)]) def testCeil(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, 12.1]) ops.Ceil(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)]) def testAbs(self): c = self._NewComputation() arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.]) ops.Abs(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.abs(arr)]) def testTanhF32(self): c = self._NewComputation() arr = NumpyArrayF32([-0.2, 3.3, 12.1, 0.1, 0.0001]) ops.Tanh(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)]) def testTanhF64(self): if self.backend.platform == "tpu": self.skipTest("TPU doesn't support 64bit tanh") c = self._NewComputation() arr = NumpyArrayF64([-0.2, 3.3, 12.1, 0.1, 0.0001]) ops.Tanh(ops.Constant(c, arr)) self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)], rtol=1e-12) def testTranspose(self): def _TransposeAndTest(array, permutation): c = self._NewComputation() ops.Transpose(ops.Constant(c, array), permutation) expected = np.transpose(array, permutation) self._ExecuteAndCompareClose(c, expected=[expected]) _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1]) _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0]) _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1]) _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0]) arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32) for permutation in itertools.permutations(range(arr.ndim)): _TransposeAndTest(arr, permutation) _TransposeAndTest(np.asfortranarray(arr), permutation) def testEq(self): c = self._NewComputation() ops.Eq( ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])), ops.Constant(c, NumpyArrayS32([4, 2, 3, 1]))) self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]]) def testNe(self): c = self._NewComputation() ops.Ne( ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])), ops.Constant(c, NumpyArrayS32([4, 2, 3, 1]))) self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]]) ops.Ne( ops.Constant(c, NumpyArrayF32([-2.0, 0.0, float("nan"), float("nan")])), ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0, float("nan")]))) self._ExecuteAndAssertWith( np.testing.assert_allclose, c, (), expected=[[True, False, True, True]]) def testGt(self): c = self._NewComputation() ops.Gt( ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])), ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12]))) self._ExecuteAndCompareExact( c, expected=[[False, True, True, False, False]]) def testGe(self): c = self._NewComputation() ops.Ge( ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])), ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12]))) self._ExecuteAndCompareExact( c, expected=[[True, True, True, False, False]]) def testLt(self): c = self._NewComputation() ops.Lt( ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])), ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12]))) self._ExecuteAndCompareExact( c, expected=[[False, False, False, True, True]]) def testLe(self): c = self._NewComputation() ops.Le( ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])), ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12]))) self._ExecuteAndCompareExact( c, expected=[[True, False, False, True, True]]) def testMax(self): c = self._NewComputation() ops.Max( ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])), ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0]))) self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]]) def 
testMaxExplicitBroadcastDim0(self): c = self._NewComputation() ops.Max( ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), ops.Constant(c, NumpyArrayF32([3, 4, 5])), broadcast_dimensions=(0,)) self._ExecuteAndCompareExact( c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]]) def testMaxExplicitBroadcastDim1(self): c = self._NewComputation() ops.Max( ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), ops.Constant(c, NumpyArrayF32([3, 4, 5])), broadcast_dimensions=(1,)) self._ExecuteAndCompareExact( c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]]) def testMin(self): c = self._NewComputation() ops.Min( ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])), ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0]))) self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]]) def testPad(self): c = self._NewComputation() ops.Pad( ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])), ops.Constant(c, NumpyArrayF32(0.0)), xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)])) self._ExecuteAndCompareClose( c, expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0], [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]) def testPadWithPaddingConfig(self): c = self._NewComputation() padding_config = xla_client.PaddingConfig() for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]: dimension = xla_client.PaddingConfigDimension() dimension.edge_padding_low = lo dimension.edge_padding_high = hi dimension.interior_padding = interior padding_config.dimensions.append(dimension) ops.Pad( ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])), ops.Constant(c, NumpyArrayF32(0.0)), padding_config) self._ExecuteAndCompareClose( c, expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0], [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]) def testReshape(self): c = self._NewComputation() ops.Reshape( ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])), dimensions=[0, 1], new_sizes=[2, 3]) self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]]) def testCollapse(self): c = self._NewComputation() ops.Collapse( ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])), dimensions=[1, 2]) self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]]) def testRev(self): c = self._NewComputation() ops.Rev( ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])), dimensions=[0, 2]) self._ExecuteAndCompareExact( c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]]) def testReducePrecision(self): c = self._NewComputation() ops.ReducePrecision( ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])), exponent_bits=8, mantissa_bits=7) self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]]) def testClampF32(self): c = self._NewComputation() ops.Clamp( ops.Constant(c, NumpyArrayF32(-1)), ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])), ops.Constant(c, NumpyArrayF32(2))) self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]]) def testClampS32(self): c = self._NewComputation() ops.Clamp( ops.Constant(c, NumpyArrayS32(-1)), ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])), ops.Constant(c, NumpyArrayS32(2))) self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]]) def testSelect(self): c = self._NewComputation() ops.Select( ops.Constant(c, NumpyArrayBool([True, False, False, True, False])), ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])), ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5]))) self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]]) def testSlice(self): c = 
self._NewComputation() ops.Slice( ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0], [3, 2], [1, 1]) self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]]) def testSliceInDim(self): c = self._NewComputation() ops.SliceInDim( ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), start_index=1, limit_index=2, stride=1, dimno=1) self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]]) ops.SliceInDim( ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), start_index=0, limit_index=3, stride=2, dimno=0) self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]]) def testDynamicSlice(self): c = self._NewComputation() ops.DynamicSlice( ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2]) self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]]) def testDynamicUpdateSlice(self): c = self._NewComputation() ops.DynamicUpdateSlice( ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])), [ops.Constant(c, NumpyArrayS32([1, 1]))]) self._ExecuteAndCompareExact( c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]]) def testTuple(self): c = self._NewComputation() ops.Tuple(c, [ ops.Constant(c, np.int32(42)), ops.Constant(c, NumpyArrayF32([1.0, 2.0])), ops.Constant(c, NumpyArrayBool([True, False, False, True])) ]) result = xla_client.execute_with_python_values( self.backend.compile(c.build()), (), backend=self.backend) self.assertLen(result, 3) np.testing.assert_equal(result[0], 42) np.testing.assert_allclose(result[1], [1.0, 2.0]) np.testing.assert_equal(result[2], [True, False, False, True]) def testGetTupleElement(self): c = self._NewComputation() ops.GetTupleElement( ops.Tuple(c, [ ops.Constant(c, np.int32(42)), ops.Constant(c, NumpyArrayF32([1.0, 2.0])), ops.Constant(c, NumpyArrayBool([True, False, False, True])) ]), 1) self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]]) def testBroadcast(self): c = self._NewComputation() ops.Broadcast( ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,)) self._ExecuteAndCompareExact( c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]]) def testBroadcastInDim(self): c = self._NewComputation() ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0]) self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]]) ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1]) self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]]) def testRngNormal(self): shape = (2, 3) c = self._NewComputation() ops.RngNormal( ops.Constant(c, NumpyArrayF32(0.)), ops.Constant(c, NumpyArrayF32(1.)), shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32, shape)) result = xla_client.execute_with_python_values( self.backend.compile(c.build()), (), backend=self.backend) # since the result is random, we just check shape and uniqueness self.assertLen(result, 1) self.assertEqual(result[0].shape, shape) self.assertLen(np.unique(result[0]), np.prod(shape)) def testRngUniformF32(self): lo, hi = 2., 4. 
shape = (2, 3) c = self._NewComputation() ops.RngUniform( ops.Constant(c, NumpyArrayF32(lo)), ops.Constant(c, NumpyArrayF32(hi)), shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32, shape)) result = xla_client.execute_with_python_values( self.backend.compile(c.build()), (), backend=self.backend) # since the result is random, we just check shape, uniqueness, and range self.assertLen(result, 1) self.assertEqual(result[0].shape, shape) self.assertLen(np.unique(result[0]), np.prod(shape)) self.assertTrue(np.all(lo <= result[0])) self.assertTrue(np.all(result[0] < hi)) def testRngUniformS32(self): lo, hi = 2, 4 shape = (2, 3) c = self._NewComputation() ops.RngUniform( ops.Constant(c, NumpyArrayS32(lo)), ops.Constant(c, NumpyArrayS32(hi)), shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32, shape)) result = xla_client.execute_with_python_values( self.backend.compile(c.build()), (), backend=self.backend) # since the result is random, we just check shape, integrality, and range self.assertLen(result, 1) self.assertEqual(result[0].shape, shape) self.assertEqual(result[0].dtype, np.int32) self.assertTrue(np.all(lo <= result[0])) self.assertTrue(np.all(result[0] < hi)) def testCholesky(self): l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]], dtype=np.float32) c = self._NewComputation() ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T)))) self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4) def testSort(self): keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32) c = self._NewComputation() ops.Sort(c, [ops.Constant(c, keys)], is_stable=True) self._ExecuteAndCompareClose( c, expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)]) def testSortKeyVal(self): keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32) values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32) c = self._NewComputation() ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0) result = xla_client.execute_with_python_values( self.backend.compile(c.build()), (), backend=self.backend) self.assertLen(result, 2) np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]]) np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]]) def testSortCustomComparator(self): b = self._NewComputation("comparator") p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))) q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))) p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0))) q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0))) ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1))) comparator = b.build() keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32) values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32) c = self._NewComputation() ops.Sort( c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=1, comparator=comparator) result = xla_client.execute_with_python_values( self.backend.compile(c.build()), (), backend=self.backend) self.assertLen(result, 2) np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]]) np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]]) def testQR(self): a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]], dtype=np.float32) c = self._NewComputation() ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True)) q, r = self._Execute(c, ()) np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4) def testEigh(self): a = np.array([[4, 6, 8, 
10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]], dtype=np.float32) a = (a + a.T) / 2 c = self._NewComputation() ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True)) # TODO(b/129396575): Turn this test back on when it passes without # fastmath. # v, w = self._Execute(c, ()) # self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3) def testSVD(self): a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]], dtype=np.float32) c = self._NewComputation() ops.Tuple(c, ops.SVD(ops.Constant(c, a))) u, d, v = self._Execute(c, ()) self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3) def testTriangularSolve(self): a_vals = np.array( [[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]], dtype=np.float32) b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=np.float32) c = self._NewComputation() ops.TriangularSolve( ops.Constant(c, a_vals), ops.Constant(c, b_vals), left_side=False, lower=True, transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE, unit_diagonal=False) self._ExecuteAndCompareClose( c, expected=[ np.array([ [0.5, 0.08333334, 0.04629629, 0.03367003], [2.5, -0.25, -0.1388889, -0.1010101], [4.5, -0.58333331, -0.32407406, -0.23569024], ], dtype=np.float32) ], rtol=1e-4) def testIsConstant(self): c = self._NewComputation() a = ops.Constant(c, np.int32(3)) b = ops.Constant(c, np.int32(1)) x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0))) const_expr = ops.Sub(b, a) non_const_expr = ops.Mul(const_expr, x) self.assertTrue(c.is_constant(const_expr)) self.assertFalse(c.is_constant(non_const_expr)) def testGather(self): a = np.arange(9).astype(np.int32).reshape((3, 3)) indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32) dnums = xla_client.GatherDimensionNumbers() dnums.offset_dims.append(1) dnums.offset_dims.append(2) dnums.start_index_map.append(0) dnums.start_index_map.append(1) dnums.index_vector_dim = 2 c = self._NewComputation() ops.Gather( ops.Constant(c, a), ops.Constant(c, indices), dnums, slice_sizes=[1, 1]) g, = self._Execute(c, ()) expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32) np.testing.assert_allclose(g, expected, rtol=1e-4) def testFft(self): if self.backend.platform == "tpu": self.skipTest("TPU only supports 1D FFT") shape = [2, 3, 4, 5] rng = np.random.RandomState(0) a = rng.randn(*shape) + 1.0j * rng.randn(*shape) a = a.astype(np.complex64) # FFT c = self._NewComputation() ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:]) self._ExecuteAndCompareClose( c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4) # IFFT c = self._NewComputation() ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:]) self._ExecuteAndCompareClose( c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4) # RFFT b = rng.randn(*shape).astype(np.float32) c = self._NewComputation() ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:]) self._ExecuteAndCompareClose( c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4) # IRFFT c = self._NewComputation() ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8]) self._ExecuteAndCompareClose( c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4) def testNextAfter(self): c = self._NewComputation() ops.NextAfter( ops.Constant(c, np.array([1, 2], dtype=np.float32)), ops.Constant(c, np.array([2, 1], dtype=np.float32))) out, = self._Execute(c, ()) eps = np.finfo(np.float32).eps np.testing.assert_equal( np.array([eps + 1, 2 - eps], dtype=np.float32), out) 
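    # Note on testNextAfter's expected values: np.finfo(np.float32).eps is
    # 2**-23, which is also the spacing between adjacent float32 values in
    # [1, 2). NextAfter(1, 2) therefore returns 1 + eps and NextAfter(2, 1)
    # returns 2 - eps, matching the assertion above.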
@parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testRegularizedIncompleteBeta(self, dtype): x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538], dtype=dtype) a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606], dtype=dtype) b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677], dtype=dtype) c = self._NewComputation() ops.RegularizedIncompleteBeta( ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x)) expected = np.array( [0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155]) self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2) tests.append(SingleOpTest) class EmbeddedComputationsTest(ComputationTest): """Tests for XLA graphs with embedded computations (such as maps).""" def _CreateConstantComputation(self, in_dtype, out_dtype): """Computation (A) -> B that returns a constant 1 for any input.""" c = self._NewComputation("constant_{}_{}_one".format( in_dtype.__name__, out_dtype.__name__)) ops.Parameter(c, 0, xla_client.shape_from_pyval(np.array(0, dtype=in_dtype))) ops.Constant(c, out_dtype(1)) return c.build() def _CreateMulBy2Computation(self, dtype): """Computation (dtype) -> dtype that multiplies its parameter by 2.""" c = self._NewComputation("mul_f32_by2") ops.Mul( ops.Parameter( c, 0, xla_client.shape_from_pyval(np.array( 0, dtype=dtype)).with_major_to_minor_layout_if_absent()), ops.Constant(c, dtype(2.0))) return c.build() def _CreateMulF32ByParamComputation(self): """Computation (f32) -> f32 that multiplies one parameter by the other.""" c = self._NewComputation("mul_f32_by_param") ops.Mul( ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))), ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))) return c.build() def _CreateBinaryAddComputation(self, dtype): """Computation (dtype, dtype) -> dtype that adds its two parameters.""" c = self._NewComputation("add_param0_by_param1") shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype)) shape = shape.with_major_to_minor_layout_if_absent() ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape)) return c.build() def _CreateBinaryGeComputation(self, dtype): """Computation (dtype, dtype) -> bool that tests param0 >= param1.""" c = self._NewComputation("param0_lt_param1") shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype)) shape = shape.with_major_to_minor_layout_if_absent() ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape)) return c.build() def _MakeSample3DArray(self, dtype): return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]], dtype=dtype) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testCall(self, dtype): c = self._NewComputation() ops.Call( c, self._CreateMulBy2Computation(dtype), operands=(ops.Constant(c, dtype(5.0)),)) self._ExecuteAndCompareClose(c, expected=[10.0]) @parameterized.named_parameters({ "testcase_name": "_{}_{}".format(in_dtype.__name__, out_dtype.__name__), "in_dtype": in_dtype, "out_dtype": out_dtype, } for in_dtype, out_dtype in [[np.float32, np.int32]]) def testMapEachElementToConstant(self, in_dtype, out_dtype): c = self._NewComputation() ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=in_dtype))], self._CreateConstantComputation(in_dtype, out_dtype), [0]) self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]]) 
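    # Note: ops.Map applies the scalar subcomputation elementwise along the
    # listed dimensions ([0] here), so mapping the constant-one computation
    # over a length-4 vector yields [1, 1, 1, 1] whatever the input values are.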
@parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testMapMulBy2(self, dtype): if dtype == np.float64 and self.backend.platform == "tpu": self.skipTest("TPU doesn't support float64") c = self._NewComputation() ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))], self._CreateMulBy2Computation(dtype), [0]) self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testSimpleMapChain(self, dtype): if dtype == np.float64 and self.backend.platform == "tpu": self.skipTest("TPU doesn't support float64") # Chains a map of constant-out with a map of mul-by-2 c = self._NewComputation() const = ops.Map( c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))], self._CreateConstantComputation(dtype, dtype), [0]) ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0]) self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]]) # TODO(b/154752816): bfloat16 crashes in evaluator. @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes if dtype != bfloat16) def testDivVectorsWithMap(self, dtype): def DivComputation(): c = self._NewComputation("div_param0_by_param1") shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype)) ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape)) return c.build() c = self._NewComputation() ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)), ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0], dtype=dtype))), DivComputation(), [0]) self._ExecuteAndCompareClose( c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testSelectAndScatter(self, dtype): if dtype == np.float64 and self.backend.platform == "tpu": self.skipTest("TPU doesn't support float64") c = self._NewComputation() operand = ops.Constant( c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype)) window_dimensions = (2, 1) window_strides = (1, 2) padding = xla_client.window_padding_type_to_pad_values( xla_client.PaddingType.VALID, c.get_shape(operand).dimensions(), window_dimensions, window_strides) ops.SelectAndScatterWithGeneralPadding( operand, select=self._CreateBinaryGeComputation(dtype), window_dimensions=window_dimensions, window_strides=window_strides, padding=padding, source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)), init_value=ops.Constant(c, np.array(1, dtype=dtype)), scatter=self._CreateBinaryAddComputation(dtype)) self._ExecuteAndCompareClose( c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testReduce1DtoScalar(self, dtype): c = self._NewComputation() ops.Reduce( c, operands=[ ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)) ], init_values=[ops.Constant(c, dtype(0))], computation=self._CreateBinaryAddComputation(dtype), dimensions_to_reduce=[0]) self._ExecuteAndCompareClose(c, expected=[10]) # TODO(phawkins): test comparison harness doesn't support bfloat16 @parameterized.named_parameters({ "testcase_name": "_{}_dim{}".format(dtype.__name__, dim), "dtype": dtype, "dim": dim, } for dtype in float_dtypes if dtype != bfloat16 for dim in range(2)) def testReduce2DTo1D(self, 
dtype, dim): input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype) c = self._NewComputation() ops.Reduce( c, operands=[ops.Constant(c, input_array)], init_values=[ops.Constant(c, dtype(0))], computation=self._CreateBinaryAddComputation(dtype), dimensions_to_reduce=[dim]) self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dim)]) @parameterized.named_parameters({ "testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims), "dtype": dtype, "dims": tuple(dims) } for dtype in float_dtypes for dims in itertools.permutations(range(3))) def testReduce3DAllPossibleWaysF32(self, dtype, dims): input_array = self._MakeSample3DArray(dtype) c = self._NewComputation() ops.Reduce( c, operands=[ops.Constant(c, input_array)], init_values=[ops.Constant(c, dtype(0))], computation=self._CreateBinaryAddComputation(dtype), dimensions_to_reduce=dims) self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dims)]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testReduceWindowValidUnitStrides(self, dtype): if dtype == np.float64 and self.backend.platform == "tpu": self.skipTest("TPU doesn't support float64") input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype) c = self._NewComputation() window_dimensions = (2, 1) window_strides = (1, 1) padding = xla_client.window_padding_type_to_pad_values( xla_client.PaddingType.VALID, input_array.shape, window_dimensions, window_strides) ops.ReduceWindowWithGeneralPadding( operand=ops.Constant(c, input_array), init_value=ops.Constant(c, dtype(0)), computation=self._CreateBinaryAddComputation(dtype), window_dimensions=window_dimensions, window_strides=window_strides, base_dilations=[], window_dilations=[], padding=padding) self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testReduceWindowSameUnitStrides(self, dtype): if dtype == np.float64 and self.backend.platform == "tpu": self.skipTest("TPU doesn't support float64") input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype) c = self._NewComputation() window_dimensions = (2, 1) window_strides = (1, 1) padding = xla_client.window_padding_type_to_pad_values( xla_client.PaddingType.SAME, input_array.shape, window_dimensions, window_strides) ops.ReduceWindowWithGeneralPadding( operand=ops.Constant(c, input_array), init_value=ops.Constant(c, dtype(0)), computation=self._CreateBinaryAddComputation(dtype), window_dimensions=window_dimensions, window_strides=window_strides, base_dilations=[], window_dilations=[], padding=padding) self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testReduceWindowValidGeneralStrides(self, dtype): if dtype == np.float64 and self.backend.platform == "tpu": self.skipTest("TPU doesn't support float64") input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype) c = self._NewComputation() window_dimensions = (2, 1) window_strides = (1, 2) padding = xla_client.window_padding_type_to_pad_values( xla_client.PaddingType.VALID, input_array.shape, window_dimensions, window_strides) ops.ReduceWindowWithGeneralPadding( operand=ops.Constant(c, input_array), init_value=ops.Constant(c, dtype(0)), computation=self._CreateBinaryAddComputation(dtype), 
window_dimensions=window_dimensions, window_strides=window_strides, base_dilations=[], window_dilations=[], padding=padding) self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testWhile(self, dtype): def LessThan10Cond(): c = self._NewComputation("test_lt_10") shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype)) ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.))) return c.build() cond = LessThan10Cond() body = self._CreateMulBy2Computation(dtype) c = self._NewComputation() init = ops.Constant(c, dtype(1.)) ops.While(cond, body, init) self._ExecuteAndCompareClose(c, expected=[16.]) def testConditionalTrue(self): c = self._NewComputation() pred = ops.Constant(c, np.bool_(True)) true_operand = ops.Constant(c, np.float32(3.)) true_computation = self._CreateMulBy2Computation(np.float32) false_operand = ops.Constant(c, np.float32(2.)) false_computation = self._CreateConstantComputation( np.float32, np.float32) ops.Conditional(pred, true_operand, true_computation, false_operand, false_computation) self._ExecuteAndCompareClose(c, expected=[6.]) def testConditionalFalse(self): c = self._NewComputation() pred = ops.Constant(c, np.bool_(False)) true_operand = ops.Constant(c, np.float32(3.)) true_computation = self._CreateMulBy2Computation(np.float32) false_operand = ops.Constant(c, np.float32(2.)) false_computation = self._CreateConstantComputation( np.float32, np.float32) ops.Conditional(pred, true_operand, true_computation, false_operand, false_computation) self._ExecuteAndCompareClose(c, expected=[1.]) @unittest.skipIf(cloud_tpu, "not implemented") def testInfeedS32Values(self): to_infeed = NumpyArrayS32([1, 2, 3, 4]) c = self._NewComputation() ops.GetTupleElement( ops.InfeedWithToken( ops.CreateToken(c), xla_client.shape_from_pyval( to_infeed[0]).with_major_to_minor_layout_if_absent()), 0) compiled_c = self.backend.compile(c.build()) device = self.backend.local_devices()[0] for item in to_infeed: device.transfer_to_infeed(item) for item in to_infeed: result, = xla_client.execute_with_python_values( compiled_c, (), backend=self.backend) self.assertEqual(result, item) @unittest.skipIf(cloud_tpu, "not implemented") def testInfeedTuple(self): to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]])) c = self._NewComputation() ops.GetTupleElement( ops.InfeedWithToken( ops.CreateToken(c), xla_client.shape_from_pyval( to_infeed).with_major_to_minor_layout_if_absent()), 0) compiled_c = self.backend.compile(c.build()) device = self.backend.local_devices()[0] device.transfer_to_infeed(to_infeed) result = xla_client.execute_with_python_values( compiled_c, (), backend=self.backend) self.assertLen(result, 2) np.testing.assert_equal(result[0], to_infeed[0]) np.testing.assert_equal(result[1], to_infeed[1]) @unittest.skipIf(cloud_tpu, "not implemented") def testInfeedThenOutfeedS32(self): to_round_trip = NumpyArrayS32([1, 2, 3, 4]) c = self._NewComputation() x_and_token = ops.InfeedWithToken( ops.CreateToken(c), xla_client.shape_from_pyval( to_round_trip[0]).with_major_to_minor_layout_if_absent()) x = ops.GetTupleElement(x_and_token, 0) token = ops.GetTupleElement(x_and_token, 1) outfeed_shape = xla_client.shape_from_pyval( to_round_trip[0]).with_major_to_minor_layout_if_absent() ops.OutfeedWithToken(x, token, outfeed_shape) compiled_c = self.backend.compile(c.build()) device = self.backend.local_devices()[0] for want in to_round_trip: execution = 
threading.Thread(target=lambda: compiled_c.execute([])) execution.start() device.transfer_to_infeed(want) got = device.transfer_from_outfeed(outfeed_shape) execution.join() self.assertEqual(want, got) def testScatter(self): a = np.arange(9).astype(np.int32).reshape((3, 3)) scatter_indices = np.array([0, 2], dtype=np.int32) updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32) dnums = xla_client.ScatterDimensionNumbers() dnums.update_window_dims.append(1) dnums.inserted_window_dims.append(0) dnums.scatter_dims_to_operand_dims.append(0) dnums.index_vector_dim = 1 c = self._NewComputation() ops.Scatter( ops.Constant(c, a), ops.Constant(c, scatter_indices), ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32), dnums) expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32) self._ExecuteAndCompareClose(c, expected=[expected]) class DeviceTest(ComputationTest): def testPlatform(self): for device in self.backend.local_devices(): self.assertEqual(device.platform, self.backend.platform) tests.append(DeviceTest) class ErrorTest(ComputationTest): def setUp(self): super(ErrorTest, self).setUp() self.f32_scalar_2 = NumpyArrayF32(2.0) self.s32_scalar_2 = NumpyArrayS32(2) def testCompileWithWrongElementTypeInLayout(self): c = self._NewComputation() c.set_op_metadata(xla_client.CurrentSourceInfoMetadata()) ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2)) c.clear_op_metadata() options = xla_client.CompileOptions() options.argument_layouts = [ xla_client.Shape.array_shape(np.dtype(np.float32), []) ] def TestFun(): return self.backend.compile(c.build(), compile_options=options) self.assertRaisesRegex( RuntimeError, r".*Invalid argument shape.*" r"expected s32\[\], got f32\[\].*", TestFun) def testInvokeWithWrongElementType(self): c = self._NewComputation() c.set_op_metadata(xla_client.CurrentSourceInfoMetadata()) ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2)) c.clear_op_metadata() def TestFun(): return xla_client.execute_with_python_values( self.backend.compile(c.build()), [self.f32_scalar_2], self.backend) self.assertRaisesRegex( RuntimeError, r"Invalid argument: Argument does not match.*" r"want s32\[\], got f32\[\].*", TestFun) tests.append(EmbeddedComputationsTest) class ComputationRootTest(ComputationTest): """Tests related to setting the root of the computation.""" def testComputationRootDifferentFromLastOp(self): c = self._NewComputation() x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0))) result = ops.Add(x, ops.Constant(c, np.float32(3.14))) ops.Add(result, ops.Constant(c, np.float32(1.618))) arg = NumpyArrayF32(1.0) compiled_c = self.backend.compile(c.build(result)) ans, = xla_client.execute_with_python_values( compiled_c, [arg], backend=self.backend) np.testing.assert_allclose(ans, 4.14) tests.append(ComputationRootTest) class SetShardingTest(ComputationTest): """Tests related to set OpSharding.""" def testSetSharding(self): c = self._NewComputation() sharding = xla_client.OpSharding() sharding.type = sharding.type.REPLICATED sharding.tile_assignment_dimensions.extend([1]) sharding.tile_assignment_devices.extend([0]) c.set_sharding(sharding) x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0))) c.clear_sharding() result = ops.Add(x, ops.Constant(c, np.float32(3.14))) ops.Add(result, ops.Constant(c, np.float32(1.618))) arg = NumpyArrayF32(1.0) compiled_c = self.backend.compile(c.build(result)) ans, = xla_client.execute_with_python_values( compiled_c, [arg], 
backend=self.backend) np.testing.assert_allclose(ans, 4.14) tests.append(SetShardingTest) testcase_shapes = [ (), (1,), (2, 3), (2, 0), (0, 7), (4, 1, 2), (2, 1, 3), (2, 4, 1), (3, 1), (1, 3), ] def FormatShapeAndDtype(shape, dtype): return "_{}[{}]".format(np.dtype(dtype).name, ",".join(map(str, shape))) class DLPackTest(parameterized.TestCase): def setUp(self): super(DLPackTest, self).setUp() self.backend = xla_backend() if self.backend.platform not in ("cpu", "gpu"): self.skipTest("DLPack requires CPU or GPU") # pylint: disable=g-complex-comprehension # pyformat: disable @parameterized.named_parameters({ "testcase_name": "{}_own={}".format(FormatShapeAndDtype(shape, dtype), take_ownership), "dtype": dtype, "shape": shape, "take_ownership": take_ownership } for dtype in dlpack_dtypes for shape in testcase_shapes for take_ownership in [False, True]) # pyformat: enable def testRoundTrip(self, dtype, shape, take_ownership): if dtype == np.bool_: x = np.random.randint(0, 2, size=shape).astype(np.bool_) else: x = np.array(np.random.rand(*shape) * 100, dtype=dtype) buffer = self.backend.buffer_from_pyval(x) dlt = xla_client._xla.buffer_to_dlpack_managed_tensor( buffer, take_ownership=take_ownership) del buffer # Free "buffer" to make sure dlt retains ownership. self.assertEqual(type(dlt).__name__, "PyCapsule") y = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend) np.testing.assert_array_equal( x.astype(np.uint8) if dtype == np.bool_ else x, y.to_py()) def testTensorsCanBeConsumedOnceOnly(self): x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32) buffer = self.backend.buffer_from_pyval(x) dlt = xla_client._xla.buffer_to_dlpack_managed_tensor( buffer, take_ownership=True) def ConsumeDLPackTensor(): _ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend) ConsumeDLPackTensor() self.assertRaisesRegex( RuntimeError, ".*a DLPack tensor may be consumed at most once.*", ConsumeDLPackTensor) def testTensorsCanBeOwnedOnceOnly(self): x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32) buffer = self.backend.buffer_from_pyval(x) _ = xla_client._xla.buffer_to_dlpack_managed_tensor( buffer, take_ownership=True) self.assertTrue(buffer.is_deleted()) with self.assertRaisesRegex( RuntimeError, "Cannot convert deleted/invalid buffer to DLPack tensor.*"): _ = xla_client._xla.buffer_to_dlpack_managed_tensor( buffer, take_ownership=True) def testNonOwnedDlpackCanBeViewedTwice(self): x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32) buffer = self.backend.buffer_from_pyval(x) d1 = xla_client._xla.buffer_to_dlpack_managed_tensor( buffer, take_ownership=False) d2 = xla_client._xla.buffer_to_dlpack_managed_tensor( buffer, take_ownership=False) y = xla_client._xla.dlpack_managed_tensor_to_buffer(d1, self.backend) z = xla_client._xla.dlpack_managed_tensor_to_buffer(d2, self.backend) del d1, d2 np.testing.assert_array_equal(x, buffer.to_py()) np.testing.assert_array_equal(x, y.to_py()) np.testing.assert_array_equal(x, z.to_py()) tests.append(DLPackTest) class BufferProtocolTest(parameterized.TestCase): def setUp(self): super(BufferProtocolTest, self).setUp() self.backend = xla_backend() if self.backend.platform != "cpu": self.skipTest("Test requires CPU") # pylint: disable=g-complex-comprehension @parameterized.named_parameters({ "testcase_name": FormatShapeAndDtype(shape, dtype), "dtype": dtype, "shape": shape } for dtype in standard_dtypes if dtype != bfloat16 for shape in testcase_shapes) def testRoundTrip(self, dtype, shape): x = np.array(np.random.rand(*shape) * 
100, dtype=dtype) x_ptr = x.__array_interface__["data"][0] buffer = self.backend.buffer_from_pyval( x, host_buffer_semantics=xla_client.HostBufferSemantics.ZERO_COPY) y = np.array(buffer, copy=False) y_ptr = y.__array_interface__["data"][0] np.testing.assert_array_equal(x, y) # If the input was sufficiently aligned, the input and output should # alias. self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr) self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer()) during_call = xla_client.HostBufferSemantics.IMMUTABLE_ONLY_DURING_CALL buffer2 = self.backend.buffer_from_pyval( x, host_buffer_semantics=during_call) z = np.array(buffer2, copy=False) self.assertNotEqual(x.__array_interface__["data"][0], z.__array_interface__["data"][0]) def testDeleteWithActiveView(self): x = np.random.randn(20, 10) buffer = self.backend.buffer_from_pyval(x) buffer_ptr = buffer.unsafe_buffer_pointer() y = np.array(buffer, copy=False) buffer.delete() # It is still legal to access `y`; the array view must keep it alive. np.testing.assert_array_equal(x, y) self.assertEqual(y.__array_interface__["data"][0], buffer_ptr) tests.append(BufferProtocolTest) class TracebackTest(absltest.TestCase): def setUp(self): super(TracebackTest, self).setUp() self.backend = xla_backend() def testNoTracebacksIfDisabled(self): with xla_client.tracebacks(enabled=False): self.assertEqual(None, xla_client.Traceback.get_traceback()) buffer = self.backend.buffer_from_pyval(np.array(7, np.int32)) self.assertEqual(None, buffer.traceback) b = xla_client.XlaBuilder("computation") ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2))) e = self.backend.compile(b.build()) self.assertEqual(None, e.traceback) def assertIsTracebackContaining(self, tb, function): self.assertIsInstance(tb, xla_client.Traceback) self.assertIn(function, str(tb)) self.assertTrue(any(f.function_name == function for f in tb.frames)) def testTracebacks(self): with xla_client.tracebacks(enabled=True): tb = xla_client.Traceback.get_traceback() self.assertIsTracebackContaining(tb, "testTracebacks") # Tracebacks are not implemented on the TPU driver extension's variant # of buffers and executables. 
if not isinstance(self.backend, xla_client.Client): return buffer = self.backend.buffer_from_pyval(np.array(7, np.int32)) self.assertIsTracebackContaining(buffer.traceback, "testTracebacks") b = xla_client.XlaBuilder("computation") ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2))) e = self.backend.compile(b.build()) self.assertIsTracebackContaining(e.traceback, "testTracebacks") def testNestedFunction(self): def AFunction(): def AnotherFunction(): return xla_client.Traceback.get_traceback() return AnotherFunction() with xla_client.tracebacks(enabled=True): tb = AFunction() self.assertIsInstance(tb, xla_client.Traceback) frames = tb.frames i = next( i for (i, f) in enumerate(frames) if f.function_name == "AFunction") self.assertEqual(frames[i - 1].function_name, "AnotherFunction") self.assertEqual(frames[i + 1].function_name, "testNestedFunction") tests.append(TracebackTest) class ClientTest(parameterized.TestCase): def setUp(self): super(ClientTest, self).setUp() self.backend = xla_backend() def testPlatformVersion(self): # Check doesn't crash version = self.backend.platform_version if self.backend.platform == "cpu": self.assertEqual(version, "<unknown>") tests.append(ClientTest) # TODO(b/182461453): Add TFRT and cloud TPU implementation of # ReadDynamicShapes class DynamicReshapeTest(ComputationTest): """Tests related to DynamicReshape.""" def _CompareToPyAndBufferProtocol(self, builder, args, expected_results, test_fn): compiled = self.backend.compile(builder.build()) output_buffers = compiled.execute([ self.backend.buffer_from_pyval( arg, device=compiled.local_devices()[0]) for arg in args ]) self.assertLen(output_buffers, len(expected_results)) for buf, expected in zip(output_buffers, expected_results): to_py_result = buf.to_py() self.assertEqual(expected.shape, to_py_result.shape) test_fn(expected, to_py_result) if self.backend.platform == "cpu" and buf.dtype != bfloat16: mview = memoryview(buf) self.assertEqual(expected.shape, mview.shape) test_fn(expected, np.asarray(mview)) else: # Buffer protocol expected to fail on non-cpu platforms and bfloat16 # Note that np.asarray(buf) doesn't throw an exception. To test if the # error was thrown properly we must use memoryview(buf). with self.assertRaises(BufferError): memoryview(buf) # 1D reshape of full size, half size, and size of 0. @unittest.skipIf(cloud_tpu, "not implemented") @parameterized.parameters((5), (3), (0)) def testReshape1D(self, reshape_size): full_size = 5 c = self._NewComputation() arg = np.array(reshape_size, dtype=np.int32) expected = np.array(range(reshape_size), dtype=np.int32) p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg)) ops.DynamicReshape( ops.Constant(c, NumpyArrayS32(range(full_size))), [p], [full_size], [True]) self._CompareToPyAndBufferProtocol(c, [arg], [expected], np.testing.assert_equal) # 2D reshape with an slice on the minor dimension. We test different types # where the strides may differ between the host and devices. The reshaped # physical memory layout is not consecutive, and we test if the program can # return the correct logical view of the data. 
@unittest.skipIf(cloud_tpu, "not implemented") @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in int_dtypes + float_dtypes) def testReshape2D(self, dtype): arg0 = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) arg1 = np.array(2, dtype=np.int32) expected = np.array([[1, 2], [4, 5]], dtype=np.int32) c = self._NewComputation() p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0)) p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1)) ops.DynamicReshape(p0, [p1, p1], [2, 3], [False, True]) self._CompareToPyAndBufferProtocol(c, [arg0, arg1], [expected], np.testing.assert_equal) @unittest.skipIf(cloud_tpu, "not implemented") @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in int_dtypes + float_dtypes) def testDynamicShapeArgs(self, dtype): full_size = 10 dynamic_shape_size = 4 # subcomputation 1 binary_add_builder = self._NewComputation() scalar_shape = xla_client.Shape.scalar_shape(np.dtype(dtype)) ops.Add( ops.Parameter(binary_add_builder, 0, scalar_shape), ops.Parameter(binary_add_builder, 1, scalar_shape)) # subcomputation 2 reshape_reduce_builder = self._NewComputation() dshape = xla_client.Shape.array_shape( np.dtype(dtype), dims=[full_size], dynamic_dimensions=[True]) reshape_reduce_p = ops.Parameter(reshape_reduce_builder, 0, dshape) ops.Reduce( reshape_reduce_builder, operands=[reshape_reduce_p], init_values=[ops.Constant(reshape_reduce_builder, dtype(0))], computation=binary_add_builder.build(), dimensions_to_reduce=[0]) # main computation: sum(range(full_size)[:dynamic_shape_size]) c = self._NewComputation() arg = np.array(dynamic_shape_size, dtype=np.int32) p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg)) reshaped = ops.DynamicReshape( ops.Constant(c, np.array(range(full_size), dtype=dtype)), [p], [full_size], [True]) ops.Call(c, reshape_reduce_builder.build(), operands=(reshaped,)) self._ExecuteAndCompareClose(c, [arg], [dtype(6)]) tests.append(DynamicReshapeTest) return tests def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw): # Avoid creating a new backend per test (this causes GPU OOM, and is probably # inefficient). backend_fn = functools.lru_cache(maxsize=None)(backend_fn) for klass in TestFactory(backend_fn, **kw): test = type(test_prefix + klass.__name__, (klass,), {}) # Clean up the qualified names of the tests to not include the test factory. test.__qualname__ = test.__name__ globals_dict[test.__name__] = test if __name__ == "__main__": flags.DEFINE_string("backend", "cpu", "Target backend.") InstantiateTests(globals(), lambda: xla_client.get_local_backend(FLAGS.backend)) absltest.main()
mupen64plus_env.py
from http.server import BaseHTTPRequestHandler, HTTPServer #from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer import abc import array import inspect import json import os import subprocess import threading import time from termcolor import cprint import yaml import gym from gym import error, spaces, utils from gym.utils import seeding import numpy as np import mss from IPython import embed import os import signal ############################################### class ImageHelper: def GetPixelColor(self, image_array, x, y): base_pixel = image_array[y][x] red = base_pixel[0] green = base_pixel[1] blue = base_pixel[2] return (red, green, blue) ############################################### ### Variables & Constants ### ############################################### config = yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "config.yml"))) MILLISECOND = 1.0 / 1000.0 IMAGE_HELPER = ImageHelper() ############################################### class Mupen64PlusEnv(gym.Env): __metaclass__ = abc.ABCMeta metadata = {'render.modes': ['human']} def __init__(self, rom_name): self.viewer = None self.reset_count = 0 self.step_count = 0 self.running = True self.mss_grabber = None self.episode_over = False self.numpy_array = None self.controller_server, self.controller_server_thread = self._start_controller_server() self.xvfb_process, self.emulator_process = self._start_emulator(rom_name=rom_name) self._navigate_menu() self.observation_space = \ spaces.Box(low=0, high=255, shape=(config['SCR_H'], config['SCR_W'], config['SCR_D'])) self.action_space = spaces.MultiDiscrete([[-80, 80], # Joystick X-axis [-80, 80], # Joystick Y-axis [0, 1], # A Button [0, 1], # B Button [0, 1]]) # RB Button def _step(self, action): #cprint('Step %i: %s' % (self.step_count, action), 'green') self.controller_server.send_controls(action) obs = self._observe() self.episode_over = self._evaluate_end_state() reward = self._get_reward() self.step_count += 1 return obs, reward, self.episode_over, {} def _observe(self): #cprint('Observe called!', 'yellow') if config['USE_XVFB']: offset_x = 0 offset_y = 0 else: offset_x = config['OFFSET_X'] offset_y = config['OFFSET_Y'] image_array = \ np.array(self.mss_grabber.grab({"top": offset_y, "left": offset_x, "width": config['SCR_W'], "height": config['SCR_H']}), dtype=np.uint8) # drop the alpha channel and flip red and blue channels (BGRA -> RGB) self.numpy_array = \ np.flip(image_array[:, :, :3], 2) return self.numpy_array @abc.abstractmethod def _navigate_menu(self): return @abc.abstractmethod def _get_reward(self): #cprint('Get Reward called!', 'yellow') return 0 @abc.abstractmethod def _evaluate_end_state(self): #cprint('Evaluate End State called!', 'yellow') return False @abc.abstractmethod def _reset(self): cprint('Reset called!', 'yellow') self.reset_count += 1 self.step_count = 0 return self._observe() def _render(self, mode='human', close=False): if close: if self.viewer is not None: self.viewer.close() self.viewer = None return img = self.numpy_array if mode == 'rgb_array': return img elif mode == 'human': from gym.envs.classic_control import rendering if self.viewer is None: self.viewer = rendering.SimpleImageViewer() self.viewer.imshow(img) def _close(self): cprint('Close called!', 'yellow') self.running = False embed() self._kill_emulator() self._stop_controller_server() def _start_controller_server(self): server = ControllerHTTPServer(('', config['PORT_NUMBER']), config['ACTION_TIMEOUT']) server_thread = 
threading.Thread(target=server.serve_forever, args=()) server_thread.daemon = True server_thread.start() print('ControllerHTTPServer started on port ', config['PORT_NUMBER']) return server, server_thread def _stop_controller_server(self): #cprint('Stop Controller Server called!', 'yellow') if self.controller_server is not None: self.controller_server.shutdown() def _start_emulator(self, rom_name, res_w=config['SCR_W'], res_h=config['SCR_H'], res_d=config['SCR_D'], input_driver_path=config['INPUT_DRIVER_PATH']): rom_path = os.path.abspath( os.path.join(os.path.dirname(inspect.stack()[0][1]), '../ROMs', rom_name)) if not os.path.isfile(rom_path): msg = "ROM not found: " + rom_path cprint(msg, 'red') raise Exception(msg) input_driver_path = os.path.abspath(os.path.expanduser(input_driver_path)) if not os.path.isfile(input_driver_path): msg = "Input driver not found: " + input_driver_path cprint(msg, 'red') raise Exception(msg) cmd = [config['MUPEN_CMD'], "--resolution", "%ix%i" % (res_w, res_h), "--audio", "dummy", "--input", input_driver_path, rom_path] initial_disp = os.environ["DISPLAY"] cprint('Initially on DISPLAY %s' % initial_disp, 'red') xvfb_proc = None if config['USE_XVFB']: display_num = -1 #Displaynum hate success = False # If we couldn't find an open display number after 15 attempts, give up while not success and display_num <= 15: display_num += 1 xvfb_cmd = [config['XVFB_CMD'], ":" + str(display_num), "-screen", "0", "%ix%ix%i" % (res_w, res_h, res_d * 8), "-fbdir", config['TMP_DIR']] cprint('Starting xvfb with command: %s' % xvfb_cmd, 'yellow') xvfb_proc = subprocess.Popen(xvfb_cmd, shell=False, stderr=subprocess.STDOUT, preexec_fn=os.setsid) time.sleep(2) # Give xvfb a couple seconds to start up # Poll the process to see if it exited early # (most likely due to a server already active on the display_num) if xvfb_proc.poll() is None: success = True print('') if not success: msg = "Failed to initialize Xvfb!" cprint(msg, 'red') raise Exception(msg) os.environ["DISPLAY"] = ":" + str(display_num) cprint('Using DISPLAY %s' % os.environ["DISPLAY"], 'blue') cprint('Changed to DISPLAY %s' % os.environ["DISPLAY"], 'red') cmd = [config['VGLRUN_CMD']] + cmd cprint('Starting emulator with comand: %s' % cmd, 'yellow') emulator_process = subprocess.Popen(cmd, env=os.environ.copy(), shell=False, stderr=subprocess.STDOUT) # TODO: Test and cleanup: # May need to initialize this after the DISPLAY env var has been set # so it attaches to the correct X display; otherwise screenshots may # come from the wrong place. This used to be true when we were using # wxPython for screenshots. Untested after switching to mss. 
        cprint('Calling mss.mss() with DISPLAY %s' % os.environ["DISPLAY"], 'red')
        self.mss_grabber = mss.mss()
        time.sleep(2)  # Give mss a couple seconds to initialize; also may not be necessary

        # Restore the DISPLAY env var
        os.environ["DISPLAY"] = initial_disp
        cprint('Changed back to DISPLAY %s' % os.environ["DISPLAY"], 'red')

        emu_mon = EmulatorMonitor()
        monitor_thread = threading.Thread(target=emu_mon.monitor_emulator,
                                          args=[emulator_process])
        monitor_thread.daemon = True
        monitor_thread.start()

        return xvfb_proc, emulator_process

    def _kill_emulator(self):
        #cprint('Kill Emulator called!', 'yellow')
        try:
            self.controller_server.send_controls(ControllerState.NO_OP)
            if self.emulator_process is not None:
                self.emulator_process.kill()
            if self.xvfb_process is not None:
                os.killpg(os.getpgid(self.xvfb_process.pid), signal.SIGTERM)
                #self.xvfb_process.kill()
        except AttributeError:
            pass  # We may be shut down during initialization before these attributes have been set


###############################################
class EmulatorMonitor:

    def monitor_emulator(self, emulator):
        emu_return = emulator.poll()
        while emu_return is None:
            time.sleep(2)
            emu_return = emulator.poll()

        # TODO: this means our environment died... need to die too
        print('Emulator closed with code: ' + str(emu_return))


###############################################
class ControllerState(object):

    # Controls
    NO_OP = [0, 0, 0, 0, 0]
    A_BUTTON = [0, 0, 1, 0, 0]
    B_BUTTON = [0, 0, 0, 1, 0]
    RB_BUTTON = [0, 0, 0, 0, 1]
    JOYSTICK_UP = [0, 80, 0, 0, 0]
    JOYSTICK_DOWN = [0, -80, 0, 0, 0]
    JOYSTICK_LEFT = [-80, 0, 0, 0, 0]
    JOYSTICK_RIGHT = [80, 0, 0, 0, 0]

    def __init__(self, controls=NO_OP, start_button=0):
        self.START_BUTTON = start_button
        self.X_AXIS = controls[0]
        self.Y_AXIS = controls[1]
        self.A_BUTTON = controls[2]
        self.B_BUTTON = controls[3]
        self.R_TRIG = controls[4]
        self.L_TRIG = 0
        self.Z_TRIG = 0

    def to_json(self):
        return json.dumps(self.__dict__)


###############################################
class ControllerHTTPServer(HTTPServer, object):

    def __init__(self, server_address, control_timeout):
        self.control_timeout = control_timeout
        self.controls = ControllerState()
        self.hold_response = True
        self.running = True
        super(ControllerHTTPServer, self).__init__(server_address, self.ControllerRequestHandler)

    def send_controls(self, controls, start_button=0):
        #print('Send controls called')
        self.controls = ControllerState(controls, start_button)
        self.hold_response = False

        # Wait for controls to be sent:
        start = time.time()
        while not self.hold_response and time.time() < start + self.control_timeout:
            time.sleep(MILLISECOND)

    def shutdown(self):
        self.running = False
        super(ControllerHTTPServer, self).shutdown()

    class ControllerRequestHandler(BaseHTTPRequestHandler, object):

        def log_message(self, format, *args):
            pass

        def write_response(self, resp_code, resp_data):
            self.send_response(resp_code)
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            self.wfile.write(resp_data.encode("utf-8"))

        def do_GET(self):
            while self.server.running and self.server.hold_response:
                time.sleep(MILLISECOND)

            if not self.server.running:
                print('Sending SHUTDOWN response')
                # TODO: This sometimes fails with a broken pipe because
                # the emulator has already stopped. Should handle gracefully
                self.write_response(500, "SHUTDOWN")
                # Return here so we don't send a second response on the same request.
                return

            ### respond with controller output
            self.write_response(200, self.server.controls.to_json())
            self.server.hold_response = True
            return

###############################################
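
# NOTE: The sketch below is illustrative only and not part of the environment
# above. It shows one way a concrete environment might fill in the abstract
# hooks of Mupen64PlusEnv. The class name, ROM file name, menu button presses
# and the pixel-based end-of-episode check are all placeholder assumptions.
class ExampleRomEnv(Mupen64PlusEnv):

    def __init__(self):
        # 'example.n64' is a hypothetical ROM placed in the ../ROMs directory.
        super(ExampleRomEnv, self).__init__(rom_name='example.n64')

    def _navigate_menu(self):
        # Press A a few times to get past a hypothetical title screen.
        for _ in range(3):
            self.controller_server.send_controls(ControllerState.A_BUTTON)
            self.controller_server.send_controls(ControllerState.NO_OP)

    def _get_reward(self):
        # Placeholder: reward survival; a real env would read state from the screen.
        return 0 if self.episode_over else 1

    def _evaluate_end_state(self):
        # Placeholder: end the episode when a chosen pixel turns red.
        red, green, blue = IMAGE_HELPER.GetPixelColor(self.numpy_array, 10, 10)
        return red > 200 and green < 50 and blue < 50

    def _reset(self):
        # Press start (hypothetically, to skip a pause/continue screen), then
        # fall back to the base implementation which re-observes the screen.
        self.controller_server.send_controls(ControllerState.NO_OP, start_button=1)
        return super(ExampleRomEnv, self)._reset()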
managers.py
__all__ = ['BaseManager', 'SyncManager', 'BaseProxy', 'Token'] import sys import threading import array import queue from time import time as _time from traceback import format_exc from . import connection from .context import reduction, get_spawning_popen from . import pool from . import process from . import util from . import get_context def reduce_array(a): return array.array, (a.typecode, a.tobytes()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items', 'keys', 'values') ] if view_types[0] is not list: def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: reduction.register(view_type, rebuild_as_list) class Token(object): """ Type to uniquely indentify a shared object """ __slots__ = 'typeid', 'address', 'id' def __init__(self, typeid, address, id): self.typeid, self.address, self.id = typeid, address, id def __getstate__(self): return self.typeid, self.address, self.id def __setstate__(self, state): self.typeid, self.address, self.id = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % (self.__class__. __name__, self.typeid, self.address, self.id) def dispatch(c, id, methodname, args=(), kwds={}): """ Send a message to manager using connection `c` and return response """ c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind == '#TRACEBACK': assert type(result) is str return RemoteError(result) elif kind == '#UNSERIALIZABLE': assert type(result) is str return RemoteError('Unserializable message: %s\n' % result) else: return ValueError('Unrecognized message type') class RemoteError(Exception): def __str__(self): return '\n' + '-' * 75 + '\n' + str(self.args[0]) + '-' * 75 def all_methods(obj): """ Return a list of names of methods of `obj` """ temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): """ Return a list of names of methods of `obj` which do not start with '_' """ return [name for name in all_methods(obj) if name[0] != '_'] class Server(object): """ Server class which runs in a process controlled by a manager object """ public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): assert isinstance(authkey, bytes) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.id_to_local_proxy_obj = {} self.mutex = threading.Lock() def serve_forever(self): """ Run the server forever """ self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if sys.stdout != sys.__stdout__: util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() def 
handle_request(self, c): """ Handle a new connection """ funcname = result = request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = '#TRACEBACK', format_exc() else: try: result = func(c, *args, **kwds) except Exception: msg = '#TRACEBACK', format_exc() else: msg = '#RETURN', result try: c.send(msg) except Exception as e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) c.close() def serve_client(self, conn): """ Handle requests from the proxies in a particular process/thread """ util.debug('starting server thread to service %r', threading. current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request try: obj, exposed, gettypeid = id_to_obj[ident] except KeyError as ke: try: obj, exposed, gettypeid = self.id_to_local_proxy_obj[ ident] except KeyError as second_ke: raise ke if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % ( methodname, type(obj), exposed)) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as e: msg = '#ERROR', e else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = '#PROXY', (rexposed, token) else: msg = '#RETURN', res except AttributeError: if methodname is None: msg = '#TRACEBACK', format_exc() else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func(self, conn, ident, obj, * args, **kwds) msg = '#RETURN', result except Exception: msg = '#TRACEBACK', format_exc() except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = '#TRACEBACK', format_exc() try: try: send(msg) except Exception as e: send(('#UNSERIALIZABLE', format_exc())) except Exception as e: util.info('exception in thread serving %r', threading. current_thread().name) util.info(' ... message was %r', msg) util.info(' ... exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = {'__str__': fallback_str, '__repr__': fallback_repr, '#GETVALUE': fallback_getvalue} def dummy(self, c): pass def debug_info(self, c): """ Return some info --- useful to spot problems with refcounting """ with self.mutex: result = [] keys = list(self.id_to_refcount.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % ( ident, self.id_to_refcount[ident], str(self. 
id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): """ Number of shared objects """ return len(self.id_to_refcount) def shutdown(self, c): """ Shutdown this process """ try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, *args, **kwds): """ Create a new shared object and return its id """ with self.mutex: callable, exposed, method_to_typeid, proxytype = self.registry[ typeid] if callable is None: assert len(args) == 1 and not kwds obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: assert type(method_to_typeid) is dict exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = obj, set(exposed), method_to_typeid if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): """ Return the methods of the shared object indicated by token """ return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): """ Spawn a new thread to serve this connection """ threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: try: self.id_to_refcount[ident] += 1 except KeyError as ke: if ident in self.id_to_local_proxy_obj: self.id_to_refcount[ident] = 1 self.id_to_obj[ident] = self.id_to_local_proxy_obj[ident] obj, exposed, gettypeid = self.id_to_obj[ident] util.debug('Server re-enabled tracking & INCREF %r', ident) else: raise ke def decref(self, c, ident): if (ident not in self.id_to_refcount and ident in self. id_to_local_proxy_obj): util.debug('Server DECREF skipping %r', ident) return with self.mutex: assert self.id_to_refcount[ident] >= 1 self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_refcount[ident] if ident not in self.id_to_refcount: self.id_to_obj[ident] = None, (), None util.debug('disposing of obj with id %r', ident) with self.mutex: del self.id_to_obj[ident] class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 listener_client = {'pickle': (connection.Listener, connection.Client), 'xmlrpclib': (connection.XmlListener, connection.XmlClient)} class BaseManager(object): """ Base class for managers """ _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx =None): if authkey is None: authkey = process.current_process().authkey self._address = address self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() def get_server(self): """ Return server object with serve_forever() method and address attribute """ assert self._state.value == State.INITIAL return Server(self._registry, self._address, self._authkey, self. 
_serializer) def connect(self): """ Connect manager object to the server process """ Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): """ Spawn a server process for this manager object """ assert self._state.value == State.INITIAL if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') reader, writer = connection.Pipe(duplex=False) self._process = self._ctx.Process(target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self. _serializer, writer, initializer, initargs)) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() writer.close() self._address = reader.recv() reader.close() self._state.value = State.STARTED self.shutdown = util.Finalize(self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): """ Create a server, report its address and run it """ if initializer is not None: initializer(*initargs) server = cls._Server(registry, address, authkey, serializer) writer.send(server.address) writer.close() util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, *args, **kwds): """ Create a new shared object; return the token and exposed tuple """ assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,) + args, kwds ) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): """ Join the manager process (if it has been spawned) """ if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): """ Return some info about the servers shared objects and connections """ conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): """ Return the number of shared objects """ conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() assert self._state.value == State.STARTED return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): """ Shutdown the manager process; will be registered as a finalizer """ if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=0.1) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass address = property(lambda self: self._address) @classmethod def register(cls, typeid, callable=None, 
proxytype=None, exposed=None, method_to_typeid=None, create_method=True): """ Register a typeid with the manager type """ if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in list(method_to_typeid.items()): assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = callable, exposed, method_to_typeid, proxytype if create_method: def temp(self, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype(token, self._serializer, manager=self, authkey=self._authkey, exposed=exp) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () class BaseProxy(object): """ A base for proxies of shared objects """ _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset self._tls = tls_idset[0] self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] self._owned_by_manager = manager_owned if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): """ Try to call a method of the referrent and return a copy of the result """ try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading. current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype(token, self._serializer, manager=self. 
_manager, authkey=self._authkey, exposed=exposed) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): """ Get a copy of the value of the referent """ return self._callmethod('#GETVALUE') def _incref(self): if self._owned_by_manager: util.debug('owned_by_manager skipped INCREF of %r', self._token.id) return conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize(self, BaseProxy._decref, args=(self. _token, self._authkey, state, self._tls, self._idset, self. _Client), exitpriority=10) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) if state is None or state.value == State.STARTED: try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as e: util.debug('... decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as e: util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return RebuildProxy, (AutoProxy, self._token, self._serializer, kwds) else: return RebuildProxy, (type(self), self._token, self._serializer, kwds) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % (type(self).__name__, self ._token.typeid, id(self)) def __str__(self): """ Return representation of the referent (or a fall-back if that fails) """ try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" def RebuildProxy(func, token, serializer, kwds): """ Function used for unpickling proxy objects. """ server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: util.debug('Rebuild a proxy owned by manager, token=%r', token) kwds['manager_owned'] = True if token.id not in server.id_to_local_proxy_obj: server.id_to_local_proxy_obj[token.id] = server.id_to_obj[token.id] incref = kwds.pop('incref', True) and not getattr(process. 
current_process(), '_inheriting', False) return func(token, serializer, incref=incref, **kwds) def MakeProxyType(name, exposed, _cache={}): """ Return a proxy type whose methods are given by `exposed` """ exposed = tuple(exposed) try: return _cache[name, exposed] except KeyError: pass dic = {} for meth in exposed: exec( """def %s(self, *args, **kwds): return self._callmethod(%r, args, kwds)""" % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[name, exposed] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True): """ Return an auto-proxy for `token` """ _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref) proxy._isauto = True return proxy class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)' % (type(self).__name__, self._typecode, self._value ) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class IteratorProxy(BaseProxy): _exposed_ = '__next__', 'send', 'throw', 'close' def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = 'acquire', 'release' def acquire(self, blocking=True, timeout=None): args = (blocking,) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = 'acquire', 'release', 'wait', 'notify', 'notify_all' def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self): return self._callmethod('notify') def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = _time() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - _time() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = 'is_set', 'set', 'clear', 'wait' def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return 
self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = '__getattribute__', 'wait', 'abort', 'reset' def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties',)) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting',)) @property def broken(self): return self._callmethod('__getattribute__', ('broken',)) class NamespaceProxy(BaseProxy): _exposed_ = '__getattribute__', '__setattr__', '__delattr__' def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = 'get', 'set' def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) BaseListProxy = MakeProxyType('BaseListProxy', ('__add__', '__contains__', '__delitem__', '__getitem__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__')) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ('__contains__', '__delitem__', '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values')) ArrayProxy = MakeProxyType('ArrayProxy', ('__len__', '__getitem__', '__setitem__')) BasePoolProxy = MakeProxyType('PoolProxy', ('apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate')) BasePoolProxy._method_to_typeid_ = {'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator'} class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() class SyncManager(BaseManager): """ Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocessing.Manager()` function creates started instances of this class. 
""" SyncManager.register('Queue', queue.Queue) SyncManager.register('JoinableQueue', queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False)
recorder.py
from __future__ import annotations

import os
import time
import threading
import typing as t

import picamera
from pisat.handler import DigitalInputHandlerBase
from pisat.util.about_time import get_time_stamp

from .utils import IterSingleton


_INF = float("inf")


class _GPIOPins(IterSingleton[int]):
    GPIO00 = 0
    GPIO01 = 1
    GPIO02 = 2
    GPIO03 = 3
    GPIO04 = 4
    GPIO05 = 5
    GPIO06 = 6
    GPIO07 = 7
    GPIO08 = 8
    GPIO09 = 9
    GPIO10 = 10
    GPIO11 = 11
    GPIO12 = 12
    GPIO13 = 13
    GPIO14 = 14
    GPIO15 = 15
    GPIO16 = 16
    GPIO17 = 17
    GPIO18 = 18
    GPIO19 = 19
    GPIO20 = 20
    GPIO21 = 21
    GPIO22 = 22
    GPIO23 = 23
    GPIO24 = 24
    GPIO25 = 25
    GPIO26 = 26

    GPIO_MIN = GPIO00
    GPIO_MAX = GPIO26


class _VideoFormats(IterSingleton[str]):
    h264 = "h264"
    mjpeg = "mjpeg"
    yuv = "yuv"
    rgb = "rgb"
    rgba = "rgba"
    bgr = "bgr"
    bgra = "bgra"


GPIOPins = _GPIOPins()
VideoFormats = _VideoFormats()


def isvalid_video_format(path: str) -> bool:
    ext = os.path.splitext(path)[1]
    if not len(ext):
        return False
    return ext[1:] in VideoFormats


class IORecorder:

    def __init__(
        self,
        handler: DigitalInputHandlerBase,
        fname: t.Optional[str] = None,
        resolution: t.Tuple = (640, 480)
    ) -> None:
        if fname is None:
            fname = get_time_stamp("mov", "h264")
        elif not isvalid_video_format(fname):
            raise ValueError(f"'{fname}' has an invalid extension.")

        self._camera = picamera.PiCamera()
        self._camera.resolution = resolution
        self._fname = fname
        self._handler: DigitalInputHandlerBase = handler

    @property
    def is_high(self) -> bool:
        return self._handler.observe()

    def start_record(
        self,
        interval: float = 1.,
        timeout: float = -1,
        start_with_low: bool = False,
    ) -> None:
        if timeout <= 0:
            timeout = _INF

        if start_with_low:
            while self.is_high:
                pass

        self._flag = False
        self._camera.start_recording(self._fname)
        time_init = time.time()
        try:
            # Record until the input pin reads high, a stop is requested or
            # the timeout expires.
            while not self.is_high and not self._flag:
                if time.time() - time_init >= timeout:
                    break
                self._camera.wait_recording(timeout=interval)
        finally:
            self._camera.stop_recording()


class ThreadingIORecorder(IORecorder):

    def __init__(
        self,
        handler: DigitalInputHandlerBase,
        fname: t.Optional[str] = None,
        resolution: t.Tuple = (640, 480),
    ) -> None:
        super().__init__(handler, fname=fname, resolution=resolution)

        self._handler: DigitalInputHandlerBase = handler
        self._thread: t.Optional[threading.Thread] = None

    def start_record(
        self,
        interval: float = 1.,
        timeout: float = -1,
        start_with_low: bool = False,
    ) -> ThreadingIORecorder:
        self._thread = threading.Thread(
            target=super().start_record,
            args=(interval, timeout, start_with_low),
        )
        self._thread.start()
        return self

    def stop_record(self, timeout: float = -1) -> bool:
        if self._thread is None:
            return False

        self._flag = True
        # As in start_record, a non-positive timeout means "wait indefinitely".
        self._thread.join(timeout=None if timeout <= 0 else timeout)
        if not self._thread.is_alive():
            self._flag = False
        return self._flag

    def __enter__(self) -> ThreadingIORecorder:
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        self.stop_record()
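
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). The handler stands
# in for any concrete pisat DigitalInputHandlerBase wired to the pin that
# signals "stop recording"; its construction is hardware-specific and is
# therefore only indicated by a placeholder here.
if __name__ == "__main__":
    handler = ...  # concrete DigitalInputHandlerBase for the stop pin, e.g. GPIOPins.GPIO21

    # Each recorder opens the PiCamera, so create only one at a time.
    # Blocking use: IORecorder(handler, "run.h264").start_record(timeout=60.)
    with ThreadingIORecorder(handler, fname="run_threaded.h264") as rec:
        rec.start_record(interval=0.5)
        time.sleep(10)          # do other work while recording in the background
        rec.stop_record(timeout=5)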
progress_bar.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple text-mode progress bar animated on a background thread."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import sys
import threading
import time


class ProgressBar:
    """Bouncing progress indicator rendered through a consumer callable."""

    def __init__(self):
        """Initialize the bar in the stopped state."""
        self._is_alive = False
        self._caption = ''
        self._consumer = sys.stdout.write
        self._thread = None

    def start(self, consumer=sys.stdout.write, caption=''):
        """Start animating; `consumer` receives each rendered frame."""
        self._caption = caption
        if callable(consumer):
            self._consumer = consumer
        if not self._is_alive:
            self._is_alive = True
            self._thread = threading.Thread(target=self._run)
            self._thread.daemon = True
            self._thread.start()

    def _run(self):
        """Animation loop: bounce the marker between the ends of the bar."""
        width = 16
        status = 1
        direction = 'right'
        while self._is_alive:
            before_blank = ' ' * (status - 1)
            after_blank = ' ' * (width - status)
            # Leading '\r' redraws the bar in place instead of appending frames.
            text = '\r%s [%s=%s]' % (self._caption, before_blank, after_blank)
            self._consumer(text)
            if direction == 'right':
                status += 1
            else:
                status -= 1
            if status == width:
                direction = 'left'
            if status == 1:
                direction = 'right'
            time.sleep(0.5)

    def stop(self):
        """Stop the animation and wait for the worker thread to exit."""
        self._is_alive = False
        if self._thread is not None:
            self._thread.join()
            self._thread = None
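
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the class above): animate the bar
# while a long-running task executes, then stop it.
if __name__ == '__main__':
    bar = ProgressBar()
    bar.start(caption='Working')
    time.sleep(5)              # placeholder for the actual long-running task
    bar.stop()
    sys.stdout.write('\ndone\n')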
kivy_ui.py
import json import re import time from copy import copy from datetime import datetime from functools import partial import subprocess from subprocess import Popen, PIPE from threading import Thread from collections import namedtuple from kivy.logger import Logger import io import os import atexit import yaml from PIL import Image as PilImage import pandas as pd import numpy as np import plotly.express as px from kivy.clock import Clock from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.uix.image import Image from kivy.core.image import Image as CoreImage from kivy.properties import NumericProperty, ObjectProperty, StringProperty, \ ListProperty, BooleanProperty from kivy.uix.label import Label from kivy.uix.popup import Popup from kivy.lang.builder import Builder from kivy.core.window import Window from kivy.uix.screenmanager import ScreenManager, Screen from kivy.uix.scrollview import ScrollView from kivy.uix.spinner import SpinnerOption, Spinner from donkeycar import load_config from donkeycar.parts.tub_v2 import Tub from donkeycar.pipeline.augmentations import ImageAugmentation from donkeycar.pipeline.database import PilotDatabase from donkeycar.pipeline.types import TubRecord from donkeycar.utils import get_model_by_type from donkeycar.pipeline.training import train Builder.load_file(os.path.join(os.path.dirname(__file__), 'ui.kv')) Window.clearcolor = (0.2, 0.2, 0.2, 1) LABEL_SPINNER_TEXT = 'Add/remove' # Data struct to show tub field in the progress bar, containing the name, # the name of the maximum value in the config file and if it is centered. FieldProperty = namedtuple('FieldProperty', ['field', 'max_value_id', 'centered']) def get_norm_value(value, cfg, field_property, normalised=True): max_val_key = field_property.max_value_id max_value = getattr(cfg, max_val_key, 1.0) out_val = value / max_value if normalised else value * max_value return out_val def tub_screen(): return App.get_running_app().tub_screen if App.get_running_app() else None def pilot_screen(): return App.get_running_app().pilot_screen if App.get_running_app() else None def train_screen(): return App.get_running_app().train_screen if App.get_running_app() else None def car_screen(): return App.get_running_app().car_screen if App.get_running_app() else None def recursive_update(target, source): """ Recursively update dictionary """ if isinstance(target, dict) and isinstance(source, dict): for k, v in source.items(): v_t = target.get(k) if not recursive_update(v_t, v): target[k] = v return True else: return False def decompose(field): """ Function to decompose a string vector field like 'gyroscope_1' into a tuple ('gyroscope', 1) """ field_split = field.split('_') if len(field_split) > 1 and field_split[-1].isdigit(): return '_'.join(field_split[:-1]), int(field_split[-1]) return field, None class RcFileHandler: """ This handles the config file which stores the data, like the field mapping for displaying of bars and last opened car, tub directory. 
""" # These entries are expected in every tub, so they don't need to be in # the file known_entries = [ FieldProperty('user/angle', '', centered=True), FieldProperty('user/throttle', '', centered=False), FieldProperty('pilot/angle', '', centered=True), FieldProperty('pilot/throttle', '', centered=False), ] def __init__(self, file_path='~/.donkeyrc'): self.file_path = os.path.expanduser(file_path) self.data = self.create_data() recursive_update(self.data, self.read_file()) self.field_properties = self.create_field_properties() def exit_hook(): self.write_file() # Automatically save config when program ends atexit.register(exit_hook) def create_field_properties(self): """ Merges known field properties with the ones from the file """ field_properties = {entry.field: entry for entry in self.known_entries} field_list = self.data.get('field_mapping') if field_list is None: field_list = {} for entry in field_list: assert isinstance(entry, dict), \ 'Dictionary required in each entry in the field_mapping list' field_property = FieldProperty(**entry) field_properties[field_property.field] = field_property return field_properties def create_data(self): data = dict() data['user_pilot_map'] = {'user/throttle': 'pilot/throttle', 'user/angle': 'pilot/angle'} return data def read_file(self): if os.path.exists(self.file_path): with open(self.file_path) as f: data = yaml.load(f, Loader=yaml.FullLoader) Logger.info(f'Donkeyrc: Donkey file {self.file_path} loaded.') return data else: Logger.warn(f'Donkeyrc: Donkey file {self.file_path} does not ' f'exist.') return {} def write_file(self): if os.path.exists(self.file_path): Logger.info(f'Donkeyrc: Donkey file {self.file_path} updated.') with open(self.file_path, mode='w') as f: self.data['time_stamp'] = datetime.now() data = yaml.dump(self.data, f) return data rc_handler = RcFileHandler() class MySpinnerOption(SpinnerOption): """ Customization for Spinner """ pass class MySpinner(Spinner): """ Customization of Spinner drop down menu """ def __init__(self, **kwargs): super().__init__(option_cls=MySpinnerOption, **kwargs) class FileChooserPopup(Popup): """ File Chooser popup window""" load = ObjectProperty() root_path = StringProperty() filters = ListProperty() class FileChooserBase: """ Base class for file chooser widgets""" file_path = StringProperty("No file chosen") popup = ObjectProperty(None) root_path = os.path.expanduser('~') title = StringProperty(None) filters = ListProperty() def open_popup(self): self.popup = FileChooserPopup(load=self.load, root_path=self.root_path, title=self.title, filters=self.filters) self.popup.open() def load(self, selection): """ Method to load the chosen file into the path and call an action""" self.file_path = str(selection[0]) self.popup.dismiss() self.load_action() def load_action(self): """ Virtual method to run when file_path has been updated """ pass class ConfigManager(BoxLayout, FileChooserBase): """ Class to mange loading of the config file from the car directory""" config = ObjectProperty(None) file_path = StringProperty(rc_handler.data.get('car_dir', '')) def load_action(self): """ Load the config from the file path""" if self.file_path: try: path = os.path.join(self.file_path, 'config.py') self.config = load_config(path) # If load successful, store into app config rc_handler.data['car_dir'] = self.file_path except FileNotFoundError: Logger.error(f'Config: Directory {self.file_path} has no ' f'config.py') except Exception as e: Logger.error(f'Config: {e}') class TubLoader(BoxLayout, FileChooserBase): """ Class to 
manage loading or reloading of the Tub from the tub directory. Loading triggers many actions on other widgets of the app. """ file_path = StringProperty(rc_handler.data.get('last_tub', '')) tub = ObjectProperty(None) len = NumericProperty(1) records = None def load_action(self): """ Update tub from the file path""" if self.update_tub(): # If update successful, store into app config rc_handler.data['last_tub'] = self.file_path def update_tub(self, event=None): if not self.file_path: return False # If config not yet loaded return cfg = tub_screen().ids.config_manager.config if not cfg: return False # At least check if there is a manifest file in the tub path if not os.path.exists(os.path.join(self.file_path, 'manifest.json')): tub_screen().status(f'Path {self.file_path} is not a valid tub.') return False try: self.tub = Tub(self.file_path) except Exception as e: tub_screen().status(f'Failed loading tub: {str(e)}') return False # Check if filter is set in tub screen expression = tub_screen().ids.tub_filter.filter_expression # Use filter, this defines the function def select(underlying): if not expression: return True else: try: record = TubRecord(cfg, self.tub.base_path, underlying) res = eval(expression) return res except KeyError as err: Logger.error(f'Filter: {err}') return True self.records = [TubRecord(cfg, self.tub.base_path, record) for record in self.tub if select(record)] self.len = len(self.records) if self.len > 0: tub_screen().index = 0 tub_screen().ids.data_plot.update_dataframe_from_tub() msg = f'Loaded tub {self.file_path} with {self.len} records' else: msg = f'No records in tub {self.file_path}' if expression: msg += f' using filter {tub_screen().ids.tub_filter.record_filter}' tub_screen().status(msg) return True class LabelBar(BoxLayout): """ Widget that combines a label with a progress bar. This is used to display the record fields in the data panel.""" field = StringProperty() field_property = ObjectProperty() config = ObjectProperty() msg = '' def update(self, record): """ This function is called everytime the current record is updated""" if not record: return field, index = decompose(self.field) if field in record.underlying: val = record.underlying[field] if index is not None: val = val[index] # Update bar if a field property for this field is known if self.field_property: norm_value = get_norm_value(val, self.config, self.field_property) new_bar_val = (norm_value + 1) * 50 if \ self.field_property.centered else norm_value * 100 self.ids.bar.value = new_bar_val self.ids.field_label.text = self.field if isinstance(val, float) or isinstance(val, np.float32): text = f'{val:+07.3f}' elif isinstance(val, int): text = f'{val:10}' else: text = str(val) self.ids.value_label.text = text else: Logger.error(f'Record: Bad record {record.underlying["_index"]} - ' f'missing field {self.field}') class DataPanel(BoxLayout): """ Data panel widget that contains the label/bar widgets and the drop down menu to select/deselect fields.""" record = ObjectProperty() # dual mode is used in the pilot arena where we only show angle and # throttle or speed dual_mode = BooleanProperty(False) auto_text = StringProperty(LABEL_SPINNER_TEXT) throttle_field = StringProperty('user/throttle') link = False def __init__(self, **kwargs): super().__init__(**kwargs) self.labels = {} self.screen = ObjectProperty() def add_remove(self): """ Method to add or remove a LabelBar. 
Depending on the value of the drop-down menu the LabelBar is added
        if it is not present, otherwise it is removed."""
        field = self.ids.data_spinner.text
        if field == LABEL_SPINNER_TEXT:
            return
        if field in self.labels and not self.dual_mode:
            self.remove_widget(self.labels[field])
            del(self.labels[field])
            self.screen.status(f'Removing {field}')
        else:
            # in dual mode replace the second entry with the new one
            if self.dual_mode and len(self.labels) == 2:
                k, v = list(self.labels.items())[-1]
                self.remove_widget(v)
                del(self.labels[k])
            field_property = rc_handler.field_properties.get(decompose(field)[0])
            cfg = tub_screen().ids.config_manager.config
            lb = LabelBar(field=field, field_property=field_property, config=cfg)
            self.labels[field] = lb
            self.add_widget(lb)
            lb.update(self.record)
            if len(self.labels) == 2:
                self.throttle_field = field
            self.screen.status(f'Adding {field}')
        if self.screen.name == 'tub':
            self.screen.ids.data_plot.plot_from_current_bars()
        self.ids.data_spinner.text = LABEL_SPINNER_TEXT
        self.auto_text = field

    def on_record(self, obj, record):
        """ Kivy function that is called every time self.record changes"""
        for v in self.labels.values():
            v.update(record)

    def clear(self):
        for v in self.labels.values():
            self.remove_widget(v)
        self.labels.clear()


class FullImage(Image):
    """ Widget to display an image that fills the space. """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.core_image = None

    def update(self, record):
        """ This method is called every time a record gets updated. """
        try:
            img_arr = self.get_image(record)
            pil_image = PilImage.fromarray(img_arr)
            bytes_io = io.BytesIO()
            pil_image.save(bytes_io, format='png')
            bytes_io.seek(0)
            self.core_image = CoreImage(bytes_io, ext='png')
            self.texture = self.core_image.texture
        except KeyError as e:
            Logger.error(f'Record: Missing key: {e}')
        except Exception as e:
            Logger.error(f'Record: Bad record: {e}')

    def get_image(self, record):
        return record.image(cached=False)


class ControlPanel(BoxLayout):
    """ Class for control panel navigation. """
    screen = ObjectProperty()
    speed = NumericProperty(1.0)
    record_display = StringProperty()
    clock = None
    fwd = None

    def start(self, fwd=True, continuous=False):
        """
        Method to cycle through records if either single <,> or continuous
        <<, >> buttons are pressed
        :param fwd:         If we go forward or backward
        :param continuous:  If we do <<, >> or <, >
        :return:            None
        """
        time.sleep(0.1)
        call = partial(self.step, fwd, continuous)
        if continuous:
            self.fwd = fwd
            s = float(self.speed) * tub_screen().ids.config_manager.config.DRIVE_LOOP_HZ
            cycle_time = 1.0 / s
        else:
            cycle_time = 0.08
        self.clock = Clock.schedule_interval(call, cycle_time)

    def step(self, fwd=True, continuous=False, *largs):
        """
        Update a single step and cap/floor the index so we stay within the
        tub.
        :param fwd:         If we go forward or backward
        :param continuous:  If we are in continuous mode <<, >>
        :param largs:       dummy
        :return:            None
        """
        new_index = self.screen.index + (1 if fwd else -1)
        if new_index >= tub_screen().ids.tub_loader.len:
            new_index = 0
        elif new_index < 0:
            new_index = tub_screen().ids.tub_loader.len - 1
        self.screen.index = new_index
        msg = f'Donkey {"run" if continuous else "step"} ' \
              f'{"forward" if fwd else "backward"}'
        if not continuous:
            msg += f' - you can also use {"<right>" if fwd else "<left>"} key'
        else:
            msg += ' - you can toggle run/stop with <space>'
        self.screen.status(msg)

    def stop(self):
        if self.clock:
            self.clock.cancel()
            self.clock = None

    def restart(self):
        if self.clock:
            self.stop()
            self.start(self.fwd, True)

    def update_speed(self, up=True):
        """ Method to update the speed on the controller """
        values = self.ids.control_spinner.values
        idx = values.index(self.ids.control_spinner.text)
        if up and idx < len(values) - 1:
            self.ids.control_spinner.text = values[idx + 1]
        elif not up and idx > 0:
            self.ids.control_spinner.text = values[idx - 1]

    def set_button_status(self, disabled=True):
        """ Method to disable (or enable) all buttons. """
        self.ids.run_bwd.disabled = self.ids.run_fwd.disabled = \
            self.ids.step_fwd.disabled = self.ids.step_bwd.disabled = disabled

    def on_keyboard(self, key, scancode):
        """ Method to check which keystroke has been sent. """
        if key == ' ':
            if self.clock and self.clock.is_triggered:
                self.stop()
                self.set_button_status(disabled=False)
                self.screen.status('Donkey stopped')
            else:
                self.start(continuous=True)
                self.set_button_status(disabled=True)
        elif scancode == 79:
            self.step(fwd=True)
        elif scancode == 80:
            self.step(fwd=False)
        elif scancode == 45:
            self.update_speed(up=False)
        elif scancode == 46:
            self.update_speed(up=True)


class PaddedBoxLayout(BoxLayout):
    pass


class TubEditor(PaddedBoxLayout):
    """ Tub editor widget. Contains the left/right index interval and the
        manipulator buttons for deleting / restoring and reloading """
    lr = ListProperty([0, 0])

    def set_lr(self, is_l=True):
        """ Sets the left or right end of the range to the current tub record
            index """
        if not tub_screen().current_record:
            return
        self.lr[0 if is_l else 1] \
            = tub_screen().current_record.underlying['_index']

    def del_lr(self, is_del):
        """ Deletes or restores records in the chosen range """
        tub = tub_screen().ids.tub_loader.tub
        if self.lr[1] >= self.lr[0]:
            selected = list(range(*self.lr))
        else:
            last_id = tub.manifest.current_index
            selected = list(range(self.lr[0], last_id))
            selected += list(range(self.lr[1]))
        tub.delete_records(selected) if is_del else tub.restore_records(selected)


class TubFilter(PaddedBoxLayout):
    """ Tub filter widget.
    """
    filter_expression = StringProperty(None)
    record_filter = StringProperty(rc_handler.data.get('record_filter', ''))

    def update_filter(self):
        filter_text = self.ids.record_filter.text
        # empty string resets the filter
        if filter_text == '':
            self.record_filter = ''
            self.filter_expression = ''
            rc_handler.data['record_filter'] = self.record_filter
            tub_screen().status('Filter cleared')
            return
        filter_expression = self.create_filter_string(filter_text)
        try:
            record = tub_screen().current_record
            res = eval(filter_expression)
            status = f'Filter result on current record: {res}'
            if isinstance(res, bool):
                self.record_filter = filter_text
                self.filter_expression = filter_expression
                rc_handler.data['record_filter'] = self.record_filter
            else:
                status += ' - non-bool expression can\'t be applied'
            status += ' - press <Reload tub> to see effect'
            tub_screen().status(status)
        except Exception as e:
            tub_screen().status(f'Filter error on current record: {e}')

    @staticmethod
    def create_filter_string(filter_text, record_name='record'):
        """ Converts text like 'user/angle' into 'record.underlying["user/angle"]'
            so that it can be used in a filter. Will replace only expressions
            that are found in the tub inputs list.

            :param filter_text: input text like 'user/throttle > 0.1'
            :param record_name: name of the record in the expression
            :return:            updated string that has all input fields wrapped
        """
        for field in tub_screen().current_record.underlying.keys():
            field_list = filter_text.split(field)
            if len(field_list) > 1:
                filter_text = f'{record_name}.underlying["{field}"]'\
                    .join(field_list)
        return filter_text


class DataPlot(PaddedBoxLayout):
    """ Data plot panel which embeds the matplotlib interactive graph"""
    df = ObjectProperty(force_dispatch=True, allownone=True)

    def plot_from_current_bars(self, in_app=True):
        """ Plotting from the currently selected bars. The DataFrame for
            plotting should contain all bars except for string fields; all
            data is selected if the bars are empty. """
        # nothing to plot before the DataFrame has been built
        if self.df is None:
            return
        tub = tub_screen().ids.tub_loader.tub
        field_map = dict(zip(tub.manifest.inputs, tub.manifest.types))
        # Use selected fields or all fields if nothing is selected
        all_cols = tub_screen().ids.data_panel.labels.keys() or self.df.columns
        cols = [c for c in all_cols if decompose(c)[0] in field_map
                and field_map[decompose(c)[0]] not in ('image_array', 'str')]

        df = self.df[cols]
        # Don't plot the millisecond time stamp as it is too large a number
        df = df.drop(labels=['_timestamp_ms'], axis=1, errors='ignore')

        if in_app:
            tub_screen().ids.graph.df = df
        else:
            fig = px.line(df, x=df.index, y=df.columns, title=tub.base_path)
            fig.update_xaxes(rangeslider=dict(visible=True))
            fig.show()

    def unravel_vectors(self):
        """ Unravels vector and list entries in the tub which are created when
            the DataFrame is built from a list of records"""
        manifest = tub_screen().ids.tub_loader.tub.manifest
        for k, v in zip(manifest.inputs, manifest.types):
            if v == 'vector' or v == 'list':
                dim = len(tub_screen().current_record.underlying[k])
                df_keys = [k + f'_{i}' for i in range(dim)]
                self.df[df_keys] = pd.DataFrame(self.df[k].tolist(),
                                                index=self.df.index)
                self.df.drop(k, axis=1, inplace=True)

    def update_dataframe_from_tub(self):
        """ Called from TubManager when a tub is reloaded/recreated.
Fills the DataFrame from records, and updates the dropdown menu in the data panel.""" generator = (t.underlying for t in tub_screen().ids.tub_loader.records) self.df = pd.DataFrame(generator).dropna() to_drop = {'cam/image_array'} self.df.drop(labels=to_drop, axis=1, errors='ignore', inplace=True) self.df.set_index('_index', inplace=True) self.unravel_vectors() tub_screen().ids.data_panel.ids.data_spinner.values = self.df.columns self.plot_from_current_bars() class TabBar(BoxLayout): manager = ObjectProperty(None) def disable_only(self, bar_name): this_button_name = bar_name + '_btn' for button_name, button in self.ids.items(): button.disabled = button_name == this_button_name class TubScreen(Screen): """ First screen of the app managing the tub data. """ index = NumericProperty(None, force_dispatch=True) current_record = ObjectProperty(None) keys_enabled = BooleanProperty(True) def initialise(self, e): self.ids.config_manager.load_action() self.ids.tub_loader.update_tub() def on_index(self, obj, index): """ Kivy method that is called if self.index changes""" self.current_record = self.ids.tub_loader.records[index] self.ids.slider.value = index def on_current_record(self, obj, record): """ Kivy method that is called if self.current_record changes.""" self.ids.img.update(record) i = record.underlying['_index'] self.ids.control_panel.record_display = f"Record {i:06}" def status(self, msg): self.ids.status.text = msg def on_keyboard(self, instance, keycode, scancode, key, modifiers): if self.keys_enabled: self.ids.control_panel.on_keyboard(key, scancode) class PilotLoader(BoxLayout, FileChooserBase): """ Class to mange loading of the config file from the car directory""" num = StringProperty() model_type = StringProperty() pilot = ObjectProperty(None) filters = ['*.h5', '*.tflite'] def load_action(self): if self.file_path and self.pilot: try: self.pilot.load(os.path.join(self.file_path)) rc_handler.data['pilot_' + self.num] = self.file_path rc_handler.data['model_type_' + self.num] = self.model_type except FileNotFoundError: Logger.error(f'Pilot: Model {self.file_path} not found') except Exception as e: Logger.error(f'Pilot: {e}') def on_model_type(self, obj, model_type): """ Kivy method that is called if self.model_type changes. """ if self.model_type and self.model_type != 'Model type': cfg = tub_screen().ids.config_manager.config if cfg: self.pilot = get_model_by_type(self.model_type, cfg) self.ids.pilot_button.disabled = False def on_num(self, e, num): """ Kivy method that is called if self.num changes. """ self.file_path = rc_handler.data.get('pilot_' + self.num, '') self.model_type = rc_handler.data.get('model_type_' + self.num, '') class OverlayImage(FullImage): """ Widget to display the image and the user/pilot data for the tub. 
""" keras_part = ObjectProperty() pilot_record = ObjectProperty() throttle_field = StringProperty('user/throttle') def get_image(self, record): from donkeycar.management.makemovie import MakeMovie img_arr = copy(super().get_image(record)) augmentation = pilot_screen().augmentation if pilot_screen().auglist \ else None if augmentation: img_arr = pilot_screen().augmentation.augment(img_arr) angle = record.underlying['user/angle'] throttle = get_norm_value(record.underlying[self.throttle_field], tub_screen().ids.config_manager.config, rc_handler.field_properties[ self.throttle_field]) rgb = (0, 255, 0) MakeMovie.draw_line_into_image(angle, throttle, False, img_arr, rgb) if not self.keras_part: return img_arr output = self.keras_part.evaluate(record, augmentation) rgb = (0, 0, 255) MakeMovie.draw_line_into_image(output[0], output[1], True, img_arr, rgb) out_record = copy(record) out_record.underlying['pilot/angle'] = output[0] # rename and denormalise the throttle output pilot_throttle_field \ = rc_handler.data['user_pilot_map'][self.throttle_field] out_record.underlying[pilot_throttle_field] \ = get_norm_value(output[1], tub_screen().ids.config_manager.config, rc_handler.field_properties[self.throttle_field], normalised=False) self.pilot_record = out_record return img_arr class PilotScreen(Screen): """ Screen to do the pilot vs pilot comparison .""" index = NumericProperty(None, force_dispatch=True) current_record = ObjectProperty(None) keys_enabled = BooleanProperty(False) auglist = ListProperty(force_dispatch=True) augmentation = ObjectProperty() config = ObjectProperty() def on_index(self, obj, index): """ Kivy method that is called if self.index changes. Here we update self.current_record and the slider value. """ if tub_screen().ids.tub_loader.records: self.current_record = tub_screen().ids.tub_loader.records[index] self.ids.slider.value = index def on_current_record(self, obj, record): """ Kivy method that is called when self.current_index changes. Here we update the images and the control panel entry.""" i = record.underlying['_index'] self.ids.pilot_control.record_display = f"Record {i:06}" self.ids.img_1.update(record) self.ids.img_2.update(record) def initialise(self, e): self.ids.pilot_loader_1.on_model_type(None, None) self.ids.pilot_loader_1.load_action() self.ids.pilot_loader_2.on_model_type(None, None) self.ids.pilot_loader_2.load_action() mapping = copy(rc_handler.data['user_pilot_map']) del(mapping['user/angle']) self.ids.data_in.ids.data_spinner.values = mapping.keys() self.ids.data_in.ids.data_spinner.text = 'user/angle' self.ids.data_panel_1.ids.data_spinner.disabled = True self.ids.data_panel_2.ids.data_spinner.disabled = True def map_pilot_field(self, text): """ Method to return user -> pilot mapped fields except for the intial vale called Add/remove. 
""" if text == LABEL_SPINNER_TEXT: return text return rc_handler.data['user_pilot_map'][text] def set_brightness(self, val=None): if self.ids.button_bright.state == 'down': self.config.AUG_MULTIPLY_RANGE = (val, val) if self.ids.button_blur.state == 'down': self.auglist = ['MULTIPLY', 'BLUR'] else: self.auglist = ['MULTIPLY'] def remove_brightness(self): self.auglist = ['BLUR'] if self.ids.button_blur.state == 'down' else[] def set_blur(self, val=None): if self.ids.button_blur.state == 'down': self.config.AUG_BLUR_RANGE = (val, val) if self.ids.button_bright.state == 'down': self.auglist = ['MULTIPLY', 'BLUR'] else: self.auglist = ['BLUR'] def remove_blur(self): self.auglist = ['MULTIPLY'] if self.ids.button_bright.state == 'down' \ else [] def on_auglist(self, obj, auglist): self.config.AUGMENTATIONS = self.auglist self.augmentation = ImageAugmentation(self.config) self.on_current_record(None, self.current_record) def status(self, msg): self.ids.status.text = msg def on_keyboard(self, instance, keycode, scancode, key, modifiers): if self.keys_enabled: self.ids.pilot_control.on_keyboard(key, scancode) class ScrollableLabel(ScrollView): pass class DataFrameLabel(Label): pass class TransferSelector(BoxLayout, FileChooserBase): """ Class to select transfer model""" filters = ['*.h5'] class TrainScreen(Screen): """ Class showing the training screen. """ config = ObjectProperty(force_dispatch=True, allownone=True) database = ObjectProperty() pilot_df = ObjectProperty(force_dispatch=True) tub_df = ObjectProperty(force_dispatch=True) def train_call(self, model_type, *args): # remove car directory from path tub_path = tub_screen().ids.tub_loader.tub.base_path transfer = self.ids.transfer_spinner.text if transfer != 'Choose transfer model': transfer = os.path.join(self.config.MODELS_PATH, transfer + '.h5') else: transfer = None try: history = train(self.config, tub_paths=tub_path, model_type=model_type, transfer=transfer, comment=self.ids.comment.text) self.ids.status.text = f'Training completed.' self.ids.train_button.state = 'normal' self.ids.transfer_spinner.text = 'Choose transfer model' self.reload_database() except Exception as e: self.ids.status.text = f'Train error {e}' def train(self, model_type): self.config.SHOW_PLOT = False Thread(target=self.train_call, args=(model_type,)).start() self.ids.status.text = f'Training started.' 
self.ids.comment.text = 'Comment' def set_config_attribute(self, input): try: val = json.loads(input) except ValueError: val = input att = self.ids.cfg_spinner.text.split(':')[0] setattr(self.config, att, val) self.ids.cfg_spinner.values = self.value_list() self.ids.status.text = f'Setting {att} to {val} of type ' \ f'{type(val).__name__}' def value_list(self): if self.config: return [f'{k}: {v}' for k, v in self.config.__dict__.items()] else: return ['select'] def on_config(self, obj, config): if self.config and self.ids: self.ids.cfg_spinner.values = self.value_list() self.reload_database() def reload_database(self): if self.config: self.database = PilotDatabase(self.config) def on_database(self, obj, database): if self.ids.check.state == 'down': self.pilot_df, self.tub_df = self.database.to_df_tubgrouped() self.ids.scroll_tubs.text = self.tub_df.to_string() else: self.pilot_df = self.database.to_df() self.tub_df = pd.DataFrame() self.ids.scroll_tubs.text = '' self.pilot_df.drop(columns=['History', 'Config'], errors='ignore', inplace=True) text = self.pilot_df.to_string(formatters=self.formatter()) self.ids.scroll_pilots.text = text values = ['Choose transfer model'] if not self.pilot_df.empty: values += self.pilot_df['Name'].tolist() self.ids.transfer_spinner.values = values @staticmethod def formatter(): def time_fmt(t): fmt = '%Y-%m-%d %H:%M:%S' return datetime.fromtimestamp(t).strftime(format=fmt) def transfer_fmt(model_name): return model_name.replace('.h5', '') return {'Time': time_fmt, 'Transfer': transfer_fmt} class CarScreen(Screen): """ Screen for interacting with the car. """ config = ObjectProperty(force_dispatch=True, allownone=True) files = ListProperty() car_dir = StringProperty(rc_handler.data.get('robot_car_dir', '~/mycar')) pull_bar = NumericProperty(0) push_bar = NumericProperty(0) event = ObjectProperty(None, allownone=True) connection = ObjectProperty(None, allownone=True) pid = NumericProperty(None, allownone=True) pilots = ListProperty() is_connected = BooleanProperty(False) def initialise(self): self.event = Clock.schedule_interval(self.connected, 3) def list_remote_dir(self, dir): if self.is_connected: cmd = f'ssh {self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}' + \ f' "ls {dir}"' listing = os.popen(cmd).read() adjusted_listing = listing.split('\n')[1:-1] return adjusted_listing else: return [] def list_car_dir(self, dir): self.car_dir = dir self.files = self.list_remote_dir(dir) # non-empty director found if self.files: rc_handler.data['robot_car_dir'] = dir def update_pilots(self): model_dir = os.path.join(self.car_dir, 'models') self.pilots = self.list_remote_dir(model_dir) def pull(self, tub_dir): target = f'{self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}' + \ f':{os.path.join(self.car_dir, tub_dir)}' if self.ids.create_dir.state == 'normal': target += '/' dest = self.config.DATA_PATH cmd = ['rsync', '-rv', '--progress', '--partial', target, dest] Logger.info('car pull: ' + str(cmd)) proc = Popen(cmd, shell=False, stdout=PIPE, text=True, encoding='utf-8', universal_newlines=True) repeats = 100 call = partial(self.show_progress, proc, repeats, True) event = Clock.schedule_interval(call, 0.0001) def send_pilot(self): src = self.config.MODELS_PATH cmd = ['rsync', '-rv', '--progress', '--partial', src, f'{self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}:' + f'{self.car_dir}'] Logger.info('car push: ' + ' '.join(cmd)) proc = Popen(cmd, shell=False, stdout=PIPE, encoding='utf-8', universal_newlines=True) repeats = 1 call = partial(self.show_progress, 
proc, repeats, False) event = Clock.schedule_interval(call, 0.0001) def show_progress(self, proc, repeats, is_pull, e): if proc.poll() is not None: # call ended this stops the schedule return False # find the next repeats lines with update info count = 0 while True: stdout_data = proc.stdout.readline() if stdout_data: # find 'to-check=33/4551)' which is end of line pattern = 'to-check=(.*)\)' res = re.search(pattern, stdout_data) if res: if count < repeats: count += 1 else: remain, total = tuple(res.group(1).split('/')) bar = 100 * (1. - float(remain) / float(total)) if is_pull: self.pull_bar = bar else: self.push_bar = bar return True else: # end of stream command completed if is_pull: button = self.ids['pull_tub'] self.pull_bar = 0 else: button = self.ids['send_pilots'] self.push_bar = 0 self.update_pilots() button.disabled = False return False def connected(self, event): if not self.config: return if self.connection is None: if not hasattr(self.config, 'PI_USERNAME') or \ not hasattr(self.config, 'PI_HOSTNAME'): self.ids.connected.text = 'Requires PI_USERNAME, PI_HOSTNAME' return # run new command to check connection status cmd = ['ssh', '-o ConnectTimeout=3', f'{self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}', 'date'] Logger.info('car check: ' + ' '.join(cmd)) self.connection = Popen(cmd, shell=False, stdout=PIPE, text=True, encoding='utf-8', universal_newlines=True) else: # ssh is already running, check where we are return_val = self.connection.poll() self.is_connected = False if return_val is None: # command still running, do nothing and check next time again status = 'Awaiting connection...' self.ids.connected.color = 0.8, 0.8, 0.0, 1 else: # command finished, check if successful and reset connection if return_val == 0: status = 'Connected' self.ids.connected.color = 0, 0.9, 0, 1 self.is_connected = True else: status = 'Disconnected' self.ids.connected.color = 0.9, 0, 0, 1 self.connection = None self.ids.connected.text = status def drive(self): model_args = '' if self.ids.pilot_spinner.text != 'No pilot': model_path = os.path.join(self.car_dir, "models", self.ids.pilot_spinner.text) model_args = f'--type {self.ids.type_spinner.text} ' + \ f'--model {model_path}' cmd = ['ssh', f'{self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}', f'source env/bin/activate; cd {self.car_dir}; ./manage.py ' f'drive {model_args} 2>&1'] Logger.info(f'car connect: {cmd}') proc = Popen(cmd, shell=False, stdout=PIPE, text=True, encoding='utf-8', universal_newlines=True) while True: stdout_data = proc.stdout.readline() if stdout_data: # find 'PID: 12345' pattern = 'PID: .*' res = re.search(pattern, stdout_data) if res: try: self.pid = int(res.group(0).split('PID: ')[1]) Logger.info(f'car connect: manage.py drive PID: ' f'{self.pid}') except Exception as e: Logger.error(f'car connect: {e}') return Logger.info(f'car connect: {stdout_data}') else: return def stop(self): if self.pid: cmd = f'ssh {self.config.PI_USERNAME}@{self.config.PI_HOSTNAME} '\ + f'kill {self.pid}' out = os.popen(cmd).read() Logger.info(f"car connect: Kill PID {self.pid} + {out}") self.pid = None class StartScreen(Screen): img_path = os.path.realpath(os.path.join( os.path.dirname(__file__), '../parts/web_controller/templates/static/donkeycar-logo-sideways.png')) pass class DonkeyApp(App): start_screen = None tub_screen = None train_screen = None pilot_screen = None car_screen = None title = 'Donkey Manager' def initialise(self, event): self.tub_screen.ids.config_manager.load_action() self.pilot_screen.initialise(event) 
self.car_screen.initialise() # This builds the graph which can only happen after everything else # has run, therefore delay until the next round. Clock.schedule_once(self.tub_screen.ids.tub_loader.update_tub) def build(self): self.start_screen = StartScreen(name='donkey') self.tub_screen = TubScreen(name='tub') self.train_screen = TrainScreen(name='train') self.pilot_screen = PilotScreen(name='pilot') self.car_screen = CarScreen(name='car') Window.bind(on_keyboard=self.tub_screen.on_keyboard) Window.bind(on_keyboard=self.pilot_screen.on_keyboard) Clock.schedule_once(self.initialise) sm = ScreenManager() sm.add_widget(self.start_screen) sm.add_widget(self.tub_screen) sm.add_widget(self.train_screen) sm.add_widget(self.pilot_screen) sm.add_widget(self.car_screen) return sm def main(): tub_app = DonkeyApp() tub_app.run() if __name__ == '__main__': main()
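A minimal, self-contained sketch of the field-to-expression rewrite performed by TubFilter.create_filter_string above. The Record class, field names and values here are hypothetical stand-ins for the donkeycar tub record type, used only to make the transformation concrete.

class Record:
    """Hypothetical stand-in for a donkeycar tub record."""
    def __init__(self, underlying):
        self.underlying = underlying


def create_filter_string(filter_text, fields, record_name='record'):
    # Wrap every known field name so that, e.g., 'user/throttle > 0.1'
    # becomes 'record.underlying["user/throttle"] > 0.1'.
    for field in fields:
        parts = filter_text.split(field)
        if len(parts) > 1:
            filter_text = f'{record_name}.underlying["{field}"]'.join(parts)
    return filter_text


record = Record({'user/throttle': 0.25, 'user/angle': -0.1})
expr = create_filter_string('user/throttle > 0.1', record.underlying.keys())
print(expr)        # record.underlying["user/throttle"] > 0.1
print(eval(expr))  # True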
test_partition_20.py
import threading import pytest from base.partition_wrapper import ApiPartitionWrapper from base.client_base import TestcaseBase from common import common_func as cf from common import common_type as ct from common.common_type import CaseLabel, CheckTasks from common.code_mapping import PartitionErrorMessage prefix = "partition_" class TestPartitionParams(TestcaseBase): """ Test case of partition interface in parameters""" @pytest.mark.tags(CaseLabel.L0) def test_partition_default(self): """ target: verify create a partition method: create a partition expected: create successfully """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) description = cf.gen_unique_str("desc_") self.init_partition_wrap(collection_w, partition_name, description=description, check_task=CheckTasks.check_partition_property, check_items={"name": partition_name, "description": description, "is_empty": True, "num_entities": 0} ) # check that the partition has been created assert collection_w.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("partition_name", [""]) def test_partition_empty_name(self, partition_name): """ target: verify create a partition with empty name method: create a partition with empty name expected: raise exception """ # create a collection collection_w = self.init_collection_wrap() # create partition self.partition_wrap.init_partition(collection_w.collection, partition_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "Partition name should not be empty"}) @pytest.mark.tags(CaseLabel.L2) def test_partition_empty_description(self): """ target: verify create a partition with empty description method: create a partition with empty description expected: create successfully """ # create collection collection_w = self.init_collection_wrap() # init partition partition_name = cf.gen_unique_str(prefix) description = "" self.init_partition_wrap(collection_w, partition_name, description=description, check_task=CheckTasks.check_partition_property, check_items={"name": partition_name, "description": description, "is_empty": True, "num_entities": 0} ) # check that the partition has been created assert collection_w.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L2) def test_partition_max_description_length(self): """ target: verify create a partition with 255 length name and 1024 length description method: create a partition with 255 length name and 1024 length description expected: create successfully """ # create collection collection_w = self.init_collection_wrap() # init partition partition_name = cf.gen_str_by_length(255) description = cf.gen_str_by_length(2048) self.init_partition_wrap(collection_w, partition_name, description=description, check_task=CheckTasks.check_partition_property, check_items={"name": partition_name, "description": description, "is_empty": True} ) @pytest.mark.tags(CaseLabel.L1) def test_partition_dup_name(self): """ target: verify create partitions with duplicate names method: create partitions with duplicate names expected: 1. create successfully 2. 
the same partition returned with diff object ids """ # create a collection collection_w = self.init_collection_wrap() # create two partitions partition_name = cf.gen_unique_str(prefix) description = cf.gen_unique_str() partition_w1 = self.init_partition_wrap(collection_w, partition_name, description) partition_w2 = self.init_partition_wrap(collection_w, partition_name, description) # public check func to be extracted assert id(partition_w1.partition) != id(partition_w2.partition) assert partition_w1.name == partition_w2.name assert partition_w1.description == partition_w2.description @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("description", ct.get_invalid_strs) def test_partition_special_chars_description(self, description): """ target: verify create a partition with special characters in description method: create a partition with special characters in description expected: create successfully """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) self.init_partition_wrap(collection_w, partition_name, description=description, check_task=CheckTasks.check_partition_property, check_items={"name": partition_name, "description": description, "is_empty": True, "num_entities": 0} ) assert collection_w.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L0) def test_partition_default_name(self): """ target: verify create a partition with default name method: 1. get the _default partition 2. create a partition with _default name expected: the same partition returned """ # create collection collection_w = self.init_collection_wrap() # check that the default partition exists assert collection_w.has_partition(ct.default_partition_name)[0] # check that can get the _default partition collection, _ = collection_w.partition(ct.default_partition_name) # check that init the _default partition object partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name) assert collection.name == partition_w.name @pytest.mark.tags(CaseLabel.L2) def test_partition_max_length_name(self): """ target: verify create a partition with max length(256) name method: create a partition with max length name expected: raise exception """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_str_by_length(256) self.partition_wrap.init_partition(collection_w.collection, partition_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, 'err_msg': "is illegal"} ) @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("partition_name", ct.get_invalid_strs) def test_partition_invalid_name(self, partition_name): """ target: verify create a partition with invalid name method: create a partition with invalid names expected: raise exception """ # create collection collection_w = self.init_collection_wrap() # create partition self.partition_wrap.init_partition(collection_w.collection, partition_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, 'err_msg': "is illegal"} ) # TODO: need an error code issue #5144 and assert independently @pytest.mark.tags(CaseLabel.L2) def test_partition_none_collection(self): """ target: verify create a partition with none collection method: create a partition with none collection expected: raise exception """ # create partition with collection is None partition_name = cf.gen_unique_str(prefix) self.partition_wrap.init_partition(collection=None, name=partition_name, check_task=CheckTasks.err_res, 
check_items={ct.err_code: 1, ct.err_msg: "must be pymilvus.Collection"}) @pytest.mark.tags(CaseLabel.L1) def test_partition_drop(self): """ target: verify drop a partition in one collection method: 1. create a partition in one collection 2. drop the partition expected: drop successfully """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) partition_w = self.init_partition_wrap(collection_w, partition_name) # check that the partition exists assert collection_w.has_partition(partition_name)[0] # drop partition partition_w.drop() # check that the partition not exists assert not collection_w.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L1) def test_partition_release(self): """ target: verify release partition method: 1. create a collection and two partitions 2. insert data into each partition 3. flush and load the both partitions 4. release partition1 5. release partition1 twice expected: 1. the 1st partition is released 2. the 2nd partition is not released """ # create collection collection_w = self.init_collection_wrap() # create two partitions partition_w1 = self.init_partition_wrap(collection_w) partition_w2 = self.init_partition_wrap(collection_w) # insert data to two partition partition_w1.insert(cf.gen_default_list_data()) partition_w2.insert(cf.gen_default_list_data()) # load two partitions partition_w1.load() partition_w2.load() # search two partitions search_vectors = cf.gen_vectors(1, ct.default_dim) res1, _ = partition_w1.search(data=search_vectors, anns_field=ct.default_float_vec_field_name, params={"nprobe": 32}, limit=1) res2, _ = partition_w2.search(data=search_vectors, anns_field=ct.default_float_vec_field_name, params={"nprobe": 32}, limit=1) assert len(res1) == 1 and len(res2) == 1 # release the first partition partition_w1.release() # check result res1, _ = partition_w1.search(data=search_vectors, anns_field=ct.default_float_vec_field_name, params={"nprobe": 32}, limit=1, check_task=ct.CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"}) res2, _ = partition_w2.search(data=search_vectors, anns_field=ct.default_float_vec_field_name, params={"nprobe": 32}, limit=1) assert len(res2) == 1 @pytest.mark.tags(CaseLabel.L1) @pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10), cf.gen_default_list_data(10), cf.gen_default_tuple_data(10)]) def test_partition_insert(self, data): """ target: verify insert entities multiple times method: 1. create a collection and a partition 2. partition.insert(data) 3. 
insert data again expected: insert data successfully """ nums = 10 # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) partition_w = self.init_partition_wrap(collection_w, partition_name, check_task=CheckTasks.check_partition_property, check_items={"name": partition_name, "is_empty": True, "num_entities": 0} ) # insert data partition_w.insert(data) # self._connect().flush([collection_w.name]) # don't need flush for issue #5737 assert not partition_w.is_empty assert partition_w.num_entities == nums # insert data partition_w.insert(data) # self._connect().flush([collection_w.name]) assert not partition_w.is_empty assert partition_w.num_entities == (nums + nums) class TestPartitionOperations(TestcaseBase): """ Test case of partition interface in operations """ @pytest.mark.tags(CaseLabel.L1) def test_partition_dropped_collection(self): """ target: verify create partition against a dropped collection method: 1. create a collection 2. drop collection 3. create partition in collection expected: raise exception """ # create collection collection_w = self.init_collection_wrap() # drop collection collection_w.drop() # create partition failed self.partition_wrap.init_partition(collection_w.collection, cf.gen_unique_str(prefix), check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "can't find collection"}) @pytest.mark.tags(CaseLabel.L2) def test_partition_same_name_in_diff_collections(self): """ target: verify create partitions with same name in diff collections method: 1. create a partition in collection1 2. create a partition in collection2 expected: create successfully """ # create two collections collection_w1 = self.init_collection_wrap() collection_w2 = self.init_collection_wrap() # create 2 partitions in 2 diff collections partition_name = cf.gen_unique_str(prefix) self.init_partition_wrap(collection_wrap=collection_w1, name=partition_name) self.init_partition_wrap(collection_wrap=collection_w2, name=partition_name) # check result assert collection_w1.has_partition(partition_name)[0] assert collection_w2.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L2) def test_partition_multi_partitions_in_collection(self): """ target: verify create multiple partitions in one collection method: create multiple partitions in one collection expected: create successfully """ # create collection collection_w = self.init_collection_wrap() for _ in range(10): partition_name = cf.gen_unique_str(prefix) # create partition with different names and check the partition exists self.init_partition_wrap(collection_w, partition_name) assert collection_w.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L2) @pytest.mark.skip(reason="skip temporarily for debug") def test_partition_maximum_partitions(self): """ target: verify create maximum partitions method: 1. create maximum partitions 2. 
create one more partition expected: raise exception """ threads_num = 8 threads = [] def create_partition(collection, threads_n): for _ in range(ct.max_partition_num // threads_n): name = cf.gen_unique_str(prefix) par_wrap = ApiPartitionWrapper() par_wrap.init_partition(collection, name, check_task=CheckTasks.check_nothing) collection_w = self.init_collection_wrap() for _ in range(threads_num): t = threading.Thread(target=create_partition, args=(collection_w.collection, threads_num)) threads.append(t) t.start() for t in threads: t.join() p_name = cf.gen_unique_str() self.partition_wrap.init_partition( collection_w.collection, p_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "maximum partition's number should be limit to 4096"}) # TODO: Try to verify load collection with a large number of partitions. #11651 @pytest.mark.tags(CaseLabel.L0) def test_partition_drop_default_partition(self): """ target: verify drop the _default partition method: drop the _default partition expected: raise exception """ # create collection collection_w = self.init_collection_wrap() # get the default partition default_partition, _ = collection_w.partition(ct.default_partition_name) partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name) assert default_partition.name == partition_w.name # verify that drop partition with error partition_w.drop(check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "default partition cannot be deleted"}) @pytest.mark.tags(CaseLabel.L1) def test_partition_drop_partition_twice(self): """ target: verify drop the same partition twice method: 1.create a partition with default schema 2. drop the partition 3. drop the same partition again expected: raise exception for 2nd time """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) partition_w = self.init_partition_wrap(collection_w, partition_name) collection_w.has_partition(partition_name) # drop partition partition_w.drop() assert not collection_w.has_partition(partition_name)[0] # verify that drop the partition again with exception partition_w.drop(check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist}) @pytest.mark.tags(CaseLabel.L2) def test_partition_create_and_drop_multi_times(self): """ target: verify create and drop for times method: 1.create a partition with default schema 2. drop the partition 3. loop #1 and #2 for times expected: create and drop successfully """ # create collection collection_w = self.init_collection_wrap() # range for 5 times partition_name = cf.gen_unique_str(prefix) for i in range(5): # create partition and check that the partition exists partition_w = self.init_partition_wrap(collection_w, partition_name) assert collection_w.has_partition(partition_name)[0] # drop partition and check that the partition not exists partition_w.drop() assert not collection_w.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L1) def test_partition_drop_non_empty_partition(self): """ target: verify drop a partition which has data inserted method: 1.create a partition with default schema 2. insert some data 3. 
drop the partition expected: drop successfully """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) partition_w = self.init_partition_wrap(collection_w, partition_name) assert collection_w.has_partition(partition_name)[0] # insert data to partition partition_w.insert(cf.gen_default_dataframe_data()) # drop partition partition_w.drop() assert not collection_w.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L2) @pytest.mark.parametrize("data", [cf.gen_default_list_data(nb=3000)]) @pytest.mark.parametrize("index_param", cf.gen_simple_index()) def test_partition_drop_indexed_partition(self, data, index_param): """ target: verify drop an indexed partition method: 1. create a partition 2. insert same data 3. create an index 5. drop the partition expected: drop successfully """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) partition_w = self.init_partition_wrap(collection_w, partition_name) assert collection_w.has_partition(partition_name)[0] # insert data to partition ins_res, _ = partition_w.insert(data) assert len(ins_res.primary_keys) == len(data[0]) # create index of collection collection_w.create_index(ct.default_float_vec_field_name, index_param) # drop partition partition_w.drop() assert not collection_w.has_partition(partition_name)[0] @pytest.mark.tags(CaseLabel.L2) def test_partition_release_empty_partition(self): """ target: verify release an empty partition method: 1. create a partition 2. release the partition expected: release successfully """ # create partition partition_w = self.init_partition_wrap() assert partition_w.is_empty # release partition partition_w.release() # TODO: assert no more memory consumed @pytest.mark.tags(CaseLabel.L2) def test_partition_release_dropped_partition(self): """ target: verify release a dropped partition method: 1. create a partition 2. drop the partition 3. release the partition expected: raise exception """ # create partition partition_w = self.init_partition_wrap() # drop partition partition_w.drop() # release the dropped partition and check err response partition_w.release(check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist}) @pytest.mark.tags(CaseLabel.L2) def test_partition_release_dropped_collection(self): """ target: verify release a dropped collection method: 1. create a collection and partition 2. drop the collection 3. release the partition expected: raise exception """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) partition_w = self.init_partition_wrap(collection_w, partition_name) assert collection_w.has_partition(partition_name)[0] # drop collection collection_w.drop() # release the partition and check err response partition_w.release(check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "can't find collection"}) @pytest.mark.tags(CaseLabel.L1) def test_partition_release_after_collection_released(self): """ target: verify release a partition after the collection released method: 1. create a collection and partition 2. insert some data 3. release the collection 4. 
release the partition expected: partition released successfully """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) partition_w = self.init_partition_wrap(collection_w, partition_name) assert collection_w.has_partition(partition_name)[0] # insert data to partition data = cf.gen_default_list_data() partition_w.insert(data) assert partition_w.num_entities == len(data[0]) assert collection_w.num_entities == len(data[0]) # load partition partition_w.load() # search of partition search_vectors = cf.gen_vectors(1, ct.default_dim) res_1, _ = partition_w.search(data=search_vectors, anns_field=ct.default_float_vec_field_name, params={"nprobe": 32}, limit=1) assert len(res_1) == 1 # release collection collection_w.release() # search of partition res_2, _ = partition_w.search(data=search_vectors, anns_field=ct.default_float_vec_field_name, params={"nprobe": 32}, limit=1, check_task=ct.CheckTasks.err_res, check_items={ct.err_code: 0, ct.err_msg: "not loaded into memory"}) # release partition partition_w.release() @pytest.mark.tags(CaseLabel.L1) def test_partition_insert_default_partition(self): """ target: verify insert data into _default partition method: 1. create a collection 2. insert some data into _default partition expected: insert successfully """ # create collection collection_w = self.init_collection_wrap() # get the default partition partition_name = ct.default_partition_name assert collection_w.has_partition(partition_name)[0] partition_w = self.init_partition_wrap(collection_w, partition_name) # insert data to partition data = cf.gen_default_dataframe_data() partition_w.insert(data) # self._connect().flush([collection_w.name]) assert partition_w.num_entities == len(data) @pytest.mark.tags(CaseLabel.L1) def test_partition_insert_dropped_partition(self): """ target: verify insert data into a dropped partition method: 1. create a collection 2. insert some data into a dropped partition expected: raise exception """ # create partition partition_w = self.init_partition_wrap() # drop partition partition_w.drop() # insert data to partition partition_w.insert(cf.gen_default_dataframe_data(), check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"}) # TODO: update the assert error @pytest.mark.tags(CaseLabel.L1) def test_partition_insert_dropped_collection(self): """ target: verify insert data into a dropped collection method: 1. create a collection 2. insert some data into a dropped collection expected: raise exception """ # create collection collection_w = self.init_collection_wrap() # create partition partition_name = cf.gen_unique_str(prefix) partition_w = self.init_partition_wrap(collection_w, partition_name) assert collection_w.has_partition(partition_name)[0] # drop collection collection_w.drop() # insert data to partition partition_w.insert(cf.gen_default_dataframe_data(), check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "None Type"}) @pytest.mark.tags(CaseLabel.L2) def test_partition_insert_maximum_size_data(self): """ target: verify insert maximum size data(256M?) a time method: 1. create a partition 2. 
insert maximum size data
        expected: insert successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()

        # create partition
        partition_w = self.init_partition_wrap(collection_w)

        # insert data to partition
        max_size = 100000  # TODO: clarify the max size of data
        ins_res, _ = partition_w.insert(cf.gen_default_dataframe_data(max_size),
                                        timeout=40)
        assert len(ins_res.primary_keys) == max_size
        # self._connect().flush([collection_w.name])
        assert partition_w.num_entities == max_size

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("dim", [ct.default_dim - 1, ct.default_dim + 1])
    def test_partition_insert_mismatched_dimensions(self, dim):
        """
        target: verify insert data with a mismatched dimension
        method: 1. create a collection with the default dim
                2. insert data with a mismatched dim
        expected: raise exception
        """
        # create partition
        partition_w = self.init_partition_wrap()

        data = cf.gen_default_list_data(nb=10, dim=dim)
        # insert data to partition
        partition_w.insert(data, check_task=CheckTasks.err_res,
                           check_items={ct.err_code: 1,
                                        ct.err_msg: "but entities field dim"})

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("sync", [True, False])
    def test_partition_insert_sync(self, sync):
        """
        target: verify insert sync
        method: 1. create a partition
                2. insert data in sync
        expected: insert successfully
        """
        pass
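The cases above go through the test framework's wrapper classes. The following is a hedged sketch of the raw pymilvus ORM calls that the default L0 case roughly corresponds to, assuming a Milvus server reachable at localhost:19530; the collection name, field names and dimension are made up for illustration.

from pymilvus import (Collection, CollectionSchema, DataType, FieldSchema,
                      Partition, connections)

connections.connect(host='localhost', port='19530')

schema = CollectionSchema([
    FieldSchema(name='pk', dtype=DataType.INT64, is_primary=True),
    FieldSchema(name='vec', dtype=DataType.FLOAT_VECTOR, dim=128),
])
collection = Collection('partition_demo', schema)

# Roughly what init_partition_wrap() delegates to: create a named partition
# and verify that it exists and is empty.
partition = Partition(collection, 'partition_demo_p0', description='demo')
assert collection.has_partition('partition_demo_p0')
assert partition.is_empty and partition.num_entities == 0

partition.drop()
collection.drop()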
FlowFeatures.py
# coding: utf8
'''
__author__: helloriku
__time__: 2020/7/6
'''
import argparse
import threading
import multiprocessing
import time
import json
import signal
from scapy.all import *
# from scapy.utils import PcapReader


# A single TCP or UDP stream
class tcp_udp_stream:
    def __init__(self):
        self.id = {}                # 4-tuple (client/server address and port)
        self.first_t = 0            # timestamp of the first packet
        self.p_t = 0                # timestamp of the latest packet
        self.s2c_t = 0              # timestamp of the latest s2c packet
        self.c2s_t = 0              # timestamp of the latest c2s packet
        # self.state = 0            # current TCP state
        # self.timeout = 0          # timeout flag, 1 means timed out (stream finished)
        self.p_count = 0            # number of packets
        self.s2c_count = 0          # number of s2c packets
        self.c2s_count = 0          # number of c2s packets
        self.c2s_t_inter = [0]      # c2s inter-arrival time sequence
        self.s2c_t_inter = [0]      # s2c inter-arrival time sequence
        self.sess_t_inter = [0]     # bidirectional inter-arrival time sequence (all packets)
        self.c2s_len = []           # c2s packet size sequence (bytes)
        self.s2c_len = []           # s2c packet size sequence (bytes)
        self.p_len = []             # bidirectional packet size sequence (all packets)
        self.total_len = 0          # total bytes
        self.c2s_total_len = 0      # total c2s bytes
        self.s2c_total_len = 0      # total s2c bytes


def print_flows(flows, flows_type):
    global is_sigint_up
    # keep printing while no interrupt signal has been received
    while not is_sigint_up:
        print('=' * 50, flows_type, "=" * 50)
        print('client <==> server | packets | Bytes | c2s Packets | c2s Bytes | s2c Packets | s2c Bytes | Duration')
        for flow in flows.values():
            id = flow.id['c_addr'] + ':' + str(flow.id['c_port']) + ' <====> ' \
                 + flow.id['s_addr'] + ':' + str(flow.id['s_port'])
            print(id, ' | ', flow.p_count, ' | ', flow.total_len, ' | ', flow.c2s_count, ' | ',
                  flow.c2s_total_len, ' | ', flow.s2c_count, ' | ', flow.s2c_total_len, ' | ',
                  flow.p_t - flow.first_t)
        time.sleep(15)


def store_flows(flows, flows_type, outfile):
    res = {}
    count = 0
    # print result
    print('=' * 50, flows_type, "=" * 50)
    print(' client <=====> server | packets | Bytes | c2s Packets | c2s Bytes | s2c Packets | s2c Bytes | Duration')
    for flow in flows.values():
        id = flow.id['c_addr'] + ':' + str(flow.id['c_port']) + ' <====> ' + flow.id['s_addr'] + ':' + str(
            flow.id['s_port'])
        print(id, ' | ', flow.p_count, ' | ', flow.total_len, ' | ', flow.c2s_count, ' | ', flow.c2s_total_len,
              ' | ', flow.s2c_count, ' | ', flow.s2c_total_len, ' | ', flow.p_t - flow.first_t)
        # res.update({str(count) : flow.__dict__})
        res.update({str(count): {'id': flow.id, 'p_count': flow.p_count, 's2c_count': flow.s2c_count,
                                 'c2s_count': flow.c2s_count, 'total_len': flow.total_len,
                                 's2c_total_len': flow.s2c_total_len, 'c2s_total_len': flow.c2s_total_len,
                                 'p_len': flow.p_len, 's2c_len': flow.s2c_len, 'c2s_len': flow.c2s_len,
                                 'Duration': flow.p_t - flow.first_t, 'sess_t_inter': flow.sess_t_inter,
                                 's2c_t_inter': flow.s2c_t_inter, 'c2s_t_inter': flow.c2s_t_inter}})
        count += 1
    # store result
    with open(outfile, "w", encoding='utf-8') as f:
        json.dump(res, f, indent=2, ensure_ascii=False)


def tcp_udp_callback(p):
    global tcp_flows
    global udp_flows
    if p.haslayer("TCP"):
        prot = p.getlayer("TCP")
        flows = tcp_flows
        flow_type = 'TCP'
    elif p.haslayer("UDP"):
        prot = p.getlayer("UDP")
        flows = udp_flows
        flow_type = 'UDP'
    else:
        return
    try:
        dic = {}
        # server <--> client
        s2c = 0
        if prot.sport <= prot.dport:
            dic['s_addr'] = p[1].src  # s2c
            dic['c_addr'] = p[1].dst
            dic['s_port'] = prot.sport
            dic['c_port'] = prot.dport
            s2c = 1
        else:
            dic['s_addr'] = p[1].dst  # c2s
            dic['c_addr'] = p[1].src
            dic['s_port'] = prot.dport
            dic['c_port'] = prot.sport

        prot_s = tcp_udp_stream()
        p_len = len(corrupt_bytes(p))
        if str(dic) not in flows.keys():
            # a new stream
            prot_s.id = dic
            prot_s.first_t = prot_s.p_t = p.time
            prot_s.p_count = 1
            prot_s.p_len.append(p_len)
            prot_s.total_len = p_len
            # prot_s.state =
            if s2c:
                prot_s.s2c_count = 1
                prot_s.s2c_t = p.time
                prot_s.s2c_len.append(p_len)
                prot_s.s2c_total_len += p_len
            else:
                prot_s.c2s_count = 1
                prot_s.c2s_t = p.time
                prot_s.c2s_len.append(p_len)
                prot_s.c2s_total_len += p_len
            print("New ", flow_type, " stream: ", p[1].src, ':', prot.sport, "===>", p[1].dst, ':', prot.dport)
            flows[str(dic)] = prot_s
        else:
            # an existing stream
            # print("Old stream: ", p[1].src, ':', prot.sport, "===>", p[1].dst, ':', prot.dport)
            flows[str(dic)].p_count += 1
            flows[str(dic)].p_len.append(p_len)
            flows[str(dic)].total_len += p_len
            flows[str(dic)].sess_t_inter.append(p.time - flows[str(dic)].p_t)
            flows[str(dic)].p_t = p.time
            # prot_s.state =
            if s2c:
                if flows[str(dic)].s2c_count > 0:
                    # if this is the first s2c packet the interval stays 0
                    flows[str(dic)].s2c_t_inter.append(p.time - flows[str(dic)].s2c_t)
                flows[str(dic)].s2c_count += 1
                flows[str(dic)].s2c_t = p.time
                flows[str(dic)].s2c_len.append(p_len)
                flows[str(dic)].s2c_total_len += p_len
            else:
                if flows[str(dic)].c2s_count > 0:
                    flows[str(dic)].c2s_t_inter.append(p.time - flows[str(dic)].c2s_t)
                flows[str(dic)].c2s_count += 1
                flows[str(dic)].c2s_t = p.time
                flows[str(dic)].c2s_len.append(p_len)
                flows[str(dic)].c2s_total_len += p_len
    except AttributeError:
        pass


def local_sniff():
    sniff(prn=tcp_udp_callback)


# handle ctrl+c
def sigint_handler(signum, frame):
    global is_sigint_up
    is_sigint_up = True
    print('caught interrupt signal!')
    print('Please do not close the window, the result is being stored...')


if __name__ == '__main__':
    print('welcome to the scapy-based split-flow tool.\n')
    parser = argparse.ArgumentParser(description='online/offline split-flow tool')
    parser.add_argument('-p', '--pcap', dest='pcap_file', action='store',
                        help='offline mode, read a pcap file')
    args = parser.parse_args()

    tcp_flows = {}
    udp_flows = {}
    tcp_type = 'TCP'
    udp_type = 'UDP'

    # handle interrupts (online mode)
    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigint_handler)
    is_sigint_up = False

    # offline
    if args.pcap_file:
        print('offline mode.\nread file: ', args.pcap_file)
        print('Please wait ......')
        start_t = time.time()
        packets = rdpcap(args.pcap_file)
        for p in packets:
            tcp_udp_callback(p)
        # sniff(offline = args.pcap_file, prn=tcp_udp_callback)
        file_tcp = args.pcap_file.split('.')[0] + '_tcp.json'
        file_udp = args.pcap_file.split('.')[0] + '_udp.json'
        # write each protocol's flows to its own output file
        store_flows(udp_flows, udp_type, file_udp)
        store_flows(tcp_flows, tcp_type, file_tcp)
        print('time spent : ', time.time() - start_t, 's.')
        print('output:')
        print(args.pcap_file.split('.')[0] + '_tcp.json')
        print(args.pcap_file.split('.')[0] + '_udp.json')
    # online
    else:
        threads = []
        # daemon threads: they terminate when the main thread ends
        t_tcp = threading.Thread(target=print_flows, args=(tcp_flows, tcp_type,), daemon=True)
        t_udp = threading.Thread(target=print_flows, args=(udp_flows, udp_type,), daemon=True)
        sn = threading.Thread(target=local_sniff, daemon=True)
        t_tcp.start()
        t_udp.start()
        sn.start()
        while 1:
            if not t_tcp.is_alive() and not t_udp.is_alive():
                break
            time.sleep(1)  # avoid busy-waiting while the capture threads run
        tmp = str(time.time())
        out_tcp = 'tcp_' + tmp + '.json'
        out_udp = 'udp_' + tmp + '.json'
        store_flows(udp_flows, udp_type, out_udp)
        store_flows(tcp_flows, tcp_type, out_tcp)
        print('output:')
        print('tcp_' + tmp + '.json')
        print('udp_' + tmp + '.json')

    # sniff(session=TCPSession, prn=lambda x: x.summary(), store=False)
    # sn = sniff(offline="H:\VPN-nonVPN(ISCXVPN2016)\pcaps\AIMchat2.pcapng",session = NetflowSession)
    # print(sn)
    # print(type(sn))
    # print(len(sn))
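A small standalone sketch of the flow-key normalisation used in tcp_udp_callback above: the endpoint with the lower port number is treated as the server, so both directions of the same connection map onto one dictionary key. The addresses and ports below are made-up examples.

def flow_key(src, sport, dst, dport):
    # Mirror of the 4-tuple ordering in tcp_udp_callback.
    if sport <= dport:
        return {'s_addr': src, 'c_addr': dst, 's_port': sport, 'c_port': dport}
    return {'s_addr': dst, 'c_addr': src, 's_port': dport, 'c_port': sport}


c2s = flow_key('10.0.0.2', 51544, '93.184.216.34', 443)  # client -> server
s2c = flow_key('93.184.216.34', 443, '10.0.0.2', 51544)  # server -> client
assert str(c2s) == str(s2c)  # both directions share one flow bucket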
iap_tunnel_websocket.py
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """WebSocket connection class for tunneling with Cloud IAP.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals import collections import threading import time from googlecloudsdk.api_lib.compute import iap_tunnel_websocket_helper as helper from googlecloudsdk.api_lib.compute import iap_tunnel_websocket_utils as utils from googlecloudsdk.core import exceptions from googlecloudsdk.core import http from googlecloudsdk.core import log from googlecloudsdk.core.util import retry MAX_WEBSOCKET_OPEN_WAIT_TIME_SEC = 60 MAX_RECONNECT_SLEEP_TIME_MS = 20 * 1000 # 20 seconds MAX_RECONNECT_WAIT_TIME_MS = 15 * 60 * 1000 # 15 minutes MAX_UNSENT_QUEUE_LENGTH = 5 ALL_DATA_SENT_WAIT_TIME_SEC = 10 RECONNECT_INITIAL_SLEEP_MS = 1500 class ConnectionCreationError(exceptions.Error): pass class ConnectionReconnectTimeout(exceptions.Error): pass class SubprotocolEarlyAckError(exceptions.Error): pass class SubprotocolEarlyDataError(exceptions.Error): pass class SubprotocolExtraConnectSuccessSid(exceptions.Error): pass class SubprotocolExtraReconnectSuccessAck(exceptions.Error): pass class SubprotocolInvalidAckError(exceptions.Error): pass class SubprotocolOutOfOrderAckError(exceptions.Error): pass class IapTunnelWebSocket(object): """Cloud IAP WebSocket class for tunnelling connections. It takes in local data (via Send()) which it sends over the websocket. It takes data from the websocket and gives it to data_handler_callback. """ def __init__(self, tunnel_target, get_access_token_callback, data_handler_callback, close_handler_callback, ignore_certs=False): self._tunnel_target = tunnel_target self._get_access_token_callback = get_access_token_callback self._data_handler_callback = data_handler_callback self._close_handler_callback = close_handler_callback self._ignore_certs = ignore_certs self._websocket_helper = None self._connect_msg_received = False self._connection_sid = None self._stopping = False self._close_message_sent = False self._send_and_reconnect_thread = None # Indicates if the local input gave an EOF. self._input_eof = False # Indicates that after getting a local input EOF, we have send all previous # local data over the websocket. 
self._sent_all = threading.Event() self._total_bytes_confirmed = 0 self._total_bytes_received = 0 self._total_bytes_received_and_acked = 0 self._unsent_data = collections.deque() self._unconfirmed_data = collections.deque() def __del__(self): if self._websocket_helper: self._websocket_helper.Close() def Close(self): """Close down local connection and WebSocket connection.""" self._stopping = True try: self._close_handler_callback() except: # pylint: disable=bare-except pass if self._websocket_helper: if not self._close_message_sent: self._websocket_helper.SendClose() self._close_message_sent = True self._websocket_helper.Close() def InitiateConnection(self): """Initiate the WebSocket connection.""" utils.CheckPythonVersion(self._ignore_certs) utils.ValidateParameters(self._tunnel_target) self._StartNewWebSocket() self._WaitForOpenOrRaiseError() self._send_and_reconnect_thread = threading.Thread( target=self._SendDataAndReconnectWebSocket) self._send_and_reconnect_thread.daemon = True self._send_and_reconnect_thread.start() def Send(self, bytes_to_send): """Send bytes over WebSocket connection. Args: bytes_to_send: The bytes to send. Must not be empty. Raises: ConnectionReconnectTimeout: If something is preventing data from being sent. """ while bytes_to_send: first_to_send = bytes_to_send[:utils.SUBPROTOCOL_MAX_DATA_FRAME_SIZE] bytes_to_send = bytes_to_send[utils.SUBPROTOCOL_MAX_DATA_FRAME_SIZE:] if first_to_send: self._EnqueueBytesWithWaitForReconnect(first_to_send) def LocalEOF(self): """Indicate that the local input gave an EOF. Send must not be called after this. """ self._input_eof = True if not self._unsent_data: self._sent_all.set() def WaitForAllSent(self): """Wait until all local data has been sent on the websocket. Blocks until either all data from Send() has been sent, or it times out waiting. Once true, always returns true. Even if this returns true, a reconnect could occur causing previously sent data to be resent. Must only be called after an EOF has been given to Send(). Returns: True on success, False on timeout. """ # If we didn't have any wait time, python2 would ignore ctrl-c when in # --listen-on-stdin mode. With a wait time, it pays attention to ctrl-c. # When doing ssh, the inner gcloud will continue behind the scenes until # either all data is sent or this times out. We don't want a weird hidden # gcloud staying around like that for a long time (even in the case of a # write block), so the wait time isn't very long. return self._sent_all.wait(ALL_DATA_SENT_WAIT_TIME_SEC) def _AttemptReconnect(self, reconnect_func): """Attempt to reconnect with a new WebSocket.""" r = retry.Retryer(max_wait_ms=MAX_RECONNECT_WAIT_TIME_MS, exponential_sleep_multiplier=1.1, wait_ceiling_ms=MAX_RECONNECT_SLEEP_TIME_MS) try: r.RetryOnException(func=reconnect_func, sleep_ms=RECONNECT_INITIAL_SLEEP_MS) except retry.RetryException: log.warning('Unable to reconnect within [%d] ms', MAX_RECONNECT_WAIT_TIME_MS, exc_info=True) self._StopConnectionAsync() def _EnqueueBytesWithWaitForReconnect(self, bytes_to_send): """Add bytes to the queue; sleep waiting for reconnect if queue is full. Args: bytes_to_send: The local bytes to send over the websocket. At most utils.SUBPROTOCOL_MAX_DATA_FRAME_SIZE. Raises: ConnectionReconnectTimeout: If something is preventing data from being sent. 
""" end_time = time.time() + MAX_RECONNECT_WAIT_TIME_MS / 1000.0 while time.time() < end_time: if len(self._unsent_data) < MAX_UNSENT_QUEUE_LENGTH: self._unsent_data.append(bytes_to_send) log.debug('ENQUEUED data_len [%d] bytes_to_send[:20] [%r]', len(bytes_to_send), bytes_to_send[:20]) return time.sleep(0.01) raise ConnectionReconnectTimeout() def _HasConnected(self): """Returns true if we received a connect message.""" return self._connect_msg_received def _IsClosed(self): return ((self._websocket_helper and self._websocket_helper.IsClosed()) or (self._send_and_reconnect_thread and not self._send_and_reconnect_thread.isAlive())) def _StartNewWebSocket(self): """Start a new WebSocket and thread to listen for incoming data.""" headers = ['User-Agent: ' + http.MakeUserAgentString()] if self._get_access_token_callback: headers += ['Authorization: Bearer ' + self._get_access_token_callback()] if self._connection_sid: url = utils.CreateWebSocketReconnectUrl( self._tunnel_target, self._connection_sid, self._total_bytes_received) log.info('Reconnecting with URL [%r]', url) else: url = utils.CreateWebSocketConnectUrl(self._tunnel_target) log.info('Connecting with URL [%r]', url) self._connect_msg_received = False self._websocket_helper = helper.IapTunnelWebSocketHelper( url, headers, self._ignore_certs, self._tunnel_target.proxy_info, self._OnData, self._OnClose) self._websocket_helper.StartReceivingThread() def _SendAck(self): """Send an ACK back to server.""" if self._total_bytes_received > self._total_bytes_received_and_acked: bytes_received = self._total_bytes_received try: ack_data = utils.CreateSubprotocolAckFrame(bytes_received) self._websocket_helper.Send(ack_data) self._total_bytes_received_and_acked = bytes_received except helper.WebSocketConnectionClosed: pass except EnvironmentError as e: log.info('Unable to send WebSocket ack [%s]', str(e)) except: # pylint: disable=bare-except if not self._IsClosed(): log.info('Error while attempting to ack [%d] bytes', bytes_received, exc_info=True) def _SendDataAndReconnectWebSocket(self): """Main function for send_and_reconnect_thread.""" def Reconnect(): if not self._stopping: self._StartNewWebSocket() self._WaitForOpenOrRaiseError() try: while not self._stopping: if self._IsClosed(): self._AttemptReconnect(Reconnect) elif self._HasConnected(): self._SendQueuedData() if not self._IsClosed(): self._SendAck() if not self._stopping: time.sleep(0.01) except: # pylint: disable=bare-except log.debug('Error from WebSocket while sending data.', exc_info=True) self.Close() def _SendQueuedData(self): """Send data that is sitting in the unsent data queue.""" while self._unsent_data and not self._stopping: try: send_data = utils.CreateSubprotocolDataFrame(self._unsent_data[0]) # We need to append to _unconfirmed_data before calling Send(), because # otherwise we could receive the ack for the sent data before we do the # append, which we would interpret as an invalid ack. This does mean # there's a small window of time where we'll accept acks of data that # hasn't truly been sent if a badly behaving server sends such acks, but # that's not really a problem, because we'll behave identically to as if # the ack was received after the data was sent (so no data or control # flow corruption), and we don't have a goal of giving an error every # time the server misbehaves. 
self._unconfirmed_data.append(self._unsent_data.popleft()) self._websocket_helper.Send(send_data) except helper.WebSocketConnectionClosed: break except EnvironmentError as e: log.info('Unable to send WebSocket data [%s]', str(e)) break except: # pylint: disable=bare-except log.info('Error while attempting to send [%d] bytes', len(send_data), exc_info=True) break # We need to check _input_eof before _unsent_data to avoid a race # condition with setting _input_eof simultaneously with this check. if self._input_eof and not self._unsent_data: self._sent_all.set() def _StopConnectionAsync(self): self._stopping = True def _WaitForOpenOrRaiseError(self): """Wait for WebSocket open confirmation or any error condition.""" for _ in range(MAX_WEBSOCKET_OPEN_WAIT_TIME_SEC * 100): if self._IsClosed(): break if self._HasConnected(): return time.sleep(0.01) if (self._websocket_helper and self._websocket_helper.IsClosed() and self._websocket_helper.ErrorMsg()): extra_msg = '' # Error messages like 'Handshake status 400' or 'Handshake status 404' # may often indicate missing permissions. if self._websocket_helper.ErrorMsg().startswith('Handshake status 40'): extra_msg = ' (May be due to missing permissions)' error_msg = ('Error while connecting [%s].%s' % (self._websocket_helper.ErrorMsg(), extra_msg)) raise ConnectionCreationError(error_msg) raise ConnectionCreationError('Unexpected error while connecting. Check ' 'logs for more details.') def _OnClose(self): self._StopConnectionAsync() def _OnData(self, binary_data): """Receive a single message from the server.""" tag, bytes_left = utils.ExtractSubprotocolTag(binary_data) # In order of decreasing usage during connection: if tag == utils.SUBPROTOCOL_TAG_DATA: self._HandleSubprotocolData(bytes_left) elif tag == utils.SUBPROTOCOL_TAG_ACK: self._HandleSubprotocolAck(bytes_left) elif tag == utils.SUBPROTOCOL_TAG_CONNECT_SUCCESS_SID: self._HandleSubprotocolConnectSuccessSid(bytes_left) elif tag == utils.SUBPROTOCOL_TAG_RECONNECT_SUCCESS_ACK: self._HandleSubprotocolReconnectSuccessAck(bytes_left) else: log.debug('Unsupported subprotocol tag [%r], discarding the message', tag) def _HandleSubprotocolAck(self, binary_data): """Handle Subprotocol ACK Frame.""" if not self._HasConnected(): self._StopConnectionAsync() raise SubprotocolEarlyAckError('Received ACK before connected.') bytes_confirmed, bytes_left = utils.ExtractSubprotocolAck(binary_data) self._ConfirmData(bytes_confirmed) if bytes_left: log.debug('Discarding [%d] extra bytes after processing ACK', len(bytes_left)) def _HandleSubprotocolConnectSuccessSid(self, binary_data): """Handle Subprotocol CONNECT_SUCCESS_SID Frame.""" if self._HasConnected(): self._StopConnectionAsync() raise SubprotocolExtraConnectSuccessSid( 'Received CONNECT_SUCCESS_SID after already connected.') data, bytes_left = utils.ExtractSubprotocolConnectSuccessSid(binary_data) self._connection_sid = data self._connect_msg_received = True if bytes_left: log.debug( 'Discarding [%d] extra bytes after processing CONNECT_SUCCESS_SID', len(bytes_left)) def _HandleSubprotocolReconnectSuccessAck(self, binary_data): """Handle Subprotocol RECONNECT_SUCCESS_ACK Frame.""" if self._HasConnected(): self._StopConnectionAsync() raise SubprotocolExtraReconnectSuccessAck( 'Received RECONNECT_SUCCESS_ACK after already connected.') bytes_confirmed, bytes_left = ( utils.ExtractSubprotocolReconnectSuccessAck(binary_data)) bytes_being_confirmed = bytes_confirmed - self._total_bytes_confirmed self._ConfirmData(bytes_confirmed) log.info( 'Reconnecting: 
confirming [%d] bytes and resending [%d] messages.', bytes_being_confirmed, len(self._unconfirmed_data)) self._unsent_data.extendleft(reversed(self._unconfirmed_data)) self._unconfirmed_data = collections.deque() self._connect_msg_received = True if bytes_left: log.debug( 'Discarding [%d] extra bytes after processing RECONNECT_SUCCESS_ACK', len(bytes_left)) def _HandleSubprotocolData(self, binary_data): """Handle Subprotocol DATA Frame.""" if not self._HasConnected(): self._StopConnectionAsync() raise SubprotocolEarlyDataError('Received DATA before connected.') data, bytes_left = utils.ExtractSubprotocolData(binary_data) self._total_bytes_received += len(data) try: self._data_handler_callback(data) except: # pylint: disable=bare-except self._StopConnectionAsync() raise if bytes_left: log.debug('Discarding [%d] extra bytes after processing DATA', len(bytes_left)) def _ConfirmData(self, bytes_confirmed): """Discard data that has been confirmed via ACKs received from server.""" if bytes_confirmed < self._total_bytes_confirmed: self._StopConnectionAsync() raise SubprotocolOutOfOrderAckError( 'Received out-of-order Ack for [%d] bytes.' % bytes_confirmed) bytes_to_confirm = bytes_confirmed - self._total_bytes_confirmed while bytes_to_confirm and self._unconfirmed_data: data_chunk = self._unconfirmed_data.popleft() if len(data_chunk) > bytes_to_confirm: self._unconfirmed_data.appendleft(data_chunk[bytes_to_confirm:]) self._total_bytes_confirmed += bytes_to_confirm else: self._total_bytes_confirmed += len(data_chunk) bytes_to_confirm = bytes_confirmed - self._total_bytes_confirmed if bytes_to_confirm: self._StopConnectionAsync() raise SubprotocolInvalidAckError( 'Bytes confirmed [%r] were larger than bytes sent [%r].' % (bytes_confirmed, self._total_bytes_confirmed))
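# Illustrative sketch (not part of the IAP tunnel WebSocket code above): the
# resend bookkeeping used by the tunnel boils down to a deque of
# sent-but-unacked chunks plus a running count of confirmed bytes; on a
# reconnect, whatever is still in the deque is pushed back onto the unsent
# queue. The names below (confirm_bytes, unconfirmed) are hypothetical and only
# restate the logic of _ConfirmData in simplified, standalone form.
import collections

def confirm_bytes(unconfirmed, total_confirmed, bytes_confirmed):
    """Drop fully-acked chunks; split a chunk that is only partially acked."""
    if bytes_confirmed < total_confirmed:
        raise ValueError('out-of-order ack')
    to_confirm = bytes_confirmed - total_confirmed
    while to_confirm and unconfirmed:
        chunk = unconfirmed.popleft()
        if len(chunk) > to_confirm:
            # Keep the unacked tail of this chunk at the front of the queue.
            unconfirmed.appendleft(chunk[to_confirm:])
            total_confirmed += to_confirm
        else:
            total_confirmed += len(chunk)
        to_confirm = bytes_confirmed - total_confirmed
    if to_confirm:
        raise ValueError('ack exceeds bytes sent')
    return total_confirmed

unconfirmed = collections.deque([b'abc', b'defg'])
# Server acks 5 bytes: b'abc' and the first two bytes of b'defg' are confirmed;
# b'fg' stays queued for a possible resend after a reconnect.
assert confirm_bytes(unconfirmed, 0, 5) == 5
assert list(unconfirmed) == [b'fg']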
audio_player.py
import pygame import time import threading import multiprocessing import operator audioChannels = 0 class AudioPlayer(threading.Thread): def __init__(self, startAddress, playlist, ArtNetReceiver, systemCommunication): threading.Thread.__init__(self) global audioChannels audioChannels += 1 self.addr = startAddress - 1 # using 1 based addresses self.playlist = playlist self.channelNumber = audioChannels self.comm = systemCommunication self.comm["audio"][self.channelNumber] = {} self.queue = multiprocessing.Queue() self.outqueue = multiprocessing.Queue() self.playerProcess = multiprocessing.Process(target=self.playerWorker, args=(self.queue, self.outqueue)) self.playerProcess.start() self.actPlaystate = "stop" self.actPlaystateValue = 0 self.playingFile = False self.channels = { "volume": self.addr + 0, "folder": self.addr + 1, "file": self.addr + 2, "playstate": self.addr + 3 } ArtNetReceiver.registerCallback(self.channels.get("volume", self.addr) , self.volume) ArtNetReceiver.registerCallback([ self.channels.get("folder", self.addr), self.channels.get("file", self.addr) ] , self.load) ArtNetReceiver.registerCallback(self.channels.get("playstate", self.addr) , self.playstate) self.stopped = False self.start() def stop(self): print "Audioplayer %s Ending" % self.channelNumber self.stopped = True def run(self): while not self.stopped: try: if self.outqueue.empty(): time.sleep(0.1) else: (param, value) = self.outqueue.get(False, 0.75) self.comm["audio"][self.channelNumber][param] = value time.sleep(0.01) except KeyboardInterrupt: self.stopped = True return False print "Audioplayer %s Terminated" % self.channelNumber def getDMXFootprint(self): return len(self.channels.keys()) def printDMXFootprint(self): sorted_out = sorted(self.channels.items(), key=operator.itemgetter(1)) print "AudioPlayer {0}: ".format(self.channelNumber) print " +------------+---------+" print " | {0:10s} | {1} |".format("Parameter", "Address") print " +------------+---------+" for item in sorted_out: print " | {0:10s} | {1:4d} |".format(item[0], item[1]+1) print " +------------+---------+" print "" def debugPrint(self, method, message): #print "[Player {0:2d}, {1:6s}]: {2}".format(self.channelNumber, method, message) pass def playerWorker(self, queue, comm): actualFile = False pygame.init() position = 0 pause = False play = False loop = False while True: try: (command, value) = queue.get() try: if command == "playmode": if value < 32: # STOP self.debugPrint( "MODE", "STOP") pause = False play = False loop = False comm.put(("state", "stop")) pygame.mixer.music.stop() elif value < 128: # PLAY (<64) and LOOP (<128) if pause: pygame.mixer.music.unpause() comm.put(("state", "unpause")) else: loop = False pygame.mixer.music.load(actualFile) if value < 64: self.debugPrint( "MODE", "PLAY") pygame.mixer.music.play(0) loop = True comm.put(("state", "play")) else: self.debugPrint( "MODE", "LOOP") pygame.mixer.music.play(-1) loop = True comm.put(("state", "loop")) play = True pause = False elif value < 160: # PAUSE self.debugPrint( "MODE", "PAUSE") play = False pause = True comm.put(("state", "pause")) pygame.mixer.music.pause() else: # Do Nothing Mode / Reserved self.debugPrint( "MODE", "No Change") elif command == "load": (folderId, fileId, nextFile) = value if not nextFile: self.debugPrint( "LOAD", "{0}/{1}: No File at Index".format(folderId, fileId)) actualFile = False continue; self.debugPrint( "LOAD", "{0}/{1}: {2}".format(folderId, fileId, nextFile)) pygame.mixer.music.load(nextFile) actualFile = nextFile comm.put(("folder", 
folderId)) comm.put(("fileNo", fileId)) comm.put(("file", nextFile)) if play or pause: if loop: pygame.mixer.music.play(-1) self.debugPrint( "LOAD", "Resuming Looped") else: pygame.mixer.music.play(0) self.debugPrint( "LOAD", "Resuming Play") if pause: pygame.mixer.music.pause() self.debugPrint( "LOAD", "Resuming Pause") elif command == "volume": self.debugPrint( "VOLUME", "{0:5.1f}%".format(value * 100)) comm.put(("volume", value * 100)) pygame.mixer.music.set_volume(value) except Exception as e: print "Error in audioplayer %s" % self.channelNumber print e pass except KeyboardInterrupt: return False def volume(self, channel, value, dmx): self.queue.put(("volume", value/255.0)) def load(self, channel, value, dmx): folderId = value[0] fileId = value[1] nextFile = self.playlist.get(str(folderId), {}).get(str(fileId), False) self.queue.put(("load", (folderId, fileId, nextFile))) def playstate(self, channel, value, dmx): self.queue.put(("playmode", value))
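# Illustrative sketch (not part of audio_player.py above): the "playstate" DMX
# channel is decoded by value ranges inside playerWorker. The helper below is
# hypothetical and only restates those thresholds as a pure function, which
# makes the value-to-mode mapping easy to test in isolation.
def decode_playmode(value):
    """Map a DMX value (0-255) to the playback mode used by playerWorker."""
    if value < 32:
        return "stop"
    if value < 64:
        return "play"
    if value < 128:
        return "loop"
    if value < 160:
        return "pause"
    return None  # reserved / no change

assert decode_playmode(0) == "stop"
assert decode_playmode(40) == "play"
assert decode_playmode(100) == "loop"
assert decode_playmode(150) == "pause"
assert decode_playmode(200) is None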
service_utils.py
#!/usr/bin/env python3
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Heavily inspired, even copied from the utilities used to build the
# Joy Detection Demo.

import queue
import threading


class Service:
    """
    An abstract class used to build services that can be started and stopped
    and that manage a queue of requests.
    Subclasses should implement the process method to do work, while clients
    should call submit to schedule it.
    Since this class is a context manager, subclasses can be used in 'with'
    statements.
    """

    def __init__(self):
        self._requests = queue.Queue()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        while True:
            request = self._requests.get()
            if request is None:
                self.shutdown()
                break
            self.process(request)
            self._requests.task_done()

    def process(self, request):
        """
        Implemented by subclasses to do the particular work of the service.
        """
        pass

    def shutdown(self):
        """Implemented by subclasses to take care of any cleanup after the last
        request is processed."""
        pass

    def submit(self, request):
        self._requests.put(request)

    def close(self):
        self._requests.put(None)
        self._thread.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()


def main():
    """Example of how to use the Service class."""

    class MyService(Service):
        def process(self, request):
            print("Processing %s" % request)

        def shutdown(self):
            print("Shutting down")

    with MyService() as my_service:
        my_service.submit("Hello")
        my_service.submit("World")


if __name__ == "__main__":
    main()
test_server.py
import os import time import tempfile import uuid from multiprocessing import Process, Manager from typing import List, Text, Type from contextlib import ExitStack from aioresponses import aioresponses import pytest from freezegun import freeze_time from mock import MagicMock import rasa import rasa.constants from rasa.core import events, utils from rasa.core.channels import CollectingOutputChannel, RestInput, SlackInput from rasa.core.channels.slack import SlackBot from rasa.core.events import Event, UserUttered, SlotSet, BotUttered from rasa.core.trackers import DialogueStateTracker from rasa.model import unpack_model from rasa.utils.endpoints import EndpointConfig from sanic import Sanic from sanic.testing import SanicTestClient, PORT from tests.nlu.utilities import ResponseTest from tests.conftest import get_test_client # a couple of event instances that we can use for testing test_events = [ Event.from_parameters( { "event": UserUttered.type_name, "text": "/goodbye", "parse_data": { "intent": {"confidence": 1.0, "name": "greet"}, "entities": [], }, } ), BotUttered("Welcome!", {"test": True}), SlotSet("cuisine", 34), SlotSet("cuisine", "34"), SlotSet("location", None), SlotSet("location", [34, "34", None]), ] @pytest.fixture def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicTestClient: return get_test_client(rasa_server_without_api) @pytest.fixture def rasa_app(rasa_server: Sanic) -> SanicTestClient: return get_test_client(rasa_server) @pytest.fixture def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicTestClient: return get_test_client(rasa_nlu_server) @pytest.fixture def rasa_app_core(rasa_core_server: Sanic) -> SanicTestClient: return get_test_client(rasa_core_server) @pytest.fixture def rasa_secured_app(rasa_server_secured: Sanic) -> SanicTestClient: return get_test_client(rasa_server_secured) def test_root(rasa_app: SanicTestClient): _, response = rasa_app.get("/") assert response.status == 200 assert response.text.startswith("Hello from Rasa:") def test_root_without_enable_api(rasa_app_without_api: SanicTestClient): _, response = rasa_app_without_api.get("/") assert response.status == 200 assert response.text.startswith("Hello from Rasa:") def test_root_secured(rasa_secured_app: SanicTestClient): _, response = rasa_secured_app.get("/") assert response.status == 200 assert response.text.startswith("Hello from Rasa:") def test_version(rasa_app: SanicTestClient): _, response = rasa_app.get("/version") content = response.json assert response.status == 200 assert content.get("version") == rasa.__version__ assert ( content.get("minimum_compatible_version") == rasa.constants.MINIMUM_COMPATIBLE_VERSION ) def test_status(rasa_app: SanicTestClient): _, response = rasa_app.get("/status") assert response.status == 200 assert "fingerprint" in response.json assert "model_file" in response.json def test_status_nlu_only(rasa_app_nlu: SanicTestClient): _, response = rasa_app_nlu.get("/status") assert response.status == 200 assert "fingerprint" in response.json assert "model_file" in response.json def test_status_secured(rasa_secured_app: SanicTestClient): _, response = rasa_secured_app.get("/status") assert response.status == 401 def test_status_not_ready_agent(rasa_app: SanicTestClient): rasa_app.app.agent = None _, response = rasa_app.get("/status") assert response.status == 409 @pytest.fixture def formbot_data(): return dict( domain="examples/formbot/domain.yml", config="examples/formbot/config.yml", stories="examples/formbot/data/stories.md", nlu="examples/formbot/data/nlu.md", ) 
def test_train_status(rasa_server, rasa_app, formbot_data): with ExitStack() as stack: payload = { key: stack.enter_context(open(path)).read() for key, path in formbot_data.items() } def train(results): client1 = SanicTestClient(rasa_server, port=PORT + 1) _, train_resp = client1.post("/model/train", json=payload) results["train_response_code"] = train_resp.status # Run training process in the background manager = Manager() results = manager.dict() p1 = Process(target=train, args=(results,)) p1.start() # Query the status endpoint a few times to ensure the test does # not fail prematurely due to mismatched timing of a single query. for i in range(10): time.sleep(1) _, status_resp = rasa_app.get("/status") assert status_resp.status == 200 if status_resp.json["num_active_training_jobs"] == 1: break assert status_resp.json["num_active_training_jobs"] == 1 p1.join() assert results["train_response_code"] == 200 _, status_resp = rasa_app.get("/status") assert status_resp.status == 200 assert status_resp.json["num_active_training_jobs"] == 0 @pytest.mark.parametrize( "response_test", [ ResponseTest( "/model/parse", { "entities": [], "intent": {"confidence": 1.0, "name": "greet"}, "text": "hello", }, payload={"text": "hello"}, ), ResponseTest( "/model/parse", { "entities": [], "intent": {"confidence": 1.0, "name": "greet"}, "text": "hello", }, payload={"text": "hello"}, ), ResponseTest( "/model/parse", { "entities": [], "intent": {"confidence": 1.0, "name": "greet"}, "text": "hello ńöñàśçií", }, payload={"text": "hello ńöñàśçií"}, ), ], ) def test_parse(rasa_app, response_test): _, response = rasa_app.post(response_test.endpoint, json=response_test.payload) rjs = response.json assert response.status == 200 assert all(prop in rjs for prop in ["entities", "intent", "text"]) assert rjs["entities"] == response_test.expected_response["entities"] assert rjs["text"] == response_test.expected_response["text"] assert rjs["intent"] == response_test.expected_response["intent"] @pytest.mark.parametrize( "response_test", [ ResponseTest( "/model/parse?emulation_mode=wit", { "entities": [], "intent": {"confidence": 1.0, "name": "greet"}, "text": "hello", }, payload={"text": "hello"}, ), ResponseTest( "/model/parse?emulation_mode=dialogflow", { "entities": [], "intent": {"confidence": 1.0, "name": "greet"}, "text": "hello", }, payload={"text": "hello"}, ), ResponseTest( "/model/parse?emulation_mode=luis", { "entities": [], "intent": {"confidence": 1.0, "name": "greet"}, "text": "hello ńöñàśçií", }, payload={"text": "hello ńöñàśçií"}, ), ], ) def test_parse_with_different_emulation_mode(rasa_app, response_test): _, response = rasa_app.post(response_test.endpoint, json=response_test.payload) assert response.status == 200 def test_parse_without_nlu_model(rasa_app_core: SanicTestClient): _, response = rasa_app_core.post("/model/parse", json={"text": "hello"}) assert response.status == 200 rjs = response.json assert all(prop in rjs for prop in ["entities", "intent", "text"]) def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicTestClient): _, response = rasa_app_nlu.post( "/model/parse?emulation_mode=ANYTHING", json={"text": "hello"} ) assert response.status == 400 def test_train_stack_success( rasa_app, default_domain_path, default_stories_file, default_stack_config, default_nlu_data, ): with ExitStack() as stack: domain_file = stack.enter_context(open(default_domain_path)) config_file = stack.enter_context(open(default_stack_config)) stories_file = stack.enter_context(open(default_stories_file)) nlu_file = 
stack.enter_context(open(default_nlu_data)) payload = dict( domain=domain_file.read(), config=config_file.read(), stories=stories_file.read(), nlu=nlu_file.read(), ) _, response = rasa_app.post("/model/train", json=payload) assert response.status == 200 assert response.headers["filename"] is not None # save model to temporary file tempdir = tempfile.mkdtemp() model_path = os.path.join(tempdir, "model.tar.gz") with open(model_path, "wb") as f: f.write(response.body) # unpack model and ensure fingerprint is present model_path = unpack_model(model_path) assert os.path.exists(os.path.join(model_path, "fingerprint.json")) def test_train_nlu_success( rasa_app, default_stack_config, default_nlu_data, default_domain_path ): with ExitStack() as stack: domain_file = stack.enter_context(open(default_domain_path)) config_file = stack.enter_context(open(default_stack_config)) nlu_file = stack.enter_context(open(default_nlu_data)) payload = dict( domain=domain_file.read(), config=config_file.read(), nlu=nlu_file.read() ) _, response = rasa_app.post("/model/train", json=payload) assert response.status == 200 # save model to temporary file tempdir = tempfile.mkdtemp() model_path = os.path.join(tempdir, "model.tar.gz") with open(model_path, "wb") as f: f.write(response.body) # unpack model and ensure fingerprint is present model_path = unpack_model(model_path) assert os.path.exists(os.path.join(model_path, "fingerprint.json")) def test_train_core_success( rasa_app, default_stack_config, default_stories_file, default_domain_path ): with ExitStack() as stack: domain_file = stack.enter_context(open(default_domain_path)) config_file = stack.enter_context(open(default_stack_config)) core_file = stack.enter_context(open(default_stories_file)) payload = dict( domain=domain_file.read(), config=config_file.read(), stories=core_file.read(), ) _, response = rasa_app.post("/model/train", json=payload) assert response.status == 200 # save model to temporary file tempdir = tempfile.mkdtemp() model_path = os.path.join(tempdir, "model.tar.gz") with open(model_path, "wb") as f: f.write(response.body) # unpack model and ensure fingerprint is present model_path = unpack_model(model_path) assert os.path.exists(os.path.join(model_path, "fingerprint.json")) def test_train_missing_config(rasa_app: SanicTestClient): payload = dict(domain="domain data", config=None) _, response = rasa_app.post("/model/train", json=payload) assert response.status == 400 def test_train_missing_training_data(rasa_app: SanicTestClient): payload = dict(domain="domain data", config="config data") _, response = rasa_app.post("/model/train", json=payload) assert response.status == 400 def test_train_internal_error(rasa_app: SanicTestClient): payload = dict(domain="domain data", config="config data", nlu="nlu data") _, response = rasa_app.post("/model/train", json=payload) assert response.status == 500 def test_evaluate_stories(rasa_app, default_stories_file): with open(default_stories_file, "r") as f: stories = f.read() _, response = rasa_app.post("/model/test/stories", data=stories) assert response.status == 200 js = response.json assert set(js.keys()) == { "report", "precision", "f1", "accuracy", "actions", "in_training_data_fraction", "is_end_to_end_evaluation", } assert not js["is_end_to_end_evaluation"] assert set(js["actions"][0].keys()) == { "action", "predicted", "confidence", "policy", } def test_evaluate_stories_not_ready_agent( rasa_app_nlu: SanicTestClient, default_stories_file ): with open(default_stories_file, "r") as f: stories = f.read() 
_, response = rasa_app_nlu.post("/model/test/stories", data=stories) assert response.status == 409 def test_evaluate_stories_end_to_end(rasa_app, end_to_end_story_file): with open(end_to_end_story_file, "r") as f: stories = f.read() _, response = rasa_app.post("/model/test/stories?e2e=true", data=stories) assert response.status == 200 js = response.json assert set(js.keys()) == { "report", "precision", "f1", "accuracy", "actions", "in_training_data_fraction", "is_end_to_end_evaluation", } assert js["is_end_to_end_evaluation"] assert set(js["actions"][0].keys()) == { "action", "predicted", "confidence", "policy", } def test_evaluate_intent(rasa_app, default_nlu_data): with open(default_nlu_data, "r") as f: nlu_data = f.read() _, response = rasa_app.post("/model/test/intents", data=nlu_data) assert response.status == 200 assert set(response.json.keys()) == { "intent_evaluation", "entity_evaluation", "response_selection_evaluation", } def test_evaluate_intent_on_just_nlu_model( rasa_app_nlu: SanicTestClient, default_nlu_data ): with open(default_nlu_data, "r") as f: nlu_data = f.read() _, response = rasa_app_nlu.post("/model/test/intents", data=nlu_data) assert response.status == 200 assert set(response.json.keys()) == { "intent_evaluation", "entity_evaluation", "response_selection_evaluation", } def test_evaluate_intent_with_query_param( rasa_app, trained_nlu_model, default_nlu_data ): _, response = rasa_app.get("/status") previous_model_file = response.json["model_file"] with open(default_nlu_data, "r") as f: nlu_data = f.read() _, response = rasa_app.post( f"/model/test/intents?model={trained_nlu_model}", data=nlu_data ) assert response.status == 200 assert set(response.json.keys()) == { "intent_evaluation", "entity_evaluation", "response_selection_evaluation", } _, response = rasa_app.get("/status") assert previous_model_file == response.json["model_file"] def test_predict(rasa_app: SanicTestClient): data = { "Events": { "value": [ {"event": "action", "name": "action_listen"}, { "event": "user", "text": "hello", "parse_data": { "entities": [], "intent": {"confidence": 0.57, "name": "greet"}, "text": "hello", }, }, ] } } _, response = rasa_app.post( "/model/predict", json=data, headers={"Content-Type": "application/json"} ) content = response.json assert response.status == 200 assert "scores" in content assert "tracker" in content assert "policy" in content @freeze_time("2018-01-01") def test_requesting_non_existent_tracker(rasa_app: SanicTestClient): _, response = rasa_app.get("/conversations/madeupid/tracker") content = response.json assert response.status == 200 assert content["paused"] is False assert content["slots"] == {"location": None, "cuisine": None} assert content["sender_id"] == "madeupid" assert content["events"] == [ { "event": "action", "name": "action_listen", "policy": None, "confidence": None, "timestamp": 1514764800, } ] assert content["latest_message"] == { "text": None, "intent": {}, "entities": [], "message_id": None, "metadata": {}, } @pytest.mark.parametrize("event", test_events) def test_pushing_event(rasa_app, event): cid = str(uuid.uuid1()) conversation = f"/conversations/{cid}" _, response = rasa_app.post( f"{conversation}/tracker/events", json=event.as_dict(), headers={"Content-Type": "application/json"}, ) assert response.json is not None assert response.status == 200 _, tracker_response = rasa_app.get(f"/conversations/{cid}/tracker") tracker = tracker_response.json assert tracker is not None assert len(tracker.get("events")) == 2 evt = 
tracker.get("events")[1] assert Event.from_parameters(evt) == event def test_push_multiple_events(rasa_app: SanicTestClient): cid = str(uuid.uuid1()) conversation = f"/conversations/{cid}" events = [e.as_dict() for e in test_events] _, response = rasa_app.post( f"{conversation}/tracker/events", json=events, headers={"Content-Type": "application/json"}, ) assert response.json is not None assert response.status == 200 _, tracker_response = rasa_app.get(f"/conversations/{cid}/tracker") tracker = tracker_response.json assert tracker is not None # there is also an `ACTION_LISTEN` event at the start assert len(tracker.get("events")) == len(test_events) + 1 assert tracker.get("events")[1:] == events def test_put_tracker(rasa_app: SanicTestClient): data = [event.as_dict() for event in test_events] _, response = rasa_app.put( "/conversations/pushtracker/tracker/events", json=data, headers={"Content-Type": "application/json"}, ) content = response.json assert response.status == 200 assert len(content["events"]) == len(test_events) assert content["sender_id"] == "pushtracker" _, tracker_response = rasa_app.get("/conversations/pushtracker/tracker") tracker = tracker_response.json assert tracker is not None evts = tracker.get("events") assert events.deserialise_events(evts) == test_events def test_sorted_predict(rasa_app: SanicTestClient): _create_tracker_for_sender(rasa_app, "sortedpredict") _, response = rasa_app.post("/conversations/sortedpredict/predict") scores = response.json["scores"] sorted_scores = sorted(scores, key=lambda k: (-k["score"], k["action"])) assert scores == sorted_scores def _create_tracker_for_sender(app: SanicTestClient, sender_id: Text) -> None: data = [event.as_dict() for event in test_events[:3]] _, response = app.put( f"/conversations/{sender_id}/tracker/events", json=data, headers={"Content-Type": "application/json"}, ) assert response.status == 200 def test_get_tracker_with_jwt(rasa_secured_app): # token generated with secret "core" and algorithm HS256 # on https://jwt.io/ # {"user": {"username": "testadmin", "role": "admin"}} jwt_header = { "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9." "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic" "m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u" "QRre7IWTuIDrCn5AIw" } _, response = rasa_secured_app.get( "/conversations/testadmin/tracker", headers=jwt_header ) assert response.status == 200 _, response = rasa_secured_app.get( "/conversations/testuser/tracker", headers=jwt_header ) assert response.status == 200 # {"user": {"username": "testuser", "role": "user"}} jwt_header = { "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9." 
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb" "2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l" "HJHOxxC_w7TtwCrs" } _, response = rasa_secured_app.get( "/conversations/testadmin/tracker", headers=jwt_header ) assert response.status == 403 _, response = rasa_secured_app.get( "/conversations/testuser/tracker", headers=jwt_header ) assert response.status == 200 def test_list_routes(default_agent): from rasa import server app = server.create_app(default_agent, auth_token=None) routes = utils.list_routes(app) assert set(routes.keys()) == { "hello", "version", "status", "retrieve_tracker", "append_events", "replace_events", "retrieve_story", "execute_action", "predict", "add_message", "train", "evaluate_stories", "evaluate_intents", "tracker_predict", "parse", "load_model", "unload_model", "get_domain", } def test_unload_model_error(rasa_app: SanicTestClient): _, response = rasa_app.get("/status") assert response.status == 200 assert "model_file" in response.json and response.json["model_file"] is not None _, response = rasa_app.delete("/model") assert response.status == 204 def test_get_domain(rasa_app: SanicTestClient): _, response = rasa_app.get("/domain", headers={"accept": "application/json"}) content = response.json assert response.status == 200 assert "config" in content assert "intents" in content assert "entities" in content assert "slots" in content assert "templates" in content assert "actions" in content def test_get_domain_invalid_accept_header(rasa_app: SanicTestClient): _, response = rasa_app.get("/domain") assert response.status == 406 def test_load_model(rasa_app: SanicTestClient, trained_core_model): _, response = rasa_app.get("/status") assert response.status == 200 assert "fingerprint" in response.json old_fingerprint = response.json["fingerprint"] data = {"model_file": trained_core_model} _, response = rasa_app.put("/model", json=data) assert response.status == 204 _, response = rasa_app.get("/status") assert response.status == 200 assert "fingerprint" in response.json assert old_fingerprint != response.json["fingerprint"] def test_load_model_from_model_server(rasa_app: SanicTestClient, trained_core_model): _, response = rasa_app.get("/status") assert response.status == 200 assert "fingerprint" in response.json old_fingerprint = response.json["fingerprint"] endpoint = EndpointConfig("https://example.com/model/trained_core_model") with open(trained_core_model, "rb") as f: with aioresponses(passthrough=["http://127.0.0.1"]) as mocked: headers = {} fs = os.fstat(f.fileno()) headers["Content-Length"] = str(fs[6]) mocked.get( "https://example.com/model/trained_core_model", content_type="application/x-tar", body=f.read(), ) data = {"model_server": {"url": endpoint.url}} _, response = rasa_app.put("/model", json=data) assert response.status == 204 _, response = rasa_app.get("/status") assert response.status == 200 assert "fingerprint" in response.json assert old_fingerprint != response.json["fingerprint"] import rasa.core.jobs rasa.core.jobs.__scheduler = None def test_load_model_invalid_request_body(rasa_app: SanicTestClient): _, response = rasa_app.put("/model") assert response.status == 400 def test_load_model_invalid_configuration(rasa_app: SanicTestClient): data = {"model_file": "some-random-path"} _, response = rasa_app.put("/model", json=data) assert response.status == 400 def test_execute(rasa_app: SanicTestClient): _create_tracker_for_sender(rasa_app, "test_execute") data = {"name": "utter_greet"} _, response = rasa_app.post("/conversations/test_execute/execute", 
json=data) assert response.status == 200 parsed_content = response.json assert parsed_content["tracker"] assert parsed_content["messages"] def test_execute_with_missing_action_name(rasa_app: SanicTestClient): test_sender = "test_execute_with_missing_action_name" _create_tracker_for_sender(rasa_app, test_sender) data = {"wrong-key": "utter_greet"} _, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data) assert response.status == 400 def test_execute_with_not_existing_action(rasa_app: SanicTestClient): test_sender = "test_execute_with_not_existing_action" _create_tracker_for_sender(rasa_app, test_sender) data = {"name": "ka[pa[opi[opj[oj[oija"} _, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data) assert response.status == 500 @pytest.mark.parametrize( "input_channels, output_channel_to_use, expected_channel", [ (None, "slack", CollectingOutputChannel), ([], None, CollectingOutputChannel), ([RestInput()], "slack", CollectingOutputChannel), ([RestInput()], "rest", CollectingOutputChannel), ([RestInput(), SlackInput("test")], "slack", SlackBot), ], ) def test_get_output_channel( input_channels: List[Text], output_channel_to_use, expected_channel: Type ): request = MagicMock() app = MagicMock() app.input_channels = input_channels request.app = app request.args = {"output_channel": output_channel_to_use} actual = rasa.server._get_output_channel(request, None) assert isinstance(actual, expected_channel) @pytest.mark.parametrize( "input_channels, expected_channel", [ ([], CollectingOutputChannel), ([RestInput()], CollectingOutputChannel), ([RestInput(), SlackInput("test")], SlackBot), ], ) def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type): request = MagicMock() app = MagicMock() app.input_channels = input_channels request.app = app request.args = {"output_channel": "latest"} tracker = DialogueStateTracker.from_events( "default", [UserUttered("text", input_channel="slack")] ) actual = rasa.server._get_output_channel(request, tracker) assert isinstance(actual, expected_channel) def test_app_when_app_has_no_input_channels(): request = MagicMock() class NoInputChannels: pass request.app = NoInputChannels() actual = rasa.server._get_output_channel( request, DialogueStateTracker.from_events("default", []) ) assert isinstance(actual, CollectingOutputChannel)
Flight Reservation.py
# A simple program to explain threading.
from threading import Thread
from time import sleep


class FlightReservation:
    def __init__(self, ticket_left):
        self.ticket_left = ticket_left

    def buy(self, ticketRequest):
        if self.ticket_left >= ticketRequest:
            print("Your tickets are confirmed")
            print("Make payment and collect tickets")
            self.ticket_left -= ticketRequest
        else:
            print("Sorry, not enough tickets remaining")


res = FlightReservation(8)
t1 = Thread(target=res.buy, args=(3,))
t2 = Thread(target=res.buy, args=(4,))
t3 = Thread(target=res.buy, args=(7,))

t1.start()
sleep(2)  # Prevent the threads from running in parallel and giving false information
t2.start()
sleep(2)  # Prevent the threads from running in parallel and giving false information
t3.start()
sleep(1)

input("Press Enter to exit ")
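# Illustrative sketch (not part of the example above): the sleep() calls in the
# demo only hide the race on ticket_left. A lock-based variant, using the
# hypothetical name SafeFlightReservation, makes the check-and-decrement
# atomic, so the threads can run concurrently without overselling tickets.
from threading import Thread, Lock

class SafeFlightReservation:
    def __init__(self, ticket_left):
        self.ticket_left = ticket_left
        self.lock = Lock()

    def buy(self, ticketRequest):
        with self.lock:  # only one thread may check and update at a time
            if self.ticket_left >= ticketRequest:
                print("Your tickets are confirmed")
                print("Make payment and collect tickets")
                self.ticket_left -= ticketRequest
            else:
                print("Sorry, not enough tickets remaining")

safe_res = SafeFlightReservation(8)
threads = [Thread(target=safe_res.buy, args=(n,)) for n in (3, 4, 7)]
for t in threads:
    t.start()
for t in threads:
    t.join()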
main.py
from flask import Flask, send_from_directory from flask_cors import CORS from flask_restful import Resource, Api, reqparse, abort from config import debug from db import currencies, trade_currencies, trades, eth_trades, engine from utils import * from contract_helper import getAddressFromPuzzleHash, getContractProgram, programToPuzzleHash, getSolutionProgram, \ getSecretFromSolutionProgram from full_node_client import FullNodeClient from helper import bytes32 from clvm.casts import int_from_bytes, int_to_bytes from math import ceil from eth_thing import * import random import blspy import threading import time import json app = Flask("yakuSwap") cors = CORS(app, resources={r"/api/*": {"origins": "*"}}) api = Api(app) def std_hash(b) -> bytes32: """ The standard hash used in many places. """ return bytes32(blspy.Util.hash256(bytes(b))) class PingService(Resource): def get(self): return {'message': 'Pong!'} class Currencies(Resource): def get(self): conn = engine.connect() s = currencies.select() result = conn.execute(s) res = [] for row in result: res.append(currencyRowToJson(row)) conn.close() return {'currencies': res} class ConnectionStatus(Resource): def get(self): conn = engine.connect() s = currencies.select() result = conn.execute(s) res = [] for row in result: prefix = row[0] client = FullNodeClient( row[9], # ssl_directory row[7], # host row[8] # port ) api_resp = client.getBlockchainState() if api_resp.get("blockchain_state", -1) == -1: res.append({"currency": prefix, "status": "not_connected"}) else: if api_resp["blockchain_state"]["sync"]["synced"]: res.append({"currency": prefix, "status": "connected"}) else: res.append({"currency": prefix, "status": "not_synced"}) conn.close() return {'connections': res} class Trades(Resource): def get(self): conn = engine.connect() s = trades.select() result = conn.execute(s) res = [] for row in result: stmt = trade_currencies.select().where(trade_currencies.c.id == row[1]) trade_currency_one = conn.execute(stmt).fetchall()[0] stmt = trade_currencies.select().where(trade_currencies.c.id == row[2]) trade_currency_two = conn.execute(stmt).fetchall()[0] res.append(tradesRowToJson(row, tradeCurrencyRowToJson(trade_currency_one), tradeCurrencyRowToJson(trade_currency_two))) conn.close() return {'trades': res} class Currency(Resource): def put(self, address_prefix): parser = reqparse.RequestParser() parser.add_argument('name', type=str, required=True) parser.add_argument('photo_url', type=str, required=True) parser.add_argument('units_per_coin', type=int, required=True) parser.add_argument('min_fee', type=int, required=True) parser.add_argument('default_max_block_height', type=int, required=True) parser.add_argument('default_min_confirmation_height', type=int, required=True) parser.add_argument('host', type=str, required=True) parser.add_argument('port', type=int, required=True) parser.add_argument('ssl_directory', type=str, required=True) args = parser.parse_args(strict=True) conn = engine.connect() s = currencies.select().where(currencies.c.address_prefix == address_prefix) result = conn.execute(s) st = None if len(result.fetchall()) == 0: st = currencies.insert() else: st = currencies.update().where(currencies.c.address_prefix == address_prefix) st = st.values( address_prefix=address_prefix, name=args['name'], photo_url=args['photo_url'], units_per_coin=args['units_per_coin'], min_fee=args['min_fee'], default_max_block_height=args['default_max_block_height'], default_min_confirmation_height=args['default_min_confirmation_height'], 
host=args['host'], port=args['port'], ssl_directory=args['ssl_directory'] ) conn.execute(st) conn.close() return {'success': True} def delete(self, address_prefix): conn = engine.connect() stmt = currencies.delete().where(currencies.c.address_prefix == address_prefix) conn.execute(stmt) conn.close() return {'success': True} trade_threads_ids = [] trade_threads_messages = [] trade_threads_addresses = [] trade_threads_files = [] trade_threads_commands = [] # checkFunc should return True if the trade is still ok def tradeWaitForContract(trade_index, trade, trade_currency, currency, issue_contract, wait=False, other_trade_currency=False, other_currency=False, checkFunc=False): global trade_threads_ids, trade_threads_messages, trade_threads_addresses, trade_threads_files program = getContractProgram( trade.secret_hash, trade_currency.total_amount, trade_currency.fee, trade_currency.from_address, trade_currency.to_address, trade_currency.max_block_height ) programPuzzleHash = programToPuzzleHash(program) programAddress = getAddressFromPuzzleHash(programPuzzleHash, currency.address_prefix) trade_threads_files[trade_index].write( f"Waiting for contract with puzzlehash {programPuzzleHash} and address {programAddress} to be confirmed\n") trade_threads_files[trade_index].flush() full_node_client = FullNodeClient( currency.ssl_directory, currency.host, currency.port, trade_threads_files[trade_index] ) amount_to_send = trade_currency.total_amount - trade_currency.fee amount_to_send = amount_to_send / currency.units_per_coin fee = trade_currency.fee / currency.units_per_coin if issue_contract: trade_threads_messages[ trade_index] = f"Please send {amount_to_send:.12f} {currency.name} with a fee of {fee:.12f} {currency.name} to the address found below. Double-check the address before confirming the transaction - if it's wrong, your coins will be lost." trade_threads_addresses[trade_index] = programAddress else: trade_threads_messages[ trade_index] = f"Waiting for the other human to send {amount_to_send:.12f} {currency.name} with a fee of {fee:.12f} {currency.name} to the address found below..." trade_threads_addresses[trade_index] = programAddress if wait: time.sleep(120) height = full_node_client.getBlockchainHeight() shouldCancel = False if other_trade_currency != False and other_currency != False: other_program = getContractProgram( trade.secret_hash, other_trade_currency.total_amount, other_trade_currency.fee, other_trade_currency.from_address, other_trade_currency.to_address, other_trade_currency.max_block_height ) otherProgramPuzzleHash = programToPuzzleHash(other_program) other_full_node_client = FullNodeClient( other_currency.ssl_directory, other_currency.host, other_currency.port, trade_threads_files[trade_index] ) other_coin_record = other_full_node_client.getContractCoinRecord(otherProgramPuzzleHash.hex(), height - 1000 - other_trade_currency.max_block_height) if other_coin_record == False: shouldCancel = True else: other_coin_block_index = other_coin_record['confirmed_block_index'] trade_threads_files[trade_index].write( f"Other coin record: {other_coin_record}\nShould cancel? 
{shouldCancel}\n") trade_threads_files[trade_index].flush() contract_coin_record = full_node_client.getContractCoinRecord(programPuzzleHash.hex(), height - 1000 - trade_currency.max_block_height) while contract_coin_record == False and shouldCancel == False: time.sleep(60) height = full_node_client.getBlockchainHeight() if other_trade_currency != False: other_height = other_full_node_client.getBlockchainHeight() if other_height - other_coin_block_index >= other_trade_currency.max_block_height * 3 // 4 - ceil( trade_currency.min_confirmation_height * trade_currency.max_block_height / other_trade_currency.max_block_height): shouldCancel = True if checkFunc != False and not checkFunc(): shouldCancel = True if not shouldCancel: contract_coin_record = full_node_client.getContractCoinRecord(programPuzzleHash.hex(), height - 1000 - trade_currency.max_block_height) if shouldCancel == False and contract_coin_record["coin"][ "amount"] != trade_currency.total_amount - trade_currency.fee: trade_threads_files[trade_index].write(f"Trickster detected!\n") trade_threads_files[trade_index].flush() shouldCancel = True trade_threads_files[trade_index].write(f"Contract coin record: {contract_coin_record}\n") trade_threads_files[trade_index].flush() if shouldCancel: trade_threads_files[trade_index].write(f"Should cancel!\n") trade_threads_files[trade_index].flush() trade_threads_messages[trade_index] = "Cancelling trade..." trade_threads_addresses[trade_index] = None else: confirmed_block_index = contract_coin_record['confirmed_block_index'] trade_threads_messages[trade_index] = "Waiting for transaction confirmation..." trade_threads_addresses[trade_index] = None height = full_node_client.getBlockchainHeight() while confirmed_block_index + trade_currency.min_confirmation_height > height: delta = height - confirmed_block_index trade_threads_messages[ trade_index] = f"Waiting for transaction confirmation ({delta} / {trade_currency.min_confirmation_height})" trade_threads_addresses[trade_index] = None time.sleep(10) height = full_node_client.getBlockchainHeight() trade_threads_messages[trade_index] = "Commencing to next step..." 
trade_threads_addresses[trade_index] = None time.sleep(5) return shouldCancel, contract_coin_record def lookForSolutionInBlockchain(trade_index, trade, trade_currency, currency, coin_record, other_trade_currency=False, other_currency=False): global trade_threads_ids, trade_threads_messages, trade_threads_addresses, trade_threads_files program = getContractProgram( trade.secret_hash, trade_currency.total_amount, trade_currency.fee, trade_currency.from_address, trade_currency.to_address, trade_currency.max_block_height ) programPuzzleHash = programToPuzzleHash(program).hex() otherProgram = False otherProgramPuzzleHash = False if other_currency != False: otherProgram = getContractProgram( trade.secret_hash, other_trade_currency.total_amount, other_trade_currency.fee, other_trade_currency.from_address, other_trade_currency.to_address, other_trade_currency.max_block_height ) otherProgramPuzzleHash = programToPuzzleHash(otherProgram).hex() trade_threads_files[trade_index].write( f"Loking for solution of contract with puzzlehash {programPuzzleHash}\nKeeping an eye on {otherProgramPuzzleHash}\n") trade_threads_files[trade_index].flush() full_node_client = FullNodeClient( currency.ssl_directory, currency.host, currency.port, trade_threads_files[trade_index] ) other_full_node_client = False if other_currency != False: other_full_node_client = FullNodeClient( other_currency.ssl_directory, other_currency.host, other_currency.port, trade_threads_files[trade_index] ) if coin_record == False: trade_threads_messages[trade_index] = "Getting contract coin record..." height = full_node_client.getBlockchainHeight() coin_record = full_node_client.getContractCoinRecord(programPuzzleHash, height - 1000 - trade_currency.max_block_height, True) trade_threads_files[trade_index].write(f"Coin record: {coin_record}\n") trade_threads_files[trade_index].flush() if coin_record == False: trade_threads_messages[trade_index] = "Something really strange happened..." trade_threads_files[trade_index].write(f"coin_record is still false?!") trade_threads_files[trade_index].flush() return False trade_threads_messages[trade_index] = "Getting contract solution..." spent_block_index = coin_record["spent_block_index"] other_height = False other_coin_record = False if other_full_node_client != False: other_height = other_full_node_client.getBlockchainHeight() other_coin_record = other_full_node_client.getContractCoinRecord(otherProgramPuzzleHash, other_height - 1000 - other_trade_currency.max_block_height, True) while spent_block_index == 0: time.sleep(15) height = full_node_client.getBlockchainHeight() coin_record = full_node_client.getContractCoinRecord(programPuzzleHash, height - 1000 - trade_currency.max_block_height, True) spent_block_index = coin_record["spent_block_index"] if other_full_node_client != False: other_height = other_full_node_client.getBlockchainHeight() if other_height - other_coin_record[ 'confirmed_block_index'] >= other_trade_currency.max_block_height * 3 // 4: trade_threads_files[trade_index].write(f"Other currency time ran out. Exiting...") trade_threads_files[trade_index].flush() return False if height - coin_record['confirmed_block_index'] >= trade_currency.max_block_height * 3 // 4: trade_threads_files[trade_index].write(f"Main currency time ran out. 
Exiting...") trade_threads_files[trade_index].flush() return False coin = coin_record["coin"] coin_id = std_hash( bytes.fromhex(coin["parent_coin_info"][2:]) + bytes.fromhex(coin["puzzle_hash"][2:]) + int_to_bytes( coin["amount"])).hex() trade_threads_files[trade_index].write(f"Coin id: {coin_id}\nSpent block index: {spent_block_index}\n") trade_threads_files[trade_index].flush() sol = full_node_client.getCoinSolution(coin_id, spent_block_index) while sol == False: trade_threads_messages[trade_index] = "Getting contract solution (again)..." time.sleep(30) sol = full_node_client.getCoinSolution(coin_id, spent_block_index) trade_threads_files[trade_index].write(f"Solution: {sol}\n") trade_threads_files[trade_index].flush() return sol def tradeClaimContract(trade_index, trade, trade_currency, currency, solution_program_hex, coin_record, cancel=False): global trade_threads_ids, trade_threads_messages, trade_threads_addresses, trade_threads_files if cancel: trade_threads_messages[trade_index] = "Preparing to cancel trade :(" trade_threads_files[trade_index].write(f"tradeClaimContract - cancel? {cancel}\n") trade_threads_files[trade_index].flush() program = getContractProgram( trade.secret_hash, trade_currency.total_amount, trade_currency.fee, trade_currency.from_address, trade_currency.to_address, trade_currency.max_block_height ) programPuzzleHash = programToPuzzleHash(program).hex() trade_threads_files[trade_index].write(f"tradeClaimContract - contract with puzzlehash {programPuzzleHash}\n") trade_threads_files[trade_index].flush() full_node_client = FullNodeClient( currency.ssl_directory, currency.host, currency.port, trade_threads_files[trade_index] ) if coin_record == False: trade_threads_messages[trade_index] = "Getting contract coin record..." height = full_node_client.getBlockchainHeight() coin_record = full_node_client.getContractCoinRecord(programPuzzleHash, height - 10000 - trade_currency.max_block_height, True) trade_threads_files[trade_index].write(f"Coin record: {coin_record}\n") trade_threads_files[trade_index].flush() if coin_record == False: trade_threads_messages[trade_index] = "Contract already claimed" return trade_threads_messages[trade_index] = "Waiting for node to be synced..." height = full_node_client.getBlockchainHeight() coin = coin_record["coin"] trade_threads_messages[trade_index] = "Pushing transaction..." r = full_node_client.pushTransaction( program.as_bin().hex(), solution_program_hex, coin ) while r == False: trade_threads_messages[trade_index] = "Pushing transaction again..." r = full_node_client.pushTransaction( program.as_bin().hex(), solution_program_hex, coin ) time.sleep(5) if r == "pending": while r == "pending": trade_threads_messages[ trade_index] = "The transaction was marked as PENDING - I'll push it every 30 seconds just to be sure" r = full_node_client.pushTransaction( program.as_bin().hex(), solution_program_hex, coin ) time.sleep(30) trade_threads_messages[trade_index] = "Done! Check your wallet :)" else: trade_threads_messages[trade_index] = "Done! 
Check your wallet :)" def shouldCancelTrade(trade_index, trade, trade_currency, currency, coin_record): global trade_threads_ids, trade_threads_messages, trade_threads_addresses, trade_threads_files trade_threads_files[trade_index].write(f"Should cancel trade?\n") trade_threads_files[trade_index].flush() program = getContractProgram( trade.secret_hash, trade_currency.total_amount, trade_currency.fee, trade_currency.from_address, trade_currency.to_address, trade_currency.max_block_height ) programPuzzleHash = programToPuzzleHash(program).hex() trade_threads_files[trade_index].write(f"Contract with puzzlehash {programPuzzleHash}\n") trade_threads_files[trade_index].flush() full_node_client = FullNodeClient( currency.ssl_directory, currency.host, currency.port, trade_threads_files[trade_index] ) if coin_record == False: trade_threads_messages[trade_index] = "Getting contract coin record..." height = full_node_client.getBlockchainHeight() coin_record = full_node_client.getContractCoinRecord(programPuzzleHash, height - 10000 - trade_currency.max_block_height, True) if coin_record == False: trade_threads_messages[trade_index] = "Contract already claimed" return False, False trade_threads_files[trade_index].write(f"Coin record: {coin_record}\n") trade_threads_files[trade_index].flush() trade_threads_messages[trade_index] = "Waiting for node to be synced..." height = full_node_client.getBlockchainHeight() trade_threads_messages[trade_index] = "Verifying height..." cancel = False if height - coin_record['confirmed_block_index'] >= trade_currency.max_block_height * 3 // 4: cancel = True return coin_record, cancel def _dumpTradeCurrency(trade_index, trade_currency_one): global trade_threads_files trade_threads_files[trade_index].write(f"Addres prefix: {trade_currency_one.address_prefix}\n") trade_threads_files[trade_index].write(f"Fee: {trade_currency_one.fee}\n") trade_threads_files[trade_index].write(f"Max block height: {trade_currency_one.max_block_height}\n") trade_threads_files[trade_index].write(f"Min conf time: {trade_currency_one.min_confirmation_height}\n") trade_threads_files[trade_index].write(f"From: {trade_currency_one.from_address}\n") trade_threads_files[trade_index].write(f"To: {trade_currency_one.to_address}\n") trade_threads_files[trade_index].write(f"Total amount: {trade_currency_one.total_amount}\n\n\n") trade_threads_files[trade_index].flush() def tradeCode(trade_id): global trade_threads_ids, trade_threads_messages, trade_threads_addresses, trade_threads_files trade_index = 0 for i, v in enumerate(trade_threads_ids): if v == trade_id: trade_index = i trade_threads_files[trade_index].write("ONLY SHARE THE CONTENTS OF THIS FILE WITH TRUSTED PEOPLE\n") conn = engine.connect() s = trades.select().where(trades.c.id == trade_id) trade = conn.execute(s).fetchall()[0] trade_threads_files[trade_index].write(f"Trade\n\n") trade_threads_files[trade_index].write(f"Trade id: {trade_id}\n") trade_threads_files[trade_index].write(f"Secret hash: {trade.secret_hash}\n") trade_threads_files[trade_index].write(f"Is Buyer?: {trade.is_buyer}\n") trade_threads_files[trade_index].write(f"Secret: {trade.secret}\n") trade_threads_files[trade_index].write(f"Step: {trade.step}\n\n\n") trade_threads_files[trade_index].flush() s = trade_currencies.select().where(trade_currencies.c.id == trade.trade_currency_one) trade_currency_one = conn.execute(s).fetchall()[0] trade_threads_files[trade_index].write(f"Trade currency one\n\n") _dumpTradeCurrency(trade_index, trade_currency_one) s = 
currencies.select().where(currencies.c.address_prefix == trade_currency_one.address_prefix) currency_one = conn.execute(s).fetchall()[0] s = trade_currencies.select().where(trade_currencies.c.id == trade.trade_currency_two) trade_currency_two = conn.execute(s).fetchall()[0] trade_threads_files[trade_index].write(f"Trade currency two\n\n") _dumpTradeCurrency(trade_index, trade_currency_two) s = currencies.select().where(currencies.c.address_prefix == trade_currency_two.address_prefix) currency_two = conn.execute(s).fetchall()[0] coin_record_one = False coin_record_two = False coming_from_step_0 = False shouldCancel = False if trade.step == 0: shouldCancel, coin_record_one = tradeWaitForContract(trade_index, trade, trade_currency_one, currency_one, trade.is_buyer, True) s = trades.update().where(trades.c.id == trade_id).values(step=1) conn.execute(s) s = trades.select().where(trades.c.id == trade_id) trade = conn.execute(s).fetchall()[0] coming_from_step_0 = True if trade.step == 1: shouldCancel, coin_record_two = tradeWaitForContract(trade_index, trade, trade_currency_two, currency_two, not trade.is_buyer, coming_from_step_0, trade_currency_one, currency_one) s = trades.update().where(trades.c.id == trade_id).values(step=2) conn.execute(s) s = trades.select().where(trades.c.id == trade_id) trade = conn.execute(s).fetchall()[0] if trade.step == 2: trade_threads_messages[trade_index] = "Starting last step..." trade_threads_addresses[trade_index] = None cancelTrade = shouldCancel if not cancelTrade: if trade.is_buyer: coin_record_two, cancelTrade = shouldCancelTrade(trade_index, trade, trade_currency_two, currency_two, coin_record_two) else: coin_record_one, cancelTrade = shouldCancelTrade(trade_index, trade, trade_currency_one, currency_one, coin_record_one) trade_threads_files[trade_index].write(f"Cancel trade: {cancelTrade}\n") trade_threads_files[trade_index].flush() if cancelTrade: solution_program = getSolutionProgram( "CANCEL-" + str(random.SystemRandom().getrandbits(128))).as_bin().hex() if trade.is_buyer: tradeClaimContract(trade_index, trade, trade_currency_one, currency_one, solution_program, coin_record_one, True) else: tradeClaimContract(trade_index, trade, trade_currency_two, currency_two, solution_program, coin_record_two, True) else: if trade.is_buyer: solution_program = getSolutionProgram(trade.secret).as_bin().hex() tradeClaimContract(trade_index, trade, trade_currency_two, currency_two, solution_program, coin_record_two) else: solution_program = lookForSolutionInBlockchain(trade_index, trade, trade_currency_two, currency_two, coin_record_two, trade_currency_one, currency_one) if solution_program == False: tradeClaimContract(trade_index, trade, trade_currency_two, currency_two, solution_program, coin_record_two, True) else: tradeClaimContract(trade_index, trade, trade_currency_one, currency_one, solution_program, coin_record_one) conn.close() class Trade(Resource): def get(self, trade_id): global trade_threads_ids, trade_threads_messages, trade_threads_addresses, trade_threads_files, trade_threads_commands if not trade_id in trade_threads_ids: t = threading.Thread(target=tradeCode, args=(trade_id,)) trade_threads_ids.append(trade_id) trade_threads_messages.append("Starting thread...") trade_threads_addresses.append(None) trade_threads_files.append(open(f"{trade_id}-log.txt", "a+")) trade_threads_commands.append(None) t.start() index = 0 for i, v in enumerate(trade_threads_ids): if v == trade_id: index = i return { "message": trade_threads_messages[index], "address": 
trade_threads_addresses[index] } def addTradeCurrency(self, engine, data): conn = engine.connect() s = trade_currencies.select().where(trade_currencies.c.id == data['id']) result = conn.execute(s) st = None if len(result.fetchall()) == 0: st = trade_currencies.insert() else: st = trade_currencies.update().where(trade_currencies.c.id == data['id']) st = st.values( id=data['id'], address_prefix=data['address_prefix'], fee=data['fee'], max_block_height=data['max_block_height'], min_confirmation_height=data['min_confirmation_height'], from_address=data['from_address'], to_address=data['to_address'], total_amount=data['total_amount'] ) conn.execute(st) conn.close() def put(self, trade_id): parser = reqparse.RequestParser() parser.add_argument('trade_currency_one', type=dict, required=True) parser.add_argument('trade_currency_two', type=dict, required=True) parser.add_argument('secret', type=str, required=True) parser.add_argument('secret_hash', type=str, required=True) parser.add_argument('is_buyer', type=bool, required=True) parser.add_argument('step', type=int, required=True) args = parser.parse_args(strict=True) self.addTradeCurrency(engine, args['trade_currency_one']) self.addTradeCurrency(engine, args['trade_currency_two']) conn = engine.connect() s = trades.select().where(trades.c.id == trade_id) result = conn.execute(s) st = None if len(result.fetchall()) == 0: st = trades.insert() else: st = trades.update().where(trades.c.id == trade_id) st = st.values( id=trade_id, trade_currency_one=args['trade_currency_one']['id'], trade_currency_two=args['trade_currency_two']['id'], secret_hash=args['secret_hash'], is_buyer=args['is_buyer'], secret=args['secret'], step=args['step'], ) conn.execute(st) conn.close() return {'success': True} def delete(self, trade_id): conn = engine.connect() stmt = trades.delete().where(trades.c.id == trade_id) conn.execute(stmt) conn.close() return {'success': True} class EthTrades(Resource): def get(self): conn = engine.connect() s = eth_trades.select() result = conn.execute(s) res = [] for row in result: stmt = trade_currencies.select().where(trade_currencies.c.id == row[1]) trade_currency = conn.execute(stmt).fetchall()[0] res.append(ethTradesRowToJson(row, tradeCurrencyRowToJson(trade_currency))) conn.close() return {'trades': res} eth_trade_responses = {} def getResponse(trade_id, key, retry=True): global eth_trade_responses if eth_trade_responses.get(trade_id, -1) == -1: eth_trade_responses[trade_id] = {} val = eth_trade_responses[trade_id].get(key, -1) while retry and val == -1: time.sleep(1) val = eth_trade_responses[trade_id].get(key, -1) if val == -1: return False return val def ethTradeCode(trade_id): global trade_threads_ids, trade_threads_messages, trade_threads_addresses, trade_threads_files, eth_trade_responses global ETH_MAX_BLOCK_HEIGHT, ETH_REQUIRED_CONFIRMATIONS trade_index = 0 for i, v in enumerate(trade_threads_ids): if v == trade_id: trade_index = i trade_threads_files[trade_index].write("ONLY SHARE THE CONTENTS OF THIS FILE WITH TRUSTED PEOPLE\n") conn = engine.connect() s = eth_trades.select().where(eth_trades.c.id == trade_id) trade = conn.execute(s).fetchall()[0] trade_threads_files[trade_index].write(f"Trade\n\n") trade_threads_files[trade_index].write(f"Trade id: {trade_id}\n") trade_threads_files[trade_index].write(f"Secret hash: {trade.secret_hash}\n") trade_threads_files[trade_index].write(f"Is Buyer?: {trade.is_buyer}\n") trade_threads_files[trade_index].write(f"Secret: 
{trade.secret}\n") trade_threads_files[trade_index].write(f"Step: {trade.step}\n\n\n") trade_threads_files[trade_index].flush() s = trade_currencies.select().where(trade_currencies.c.id == trade.trade_currency) trade_currency = conn.execute(s).fetchall()[0] trade_threads_files[trade_index].write(f"Trade currency\n\n") _dumpTradeCurrency(trade_index, trade_currency) s = currencies.select().where(currencies.c.address_prefix == trade_currency.address_prefix) currency = conn.execute(s).fetchall()[0] coin_record = False coming_from_step_0 = False shouldCancel = False swap_id = "None" swap_data = { "contract_address": getContractAddress(trade[9]), "token_address": getTokenAddress(trade[9], trade[10]), "secret_hash": trade[5], "from_address": trade[2], "to_address": trade[3], "max_block_height": ETH_MAX_BLOCK_HEIGHT, "amount": trade[4] } if trade.step == 0: trade_threads_addresses[trade_index] = None trade_threads_files[trade_index].write(f"Swap data: {json.dumps(swap_data)}\n\n") trade_threads_files[trade_index].flush() if trade.is_buyer: trade_threads_messages[ trade_index] = f"Press the button below to create the swap on the Ethereum blockchain\nMake sure you're connected to the following network: {trade[9]}" trade_threads_commands[trade_index] = {"code": "CREATE_SWAP", "args": swap_data} token_approval_tx_sent = getResponse(trade_id, "token_approval_tx_sent") trade_threads_messages[trade_index] = "Confirming token approval..." trade_threads_commands[trade_index] = None token_approval_tx_confirmed = getResponse(trade_id, "token_approval_tx_confirmed") trade_threads_messages[trade_index] = "Please approve the 2nd transaction" created = getResponse(trade_id, "createSwap_tx_sent") trade_threads_messages[trade_index] = "Waiting for 2nd transaction to be confirmed..." created = getResponse(trade_id, "swap_created") trade_threads_messages[ trade_index] = f"Waiting for swap to be confirmed on the Ethereum blockchain...\nMake sure you're connected to the following network: {trade[9]}" trade_threads_commands[trade_index] = {"code": "WAIT_FOR_SWAP", "args": swap_data} swap_id = getResponse(trade_id, "swap_id") confirmations = getResponse(trade_id, "confirmations") while confirmations < ETH_REQUIRED_CONFIRMATIONS: trade_threads_messages[ trade_index] = f"Confirming swap creation ({confirmations}/{ETH_REQUIRED_CONFIRMATIONS})" confirmations = getResponse(trade_id, "confirmations") time.sleep(5) trade_threads_messages[trade_index] = f"Commencing to next step..." trade_threads_commands[trade_index] = None shouldCancel = getResponse(trade_id, "should_cancel", False) s = eth_trades.update().where(eth_trades.c.id == trade_id).values(step=1) conn.execute(s) s = eth_trades.select().where(eth_trades.c.id == trade_id) trade = conn.execute(s).fetchall()[0] coming_from_step_0 = True if trade.step == 1: shouldCancel = shouldCancel or getResponse(trade_id, "should_cancel", False) if not shouldCancel: eth_trade_responses[trade_id]['confirmations'] = -2 trade_threads_messages[trade_index] = f"Getting ETH transaction confirmations..." 
trade_threads_commands[trade_index] = {"code": "WAIT_FOR_SWAP", "args": swap_data} while eth_trade_responses[trade_id]['confirmations'] == -2: time.sleep(1) trade_threads_commands[trade_index] = None def checkFunc(): global eth_trade_responses return eth_trade_responses[trade_id]["confirmations"] < ETH_MAX_BLOCK_HEIGHT * 3 // 4 shouldCancel, coin_record = tradeWaitForContract(trade_index, trade, trade_currency, currency, not trade.is_buyer, False, False, False, checkFunc) s = eth_trades.update().where(eth_trades.c.id == trade_id).values(step=2) conn.execute(s) s = eth_trades.select().where(eth_trades.c.id == trade_id) trade = conn.execute(s).fetchall()[0] if trade.step == 2: trade_threads_messages[trade_index] = "Starting last step..." trade_threads_addresses[trade_index] = None cancelTrade = shouldCancel or getResponse(trade_id, "should_cancel", False) if not cancelTrade: coin_record, cancelTrade = shouldCancelTrade(trade_index, trade, trade_currency, currency, coin_record) eth_trade_responses[trade_id]['confirmations'] = -2 trade_threads_messages[trade_index] = f"Verifying ETH height..." trade_threads_commands[trade_index] = {"code": "WAIT_FOR_SWAP", "args": swap_data} while eth_trade_responses[trade_id]["confirmations"] == -2: time.sleep(1) trade_threads_commands[trade_index] = None cancelTrade = cancelTrade or ( eth_trade_responses[trade_id]['confirmations'] > ETH_MAX_BLOCK_HEIGHT * 3 // 4) trade_threads_files[trade_index].write(f"Cancel trade: {cancelTrade}\n") trade_threads_files[trade_index].flush() if cancelTrade: cancelStr = "CANCEL-" + str(random.SystemRandom().getrandbits(128)) if not trade.is_buyer: solution_program = getSolutionProgram(cancelStr).as_bin().hex() tradeClaimContract(trade_index, trade, trade_currency, currency, solution_program, coin_record, True) else: trade_threads_messages[trade_index] = "Cancel trade - waiting for the swap to expire..." trade_threads_commands[trade_index] = None while eth_trade_responses[trade_id]["confirmations"] < ETH_MAX_BLOCK_HEIGHT: trade_threads_messages[ trade_index] = f"{ETH_MAX_BLOCK_HEIGHT - eth_trade_responses[trade_id]['confirmations']} blocks left before you can cancel the swap..." trade_threads_commands[trade_index] = {"code": "WAIT_FOR_SWAP", "args": swap_data} trade_threads_messages[trade_index] = "Press the button below to cancel the swap :(" trade_threads_commands[trade_index] = {"code": "CANCEL_SWAP", "args": swap_data} swap_completed = getResponse(trade_id, "swap_completed") else: if not trade.is_buyer: trade_threads_messages[trade_index] = "Searching the Chia blockchan for a solution..." trade_threads_commands[trade_index] = None solution_program = lookForSolutionInBlockchain(trade_index, trade, trade_currency, currency, coin_record) secret = getSecretFromSolutionProgram(solution_program) trade_threads_messages[trade_index] = "Press the button below to claim your ETH" swap_data["secret"] = secret trade_threads_commands[trade_index] = {"code": "COMPLETE_SWAP", "args": swap_data} swap_completed = getResponse(trade_id, "swap_completed") else: trade_threads_messages[trade_index] = "Preparing to claim XCH..." 
trade_threads_commands[trade_index] = None solution_program = getSolutionProgram(trade.secret).as_bin().hex() tradeClaimContract(trade_index, trade, trade_currency, currency, solution_program, coin_record) trade_threads_messages[trade_index] = "Done :)" trade_threads_commands[trade_index] = None conn.close() class EthTrade(Resource): def get(self, trade_id): global trade_threads_ids, trade_threads_messages, trade_threads_addresses, trade_threads_commands, trade_threads_files if not trade_id in trade_threads_ids: t = threading.Thread(target=ethTradeCode, args=(trade_id,)) trade_threads_ids.append(trade_id) trade_threads_messages.append("Starting thread...") trade_threads_addresses.append(None) trade_threads_commands.append(None) trade_threads_files.append(open(f"{trade_id}-ETH-log.txt", "a+")) t.start() index = 0 for i, v in enumerate(trade_threads_ids): if v == trade_id: index = i return { "message": trade_threads_messages[index], "address": trade_threads_addresses[index], "command": trade_threads_commands[index] } def addTradeCurrency(self, engine, data): conn = engine.connect() s = trade_currencies.select().where(trade_currencies.c.id == data['id']) result = conn.execute(s) st = None if len(result.fetchall()) == 0: st = trade_currencies.insert() else: st = trade_currencies.update().where(trade_currencies.c.id == data['id']) st = st.values( id=data['id'], address_prefix=data['address_prefix'], fee=data['fee'], max_block_height=data['max_block_height'], min_confirmation_height=data['min_confirmation_height'], from_address=data['from_address'], to_address=data['to_address'], total_amount=data['total_amount'] ) conn.execute(st) conn.close() def post(self, trade_id): global eth_trade_responses parser = reqparse.RequestParser() parser.add_argument('data', type=dict, required=True) args = parser.parse_args(strict=True) if eth_trade_responses.get(trade_id, -1) == -1: eth_trade_responses[trade_id] = {} for key, value in args['data'].items(): eth_trade_responses[trade_id][key] = value def put(self, trade_id): parser = reqparse.RequestParser() parser.add_argument('trade_currency', type=dict, required=True) parser.add_argument('eth_from_address', type=str, required=True) parser.add_argument('eth_to_address', type=str, required=True) parser.add_argument('total_gwei', type=int, required=True) parser.add_argument('secret', type=str, required=True) parser.add_argument('secret_hash', type=str, required=True) parser.add_argument('is_buyer', type=bool, required=True) parser.add_argument('step', type=int, required=True) parser.add_argument('network', type=str, required=True) parser.add_argument('token', type=str, required=True) args = parser.parse_args(strict=True) self.addTradeCurrency(engine, args['trade_currency']) conn = engine.connect() s = eth_trades.select().where(eth_trades.c.id == trade_id) result = conn.execute(s) st = None if len(result.fetchall()) == 0: st = eth_trades.insert() else: st = eth_trades.update().where(eth_trades.c.id == trade_id) st = st.values( id=trade_id, trade_currency=args['trade_currency']['id'], eth_from_address=args['eth_from_address'], eth_to_address=args['eth_to_address'], total_gwei=args['total_gwei'], secret_hash=args['secret_hash'], is_buyer=args['is_buyer'], secret=args['secret'], step=args['step'], network=args['network'], token=args['token'] ) conn.execute(st) conn.close() return {'success': True} def delete(self, trade_id): conn = engine.connect() stmt = eth_trades.delete().where(eth_trades.c.id == trade_id) 
conn.execute(stmt) conn.close() return {'success': True} class EthNetworks(Resource): def get(self): return json.loads(getNetworksString()) api.add_resource(PingService, '/api/ping') api.add_resource(ConnectionStatus, '/api/connection-status') api.add_resource(Currencies, '/api/currencies') api.add_resource(Trades, '/api/trades') api.add_resource(Currency, '/api/currency/<string:address_prefix>') api.add_resource(Trade, '/api/trade/<string:trade_id>') api.add_resource(EthTrades, '/api/eth/trades') api.add_resource(EthNetworks, '/api/eth/networks') api.add_resource(EthTrade, '/api/eth/trade/<string:trade_id>') @app.route('/', defaults={'path': 'index.html'}) @app.route('/<path:path>') def get_resource(path): return send_from_directory('html', path) if __name__ == '__main__': app.run(host='127.0.0.1', port=4143, debug=debug)
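The resources registered above are thin wrappers around the per-trade worker threads, so a front end only ever needs two calls per trade: a PUT to create or update the trade row, then repeated GETs to poll the worker's status message and deposit address. The sketch below is a hypothetical client for the Trade resource, not part of the server itself; it assumes the app is running locally on port 4143 as configured in app.run(), and every field value shown is an illustrative placeholder.

import time
import requests

BASE_URL = "http://127.0.0.1:4143/api"

def make_trade_currency(currency_id, prefix, from_address, to_address, amount):
    # Shape expected by Trade.addTradeCurrency / the PUT request parser.
    return {
        "id": currency_id,
        "address_prefix": prefix,
        "fee": 0,
        "max_block_height": 576,
        "min_confirmation_height": 32,
        "from_address": from_address,
        "to_address": to_address,
        "total_amount": amount,
    }

def start_trade(trade_id):
    # Placeholder addresses, amounts and secrets; a real client derives these
    # from the wallet and from the agreed swap parameters.
    payload = {
        "trade_currency_one": make_trade_currency("tc-1", "xch", "xch1sender", "xch1receiver", 1000),
        "trade_currency_two": make_trade_currency("tc-2", "xcc", "xcc1sender", "xcc1receiver", 2000),
        "secret": "my-secret",
        "secret_hash": "aa" * 32,
        "is_buyer": True,
        "step": 0,
    }
    requests.put(f"{BASE_URL}/trade/{trade_id}", json=payload).raise_for_status()

def poll_trade(trade_id):
    # The first GET spawns the worker thread; later GETs just report progress.
    while True:
        status = requests.get(f"{BASE_URL}/trade/{trade_id}").json()
        print(status["message"], status["address"])
        time.sleep(5)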
import_dir.py
import sys sys.path.append('/app/') import os import argparse import progressbar import threading import json import urllib3 import requests import queue import time from util import allowed_file, str2bool, batch_inference, load_image from urllib.parse import quote urllib3.disable_warnings() es_index = "es-retrieval" num_files_per_request = 10 num_threads = 4 q = queue.Queue() lock = threading.Lock() count = 0 headers = { 'User-Agent': 'Import Client'} es_index_tpl_str = """ { "settings": { "number_of_shards": 3 }, "mappings": { "properties": { %s "f0": {"type": "keyword"}, "f1": {"type": "keyword"}, "f2": {"type": "keyword"}, "f3": {"type": "keyword"}, "r0": {"type": "long"}, "r1": {"type": "long"}, "r2": {"type": "long"}, "r3": {"type": "long"} } } } """ pb_widgets = [progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()] def es_drop_and_create_index(): # Drop index r = requests.delete(args.es_url + "/" + es_index, verify=False) print(args.es_url, r.text) # Create index cols = ['id', 'imageurl', 'thumburl'] data_fields = "".join( [" \"" + field_name + "\": { \"type\": \"keyword\", \"index\": false }, " for field_name in cols]) q = es_index_tpl_str % (data_fields,) print(q) r = requests.put(args.es_url + "/" + es_index, q, headers={'Content-Type': 'application/json'}) print(r.text) # No read-only r = requests.put(args.es_url + "/" + es_index + "/_settings", """{"index": {"blocks": {"read_only_allow_delete": "false"}}}""", headers={'Content-Type': 'application/json'}) print(r.text) def process_batch(batch): s = "" for r in batch: s += """{ "index": { "_index":"%s" } } """ % (es_index,) s += json.dumps(r).replace('\n', ' ') + "\n" # print (s) r = requests.post(args.es_url + "/" + es_index + "/_bulk", s, headers={"Content-Type": "application/x-ndjson"}) print(r.text) def process_batch(batch): results = batch_inference(batch) # results = json.loads(r.text)["results"] s = "" for r in results: code_dict = r["codes"] webpath = os.path.join("static/images/",quote(r["fieldname"])) code_dict["imageurl"] = webpath code_dict["thumburl"]= webpath code_dict["id"] = r["fieldname"] s += """{ "index": { "_index":"%s" } } """ % (es_index,) s += json.dumps(code_dict).replace('\n', ' ') + "\n" # s += es_generate_doc_str(code_dict).replace('\n', ' ') + "\n" r = requests.post(args.es_url + "/" + es_index + "/_bulk", s, headers={"Content-Type": "application/x-ndjson"}) def get_image_batch(batch): files = {} for p in batch: local_file = open(os.path.join(image_dir, p), "rb") img = local_file.read() img = load_image(img) files[p] = {p: img} return files def worker(): while True: batch = q.get() if batch is None: break image_batch = get_image_batch(batch) process_batch(image_batch) global count global bar lock.acquire() count += num_files_per_request count = count if count <= num_files_total else num_files_total bar.update(count) lock.release() q.task_done() if __name__ == '__main__': parser = argparse.ArgumentParser( description='Create ES index, compute and import image from imagefiles') parser.add_argument( '--es_url', default="http://elasticsearch:9200", type=str, help='Elastic Search URL with port (default: http://elasticsearch:9200)' ) parser.add_argument( '--es_index', default="es-retrieval", type=str, help='Elastic Search index name (default: es-retrieval)' ) parser.add_argument( '--images_dir', default="images", type=str, help='Directory containing keyframes' ) args = parser.parse_args() image_dir = args.images_dir es_index = args.es_index pathes = [] print("""Reading images...""") for root, dirs, 
files in os.walk(image_dir): for filename in files: if (allowed_file(filename)): path = os.path.join(os.path.relpath(root, image_dir), filename) print ("Adding " + path) pathes.append(path) # pathes = pathes[:10000] num_files_total = len(pathes) print("""%d images found.""" % (num_files_total,)) # ES index if not es_index.islower(): raise ValueError("Index needs to be lowercase") # Init threads threads = [] for i in range(num_threads): t = threading.Thread(target=worker) t.start() threads.append(t) es_drop_and_create_index() s = time.time() print("Processing images...") bar = progressbar.ProgressBar(maxval=num_files_total, \ widgets=pb_widgets) bar.start() for start in range(0, num_files_total, num_files_per_request): end = start + num_files_per_request end = end if end <= num_files_total else num_files_total batch = pathes[start:end] q.put(batch) q.join() # stop workers for i in range(num_threads): q.put(None) for t in threads: t.join() bar.finish() duration = time.time() - s print(""" ------------------------------------------------------- Total time: %0.2fs for %d images Time per image: %0.2fs """ % (duration, num_files_total, duration / num_files_total))
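Once import_dir.py has populated the index, the stored documents can be fetched back with an ordinary Elasticsearch search request against the mapping created above (keyword code fields f0..f3, long fields r0..r3, plus id/imageurl/thumburl). A minimal sketch, assuming Elasticsearch is reachable at the default --es_url and the index name was left at es-retrieval; the code value used in the term query is a placeholder.

import requests

ES_URL = "http://elasticsearch:9200"
ES_INDEX = "es-retrieval"

# Placeholder term query against one of the keyword code fields (f0..f3).
query = {"query": {"term": {"f0": "some-code-value"}}, "size": 10}

r = requests.post(
    f"{ES_URL}/{ES_INDEX}/_search",
    json=query,
    headers={"Content-Type": "application/json"},
)
r.raise_for_status()
for hit in r.json()["hits"]["hits"]:
    print(hit["_source"]["id"], hit["_source"]["imageurl"])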
coref_model.py
import operator import random import math import json import threading import numpy as np import tensorflow as tf import util import coref_ops import conll import metrics class CorefModel(object): def __init__(self, config): self.config = config self.embedding_info = [(emb["size"], emb["lowercase"]) for emb in config["embeddings"]] self.embedding_size = sum(size for size, _ in self.embedding_info) self.char_embedding_size = config["char_embedding_size"] self.char_dict = util.load_char_dict(config["char_vocab_path"]) self.embedding_dicts = [util.load_embedding_dict(emb["path"], emb["size"], emb["format"]) for emb in config["embeddings"]] self.max_mention_width = config["max_mention_width"] self.genres = { g:i for i,g in enumerate(config["genres"]) } self.eval_data = None # Load eval data lazily. input_props = [] input_props.append((tf.float32, [None, None, self.embedding_size])) # Text embeddings. input_props.append((tf.int32, [None, None, None])) # Character indices. input_props.append((tf.int32, [None])) # Text lengths. input_props.append((tf.int32, [None])) # Speaker IDs. input_props.append((tf.int32, [])) # Genre. input_props.append((tf.bool, [])) # Is training. input_props.append((tf.int32, [None])) # Gold starts. input_props.append((tf.int32, [None])) # Gold ends. input_props.append((tf.int32, [None])) # Cluster ids. self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props] dtypes, shapes = zip(*input_props) queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes) self.enqueue_op = queue.enqueue(self.queue_input_tensors) self.input_tensors = queue.dequeue() self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors) self.global_step = tf.Variable(0, name="global_step", trainable=False) self.reset_global_step = tf.assign(self.global_step, 0) learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step, self.config["decay_frequency"], self.config["decay_rate"], staircase=True) trainable_params = tf.trainable_variables() gradients = tf.gradients(self.loss, trainable_params) gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"]) optimizers = { "adam" : tf.train.AdamOptimizer, "sgd" : tf.train.GradientDescentOptimizer } optimizer = optimizers[self.config["optimizer"]](learning_rate) self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step) def start_enqueue_thread(self, session): with open(self.config["train_path"]) as f: train_examples = [json.loads(jsonline) for jsonline in f.readlines()] def _enqueue_loop(): while True: random.shuffle(train_examples) for example in train_examples: tensorized_example = self.tensorize_example(example, is_training=True) feed_dict = dict(zip(self.queue_input_tensors, tensorized_example)) session.run(self.enqueue_op, feed_dict=feed_dict) enqueue_thread = threading.Thread(target=_enqueue_loop) enqueue_thread.daemon = True enqueue_thread.start() def tensorize_mentions(self, mentions): if len(mentions) > 0: starts, ends = zip(*mentions) else: starts, ends = [], [] return np.array(starts), np.array(ends) def tensorize_example(self, example, is_training, oov_counts=None): clusters = example["clusters"] gold_mentions = sorted(tuple(m) for m in util.flatten(clusters)) gold_mention_map = {m:i for i,m in enumerate(gold_mentions)} cluster_ids = np.zeros(len(gold_mentions)) for cluster_id, cluster in enumerate(clusters): for mention in cluster: cluster_ids[gold_mention_map[tuple(mention)]] = 
cluster_id sentences = example["sentences"] num_words = sum(len(s) for s in sentences) speakers = util.flatten(example["speakers"]) assert num_words == len(speakers) max_sentence_length = max(len(s) for s in sentences) max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.config["filter_widths"])) word_emb = np.zeros([len(sentences), max_sentence_length, self.embedding_size]) char_index = np.zeros([len(sentences), max_sentence_length, max_word_length]) text_len = np.array([len(s) for s in sentences]) for i, sentence in enumerate(sentences): for j, word in enumerate(sentence): current_dim = 0 for k, (d, (s,l)) in enumerate(zip(self.embedding_dicts, self.embedding_info)): if l: current_word = word.lower() else: current_word = word if oov_counts is not None and current_word not in d: oov_counts[k] += 1 word_emb[i, j, current_dim:current_dim + s] = util.normalize(d[current_word]) current_dim += s char_index[i, j, :len(word)] = [self.char_dict[c] for c in word] speaker_dict = { s:i for i,s in enumerate(set(speakers)) } speaker_ids = np.array([speaker_dict[s] for s in speakers]) doc_key = example["doc_key"] genre = self.genres[doc_key[:2]] gold_starts, gold_ends = self.tensorize_mentions(gold_mentions) if is_training and len(sentences) > self.config["max_training_sentences"]: return self.truncate_example(word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids) else: return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids def truncate_example(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids): max_training_sentences = self.config["max_training_sentences"] num_sentences = word_emb.shape[0] assert num_sentences > max_training_sentences sentence_offset = random.randint(0, num_sentences - max_training_sentences) word_offset = text_len[:sentence_offset].sum() num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum() word_emb = word_emb[sentence_offset:sentence_offset + max_training_sentences,:,:] char_index = char_index[sentence_offset:sentence_offset + max_training_sentences,:,:] text_len = text_len[sentence_offset:sentence_offset + max_training_sentences] speaker_ids = speaker_ids[word_offset: word_offset + num_words] gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words) gold_starts = gold_starts[gold_spans] - word_offset gold_ends = gold_ends[gold_spans] - word_offset cluster_ids = cluster_ids[gold_spans] return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids def get_predictions_and_loss(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids): self.gold_starts = gold_starts self.gold_ends = gold_ends self.cluster_ids = cluster_ids self.dropout = 1 - (tf.to_float(is_training) * self.config["dropout_rate"]) self.lexical_dropout = 1 - (tf.to_float(is_training) * self.config["lexical_dropout_rate"]) num_sentences = tf.shape(word_emb)[0] max_sentence_length = tf.shape(word_emb)[1] text_emb_list = [word_emb] if self.config["char_embedding_size"] > 0: char_emb = tf.gather(tf.get_variable("char_embeddings", [len(self.char_dict), self.config["char_embedding_size"]]), char_index) # [num_sentences, max_sentence_length, max_word_length, emb] flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, util.shape(char_emb, 2), util.shape(char_emb, 
3)]) # [num_sentences * max_sentence_length, max_word_length, emb] flattened_aggregated_char_emb = util.cnn(flattened_char_emb, self.config["filter_widths"], self.config["filter_size"]) # [num_sentences * max_sentence_length, emb] aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb, [num_sentences, max_sentence_length, util.shape(flattened_aggregated_char_emb, 1)]) # [num_sentences, max_sentence_length, emb] text_emb_list.append(aggregated_char_emb) text_emb = tf.concat(text_emb_list, 2) text_emb = tf.nn.dropout(text_emb, self.lexical_dropout) text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) text_len_mask = tf.reshape(text_len_mask, [num_sentences * max_sentence_length]) # self.text_len_mask = text_len_mask[0] text_outputs = self.encode_sentences(text_emb, text_len, text_len_mask) text_outputs = tf.nn.dropout(text_outputs, self.dropout) genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre) # [emb] sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length]) # [num_sentences, max_sentence_length] flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words] flattened_text_emb = self.flatten_emb_by_sentence(text_emb, text_len_mask) # [num_words] self.flattened_sentence_indices = flattened_sentence_indices candidate_starts, candidate_ends = coref_ops.spans( sentence_indices=flattened_sentence_indices, max_width=self.max_mention_width) candidate_starts.set_shape([None]) candidate_ends.set_shape([None]) candidate_mention_emb = self.get_mention_emb(flattened_text_emb, text_outputs, candidate_starts, candidate_ends) # [num_candidates, emb] candidate_mention_scores = self.get_mention_scores(candidate_mention_emb) # [num_mentions, 1] candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [num_mentions] k = tf.to_int32(tf.floor(tf.to_float(tf.shape(text_outputs)[0]) * self.config["mention_ratio"])) predicted_mention_indices = coref_ops.extract_mentions(candidate_mention_scores, candidate_starts, candidate_ends, k) # ([k], [k]) predicted_mention_indices.set_shape([None]) mention_starts = tf.gather(candidate_starts, predicted_mention_indices) # [num_mentions] mention_ends = tf.gather(candidate_ends, predicted_mention_indices) # [num_mentions] mention_emb = tf.gather(candidate_mention_emb, predicted_mention_indices) # [num_mentions, emb] mention_scores = tf.gather(candidate_mention_scores, predicted_mention_indices) # [num_mentions] mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb] mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb] mention_speaker_ids = tf.gather(speaker_ids, mention_starts) # [num_mentions] max_antecedents = self.config["max_antecedents"] antecedents, antecedent_labels, antecedents_len = coref_ops.antecedents(mention_starts, mention_ends, gold_starts, gold_ends, cluster_ids, max_antecedents) # ([num_mentions, max_ant], [num_mentions, max_ant + 1], [num_mentions] antecedents.set_shape([None, None]) antecedent_labels.set_shape([None, None]) antecedents_len.set_shape([None]) antecedent_scores = self.get_antecedent_scores(mention_emb, mention_scores, antecedents, antecedents_len, mention_starts, mention_ends, mention_speaker_ids, genre_emb) # [num_mentions, max_ant + 1] loss = self.softmax_loss(antecedent_scores, antecedent_labels) # [num_mentions] loss = tf.reduce_sum(loss) # [] return [ candidate_starts, candidate_ends, candidate_mention_scores, 
mention_starts, mention_ends, antecedents, antecedent_scores ], loss def get_mention_emb(self, text_emb, text_outputs, mention_starts, mention_ends): mention_emb_list = [] mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb] mention_emb_list.append(mention_start_emb) mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb] mention_emb_list.append(mention_end_emb) mention_width = 1 + mention_ends - mention_starts # [num_mentions] if self.config["use_features"]: mention_width_index = mention_width - 1 # [num_mentions] mention_width_emb = tf.gather(tf.get_variable("mention_width_embeddings", [self.config["max_mention_width"], self.config["feature_size"]]), mention_width_index) # [num_mentions, emb] mention_width_emb = tf.nn.dropout(mention_width_emb, self.dropout) mention_emb_list.append(mention_width_emb) if self.config["model_heads"]: mention_indices = tf.expand_dims(tf.range(self.config["max_mention_width"]), 0) + tf.expand_dims(mention_starts, 1) # [num_mentions, max_mention_width] mention_indices = tf.minimum(util.shape(text_outputs, 0) - 1, mention_indices) # [num_mentions, max_mention_width] mention_text_emb = tf.gather(text_emb, mention_indices) # [num_mentions, max_mention_width, emb] self.head_scores = util.projection(text_outputs, 1) # [num_words, 1] mention_head_scores = tf.gather(self.head_scores, mention_indices) # [num_mentions, max_mention_width, 1] mention_mask = tf.expand_dims(tf.sequence_mask(mention_width, self.config["max_mention_width"], dtype=tf.float32), 2) # [num_mentions, max_mention_width, 1] mention_attention = tf.nn.softmax(mention_head_scores + tf.log(mention_mask), dim=1) # [num_mentions, max_mention_width, 1] mention_head_emb = tf.reduce_sum(mention_attention * mention_text_emb, 1) # [num_mentions, emb] mention_emb_list.append(mention_head_emb) mention_emb = tf.concat(mention_emb_list, 1) # [num_mentions, emb] return mention_emb def get_mention_scores(self, mention_emb): with tf.variable_scope("mention_scores"): return util.ffnn(mention_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [num_mentions, 1] def softmax_loss(self, antecedent_scores, antecedent_labels): gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [num_mentions, max_ant + 1] marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [num_mentions] log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [num_mentions] return log_norm - marginalized_gold_scores # [num_mentions] def get_antecedent_scores(self, mention_emb, mention_scores, antecedents, antecedents_len, mention_starts, mention_ends, mention_speaker_ids, genre_emb): num_mentions = util.shape(mention_emb, 0) max_antecedents = util.shape(antecedents, 1) feature_emb_list = [] if self.config["use_metadata"]: antecedent_speaker_ids = tf.gather(mention_speaker_ids, antecedents) # [num_mentions, max_ant] same_speaker = tf.equal(tf.expand_dims(mention_speaker_ids, 1), antecedent_speaker_ids) # [num_mentions, max_ant] speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]]), tf.to_int32(same_speaker)) # [num_mentions, max_ant, emb] feature_emb_list.append(speaker_pair_emb) tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [num_mentions, max_antecedents, 1]) # [num_mentions, max_ant, emb] feature_emb_list.append(tiled_genre_emb) if self.config["use_features"]: target_indices = tf.range(num_mentions) # [num_mentions] mention_distance = tf.expand_dims(target_indices, 1) - 
antecedents # [num_mentions, max_ant] mention_distance_bins = coref_ops.distance_bins(mention_distance) # [num_mentions, max_ant] mention_distance_bins.set_shape([None, None]) mention_distance_emb = tf.gather(tf.get_variable("mention_distance_emb", [10, self.config["feature_size"]]), mention_distance_bins) # [num_mentions, max_ant] feature_emb_list.append(mention_distance_emb) feature_emb = tf.concat(feature_emb_list, 2) # [num_mentions, max_ant, emb] feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [num_mentions, max_ant, emb] antecedent_emb = tf.gather(mention_emb, antecedents) # [num_mentions, max_ant, emb] self.mention_emb_shape = tf.shape(mention_emb) self.mention_start_shape = tf.shape(antecedents) target_emb_tiled = tf.tile(tf.expand_dims(mention_emb, 1), [1, max_antecedents, 1]) # [num_mentions, max_ant, emb] similarity_emb = antecedent_emb * target_emb_tiled # [num_mentions, max_ant, emb] pair_emb = tf.concat([target_emb_tiled, antecedent_emb, similarity_emb, feature_emb], 2) # [num_mentions, max_ant, emb] with tf.variable_scope("iteration"): with tf.variable_scope("antecedent_scoring"): antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [num_mentions, max_ant, 1] antecedent_scores = tf.squeeze(antecedent_scores, 2) # [num_mentions, max_ant] antecedent_mask = tf.log(tf.sequence_mask(antecedents_len, max_antecedents, dtype=tf.float32)) # [num_mentions, max_ant] antecedent_scores += antecedent_mask # [num_mentions, max_ant] antecedent_scores += tf.expand_dims(mention_scores, 1) + tf.gather(mention_scores, antecedents) # [num_mentions, max_ant] antecedent_scores = tf.concat([tf.zeros([util.shape(mention_scores, 0), 1]), antecedent_scores], 1) # [num_mentions, max_ant + 1] return antecedent_scores # [num_mentions, max_ant + 1] def flatten_emb_by_sentence(self, emb, text_len_mask): num_sentences = tf.shape(emb)[0] max_sentence_length = tf.shape(emb)[1] emb_rank = len(emb.get_shape()) if emb_rank == 2: flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length]) elif emb_rank == 3: flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)]) else: raise ValueError("Unsupported rank: {}".format(emb_rank)) return tf.boolean_mask(flattened_emb, text_len_mask) def encode_sentences(self, text_emb, text_len, text_len_mask): num_sentences = tf.shape(text_emb)[0] max_sentence_length = tf.shape(text_emb)[1] # Transpose before and after for efficiency. 
inputs = tf.transpose(text_emb, [1, 0, 2]) # [max_sentence_length, num_sentences, emb] # inputs = tf.unstack(inputs, axis = 0) with tf.variable_scope("fw_cell"): cell_fw = util.CustomLSTMCell(self.config["lstm_size"], num_sentences, self.dropout) preprocessed_inputs_fw = cell_fw.preprocess_input(inputs) with tf.variable_scope("bw_cell"): cell_bw = util.CustomLSTMCell(self.config["lstm_size"], num_sentences, self.dropout) preprocessed_inputs_bw = cell_bw.preprocess_input(inputs) preprocessed_inputs_bw = tf.reverse_sequence(preprocessed_inputs_bw, seq_lengths=text_len, seq_dim=0, batch_dim=1) state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1])) state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1])) with tf.variable_scope("lstm"): with tf.variable_scope("fw_lstm"): fw_outputs, fw_states = tf.nn.dynamic_rnn(cell=cell_fw, inputs=preprocessed_inputs_fw, sequence_length=text_len, initial_state=state_fw, time_major=True) with tf.variable_scope("bw_lstm"): bw_outputs, bw_states = tf.nn.dynamic_rnn(cell=cell_bw, inputs=preprocessed_inputs_bw, sequence_length=text_len, initial_state=state_bw, time_major=True) bw_outputs = tf.reverse_sequence(bw_outputs, seq_lengths=text_len, seq_dim=0, batch_dim=1) text_outputs = tf.concat([fw_outputs, bw_outputs], 2) text_outputs = tf.transpose(text_outputs, [1, 0, 2]) # [num_sentences, max_sentence_length, emb] return self.flatten_emb_by_sentence(text_outputs, text_len_mask) def evaluate_mentions(self, candidate_starts, candidate_ends, mention_starts, mention_ends, mention_scores, gold_starts, gold_ends, example, evaluators): text_length = sum(len(s) for s in example["sentences"]) gold_spans = set(zip(gold_starts, gold_ends)) if len(candidate_starts) > 0: sorted_starts, sorted_ends, _ = zip(*sorted(zip(candidate_starts, candidate_ends, mention_scores), key=operator.itemgetter(2), reverse=True)) else: sorted_starts = [] sorted_ends = [] for k, evaluator in evaluators.items(): if k == -3: predicted_spans = set(zip(candidate_starts, candidate_ends)) & gold_spans else: if k == -2: predicted_starts = mention_starts predicted_ends = mention_ends elif k == 0: is_predicted = mention_scores > 0 predicted_starts = candidate_starts[is_predicted] predicted_ends = candidate_ends[is_predicted] else: if k == -1: num_predictions = len(gold_spans) else: num_predictions = (k * text_length) / 100 predicted_starts = sorted_starts[:num_predictions] predicted_ends = sorted_ends[:num_predictions] predicted_spans = set(zip(predicted_starts, predicted_ends)) evaluator.update(gold_set=gold_spans, predicted_set=predicted_spans) def get_predicted_antecedents(self, antecedents, antecedent_scores): predicted_antecedents = [] for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1): if index < 0: predicted_antecedents.append(-1) else: predicted_antecedents.append(antecedents[i, index]) return predicted_antecedents def get_predicted_clusters(self, mention_starts, mention_ends, predicted_antecedents): mention_to_predicted = {} predicted_clusters = [] for i, predicted_index in enumerate(predicted_antecedents): if predicted_index < 0: continue assert i > predicted_index predicted_antecedent = (int(mention_starts[predicted_index]), int(mention_ends[predicted_index])) if predicted_antecedent in mention_to_predicted: predicted_cluster = mention_to_predicted[predicted_antecedent] else: predicted_cluster = 
len(predicted_clusters) predicted_clusters.append([predicted_antecedent]) mention_to_predicted[predicted_antecedent] = predicted_cluster mention = (int(mention_starts[i]), int(mention_ends[i])) predicted_clusters[predicted_cluster].append(mention) mention_to_predicted[mention] = predicted_cluster predicted_clusters = [tuple(pc) for pc in predicted_clusters] mention_to_predicted = { m:predicted_clusters[i] for m,i in mention_to_predicted.items() } return predicted_clusters, mention_to_predicted def evaluate_coref(self, mention_starts, mention_ends, predicted_antecedents, gold_clusters, evaluator): gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters] mention_to_gold = {} for gc in gold_clusters: for mention in gc: mention_to_gold[mention] = gc predicted_clusters, mention_to_predicted = self.get_predicted_clusters(mention_starts, mention_ends, predicted_antecedents) evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold) return predicted_clusters def load_eval_data(self): if self.eval_data is None: oov_counts = [0 for _ in self.embedding_dicts] with open(self.config["eval_path"]) as f: self.eval_data = list(map(lambda example: (self.tensorize_example(example, is_training=False, oov_counts=oov_counts), example), (json.loads(jsonline) for jsonline in f.readlines()))) num_words = sum(tensorized_example[2].sum() for tensorized_example, _ in self.eval_data) for emb, c in zip(self.config["embeddings"], oov_counts): print("OOV rate for {}: {:.2f}%".format(emb["path"], (100.0 * c) / num_words)) print("Loaded {} eval examples.".format(len(self.eval_data))) def evaluate(self, session, official_stdout=False): self.load_eval_data() def _k_to_tag(k): if k == -3: return "oracle" elif k == -2: return "actual" elif k == -1: return "exact" elif k == 0: return "threshold" else: return "{}%".format(k) mention_evaluators = { k:util.RetrievalEvaluator() for k in [-3, -2, -1, 0, 10, 15, 20, 25, 30, 40, 50] } coref_predictions = {} coref_evaluator = metrics.CorefEvaluator() for example_num, (tensorized_example, example) in enumerate(self.eval_data): _, _, _, _, _, _, gold_starts, gold_ends, _ = tensorized_example feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)} candidate_starts, candidate_ends, mention_scores, mention_starts, mention_ends, antecedents, antecedent_scores = session.run(self.predictions, feed_dict=feed_dict) self.evaluate_mentions(candidate_starts, candidate_ends, mention_starts, mention_ends, mention_scores, gold_starts, gold_ends, example, mention_evaluators) predicted_antecedents = self.get_predicted_antecedents(antecedents, antecedent_scores) coref_predictions[example["doc_key"]] = self.evaluate_coref(mention_starts, mention_ends, predicted_antecedents, example["clusters"], coref_evaluator) if example_num % 10 == 0: print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data))) summary_dict = {} for k, evaluator in sorted(mention_evaluators.items(), key=operator.itemgetter(0)): tags = ["{} @ {}".format(t, _k_to_tag(k)) for t in ("R", "P", "F")] results_to_print = [] for t, v in zip(tags, evaluator.metrics()): results_to_print.append("{:<10}: {:.2f}".format(t, v)) summary_dict[t] = v print(", ".join(results_to_print)) conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, official_stdout) average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results) summary_dict["Average F1 (conll)"] = average_f1 print("Average F1 (conll): {:.2f}%".format(average_f1)) 
p,r,f = coref_evaluator.get_prf() summary_dict["Average F1 (py)"] = f print("Average F1 (py): {:.2f}%".format(f * 100)) summary_dict["Average precision (py)"] = p print("Average precision (py): {:.2f}%".format(p * 100)) summary_dict["Average recall (py)"] = r print("Average recall (py): {:.2f}%".format(r * 100)) return util.make_summary(summary_dict), average_f1
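CorefModel only builds the graph and the enqueue thread; the training loop itself is not shown in this file. The sketch below is one plausible way to drive it, assuming a config dict with the keys referenced in __init__ is available (loading it from a JSON file here is an assumption, as are the file name and checkpoint path).

import json
import tensorflow as tf

with open("experiments.json") as f:  # assumed config file with the keys used above
    config = json.load(f)

model = CorefModel(config)
saver = tf.train.Saver()

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    model.start_enqueue_thread(session)  # keeps the padding FIFO queue fed
    while True:
        loss, step, _ = session.run([model.loss, model.global_step, model.train_op])
        if step % 100 == 0:
            print("step {}: loss = {:.4f}".format(step, loss))
        if step % 1000 == 0:
            saver.save(session, "model.ckpt", global_step=step)
            eval_summary, eval_f1 = model.evaluate(session)
            print("eval F1 (conll) = {:.2f}".format(eval_f1))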
test_advanced.py
# coding: utf-8 from concurrent.futures import ThreadPoolExecutor import json import logging import random import sys import threading import time import os import numpy as np import pytest import ray.cluster_utils import ray._private.profiling as profiling from ray._private.gcs_utils import use_gcs_for_bootstrap from ray._private.test_utils import ( client_test_enabled, RayTestTimeoutException, SignalActor, ) from ray.exceptions import ReferenceCountingAssertionError if client_test_enabled(): from ray.util.client import ray else: import ray logger = logging.getLogger(__name__) # issue https://github.com/ray-project/ray/issues/7105 @pytest.mark.skipif(client_test_enabled(), reason="internal api") def test_internal_free(shutdown_only): ray.init(num_cpus=1) @ray.remote class Sampler: def sample(self): return [1, 2, 3, 4, 5] def sample_big(self): return np.zeros(1024 * 1024) sampler = Sampler.remote() # Free deletes from in-memory store. obj_ref = sampler.sample.remote() ray.get(obj_ref) ray.internal.free(obj_ref) with pytest.raises(ReferenceCountingAssertionError): ray.get(obj_ref) # Free deletes big objects from plasma store. big_id = sampler.sample_big.remote() ray.get(big_id) ray.internal.free(big_id) time.sleep(1) # wait for delete RPC to propagate with pytest.raises(ReferenceCountingAssertionError): ray.get(big_id) def test_multiple_waits_and_gets(shutdown_only): # It is important to use three workers here, so that the three tasks # launched in this experiment can run at the same time. ray.init(num_cpus=3) @ray.remote def f(delay): time.sleep(delay) return 1 @ray.remote def g(input_list): # The argument input_list should be a list containing one object ref. ray.wait([input_list[0]]) @ray.remote def h(input_list): # The argument input_list should be a list containing one object ref. ray.get(input_list[0]) # Make sure that multiple wait requests involving the same object ref # all return. x = f.remote(1) ray.get([g.remote([x]), g.remote([x])]) # Make sure that multiple get requests involving the same object ref all # return. x = f.remote(1) ray.get([h.remote([x]), h.remote([x])]) @pytest.mark.skipif(client_test_enabled(), reason="internal api") def test_caching_functions_to_run(shutdown_only): # Test that we export functions to run on all workers before the driver # is connected. def f(worker_info): sys.path.append(1) ray.worker.global_worker.run_function_on_all_workers(f) def f(worker_info): sys.path.append(2) ray.worker.global_worker.run_function_on_all_workers(f) def g(worker_info): sys.path.append(3) ray.worker.global_worker.run_function_on_all_workers(g) def f(worker_info): sys.path.append(4) ray.worker.global_worker.run_function_on_all_workers(f) ray.init(num_cpus=1) @ray.remote def get_state(): time.sleep(1) return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1] res1 = get_state.remote() res2 = get_state.remote() assert ray.get(res1) == (1, 2, 3, 4) assert ray.get(res2) == (1, 2, 3, 4) # Clean up the path on the workers. def f(worker_info): sys.path.pop() sys.path.pop() sys.path.pop() sys.path.pop() ray.worker.global_worker.run_function_on_all_workers(f) @pytest.mark.skipif(client_test_enabled(), reason="internal api") def test_running_function_on_all_workers(ray_start_regular): def f(worker_info): sys.path.append("fake_directory") ray.worker.global_worker.run_function_on_all_workers(f) @ray.remote def get_path1(): return sys.path assert "fake_directory" == ray.get(get_path1.remote())[-1] # the function should only run on the current driver once. 
assert sys.path[-1] == "fake_directory" if len(sys.path) > 1: assert sys.path[-2] != "fake_directory" def f(worker_info): sys.path.pop(-1) ray.worker.global_worker.run_function_on_all_workers(f) # Create a second remote function to guarantee that when we call # get_path2.remote(), the second function to run will have been run on # the worker. @ray.remote def get_path2(): return sys.path assert "fake_directory" not in ray.get(get_path2.remote()) @pytest.mark.skipif( "RAY_PROFILING" not in os.environ, reason="Only tested in client/profiling build." ) @pytest.mark.skipif( client_test_enabled() and use_gcs_for_bootstrap(), reason=( "wait_for_function will miss in this mode. To be fixed after using" " gcs to bootstrap all component." ), ) def test_profiling_api(ray_start_2_cpus): @ray.remote def f(delay): with profiling.profile("custom_event", extra_data={"name": "custom name"}): time.sleep(delay) pass @ray.remote def g(input_list): # The argument input_list should be a list containing one object ref. ray.wait([input_list[0]]) ray.put(1) x = f.remote(1) ray.get([g.remote([x]), g.remote([x])]) # Wait until all of the profiling information appears in the profile # table. timeout_seconds = 20 start_time = time.time() while True: profile_data = ray.timeline() event_types = {event["cat"] for event in profile_data} expected_types = [ "task", "task:deserialize_arguments", "task:execute", "task:store_outputs", "wait_for_function", "ray.get", "ray.put", "ray.wait", "submit_task", "fetch_and_run_function", # TODO (Alex) :https://github.com/ray-project/ray/pull/9346 # "register_remote_function", "custom_event", # This is the custom one from ray.profile. ] if all(expected_type in event_types for expected_type in expected_types): break if time.time() - start_time > timeout_seconds: raise RayTestTimeoutException( "Timed out while waiting for information in " "profile table. Missing events: {}.".format( set(expected_types) - set(event_types) ) ) # The profiling information only flushes once every second. time.sleep(1.1) def test_wait_cluster(ray_start_cluster): cluster = ray_start_cluster cluster.add_node(num_cpus=1, resources={"RemoteResource": 1}) cluster.add_node(num_cpus=1, resources={"RemoteResource": 1}) ray.init(address=cluster.address) @ray.remote(resources={"RemoteResource": 1}) def f(): return # Make sure we have enough workers on the remote nodes to execute some # tasks. tasks = [f.remote() for _ in range(10)] start = time.time() ray.get(tasks) end = time.time() # Submit some more tasks that can only be executed on the remote nodes. tasks = [f.remote() for _ in range(10)] # Sleep for a bit to let the tasks finish. time.sleep((end - start) * 2) _, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0) # All remote tasks should have finished. assert len(unready) == 0 @pytest.mark.skip(reason="TODO(ekl)") def test_object_transfer_dump(ray_start_cluster): cluster = ray_start_cluster num_nodes = 3 for i in range(num_nodes): cluster.add_node(resources={str(i): 1}, object_store_memory=10 ** 9) ray.init(address=cluster.address) @ray.remote def f(x): return # These objects will live on different nodes. object_refs = [f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)] # Broadcast each object from each machine to each other machine. for object_ref in object_refs: ray.get( [ f._remote(args=[object_ref], resources={str(i): 1}) for i in range(num_nodes) ] ) # The profiling information only flushes once every second. 
time.sleep(1.1) transfer_dump = ray.state.object_transfer_timeline() # Make sure the transfer dump can be serialized with JSON. json.loads(json.dumps(transfer_dump)) assert len(transfer_dump) >= num_nodes ** 2 assert ( len( { event["pid"] for event in transfer_dump if event["name"] == "transfer_receive" } ) == num_nodes ) assert ( len( { event["pid"] for event in transfer_dump if event["name"] == "transfer_send" } ) == num_nodes ) def test_identical_function_names(ray_start_regular): # Define a bunch of remote functions and make sure that we don't # accidentally call an older version. num_calls = 200 @ray.remote def f(): return 1 results1 = [f.remote() for _ in range(num_calls)] @ray.remote def f(): return 2 results2 = [f.remote() for _ in range(num_calls)] @ray.remote def f(): return 3 results3 = [f.remote() for _ in range(num_calls)] @ray.remote def f(): return 4 results4 = [f.remote() for _ in range(num_calls)] @ray.remote def f(): return 5 results5 = [f.remote() for _ in range(num_calls)] assert ray.get(results1) == num_calls * [1] assert ray.get(results2) == num_calls * [2] assert ray.get(results3) == num_calls * [3] assert ray.get(results4) == num_calls * [4] assert ray.get(results5) == num_calls * [5] @ray.remote def g(): return 1 @ray.remote # noqa: F811 def g(): # noqa: F811 return 2 @ray.remote # noqa: F811 def g(): # noqa: F811 return 3 @ray.remote # noqa: F811 def g(): # noqa: F811 return 4 @ray.remote # noqa: F811 def g(): # noqa: F811 return 5 result_values = ray.get([g.remote() for _ in range(num_calls)]) assert result_values == num_calls * [5] def test_illegal_api_calls(ray_start_regular): # Verify that we cannot call put on an ObjectRef. x = ray.put(1) with pytest.raises(Exception): ray.put(x) # Verify that we cannot call get on a regular value. with pytest.raises(Exception): ray.get(3) @pytest.mark.skipif( client_test_enabled(), reason="grpc interaction with releasing resources" ) def test_multithreading(ray_start_2_cpus): # This test requires at least 2 CPUs to finish since the worker does not # release resources when joining the threads. def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25): """A helper function that runs test cases in multiple threads.""" def wrapper(): for _ in range(num_repeats): test_case() time.sleep(random.randint(0, 10) / 1000.0) return "ok" executor = ThreadPoolExecutor(max_workers=num_threads) futures = [executor.submit(wrapper) for _ in range(num_threads)] for future in futures: assert future.result() == "ok" @ray.remote def echo(value, delay_ms=0): if delay_ms > 0: time.sleep(delay_ms / 1000.0) return value def test_api_in_multi_threads(): """Test using Ray api in multiple threads.""" @ray.remote class Echo: def echo(self, value): return value # Test calling remote functions in multiple threads. def test_remote_call(): value = random.randint(0, 1000000) result = ray.get(echo.remote(value)) assert value == result run_test_in_multi_threads(test_remote_call) # Test multiple threads calling one actor. actor = Echo.remote() def test_call_actor(): value = random.randint(0, 1000000) result = ray.get(actor.echo.remote(value)) assert value == result run_test_in_multi_threads(test_call_actor) # Test put and get. def test_put_and_get(): value = random.randint(0, 1000000) result = ray.get(ray.put(value)) assert value == result run_test_in_multi_threads(test_put_and_get) # Test multiple threads waiting for objects. 
num_wait_objects = 10 objects = [echo.remote(i, delay_ms=10) for i in range(num_wait_objects)] def test_wait(): ready, _ = ray.wait( objects, num_returns=len(objects), timeout=1000.0, ) assert len(ready) == num_wait_objects assert ray.get(ready) == list(range(num_wait_objects)) run_test_in_multi_threads(test_wait, num_repeats=1) # Run tests in a driver. test_api_in_multi_threads() # Run tests in a worker. @ray.remote def run_tests_in_worker(): test_api_in_multi_threads() return "ok" assert ray.get(run_tests_in_worker.remote()) == "ok" # Test actor that runs background threads. @ray.remote class MultithreadedActor: def __init__(self): self.lock = threading.Lock() self.thread_results = [] def background_thread(self, wait_objects): try: # Test wait ready, _ = ray.wait( wait_objects, num_returns=len(wait_objects), timeout=1000.0, ) assert len(ready) == len(wait_objects) for _ in range(20): num = 10 # Test remote call results = [echo.remote(i) for i in range(num)] assert ray.get(results) == list(range(num)) # Test put and get objects = [ray.put(i) for i in range(num)] assert ray.get(objects) == list(range(num)) time.sleep(random.randint(0, 10) / 1000.0) except Exception as e: with self.lock: self.thread_results.append(e) else: with self.lock: self.thread_results.append("ok") def spawn(self): wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)] self.threads = [ threading.Thread(target=self.background_thread, args=(wait_objects,)) for _ in range(20) ] [thread.start() for thread in self.threads] def join(self): [thread.join() for thread in self.threads] assert self.thread_results == ["ok"] * len(self.threads) return "ok" actor = MultithreadedActor.remote() actor.spawn.remote() ray.get(actor.join.remote()) == "ok" @pytest.mark.skipif(client_test_enabled(), reason="internal api") def test_wait_makes_object_local(ray_start_cluster): cluster = ray_start_cluster cluster.add_node(num_cpus=0) cluster.add_node(num_cpus=2) ray.init(address=cluster.address) @ray.remote class Foo: def method(self): return np.zeros(1024 * 1024) a = Foo.remote() # Test get makes the object local. x_id = a.method.remote() assert not ray.worker.global_worker.core_worker.object_exists(x_id) ray.get(x_id) assert ray.worker.global_worker.core_worker.object_exists(x_id) # Test wait makes the object local. x_id = a.method.remote() assert not ray.worker.global_worker.core_worker.object_exists(x_id) ok, _ = ray.wait([x_id]) assert len(ok) == 1 assert ray.worker.global_worker.core_worker.object_exists(x_id) @pytest.mark.skipif(client_test_enabled(), reason="internal api") def test_future_resolution_skip_plasma(ray_start_cluster): cluster = ray_start_cluster # Disable worker caching so worker leases are not reused; set object # inlining size threshold so the borrowed ref is inlined. cluster.add_node( num_cpus=1, resources={"pin_head": 1}, _system_config={ "worker_lease_timeout_milliseconds": 0, "max_direct_call_object_size": 100 * 1024, }, ) cluster.add_node(num_cpus=1, resources={"pin_worker": 1}) ray.init(address=cluster.address) @ray.remote(resources={"pin_head": 1}) def f(x): return x + 1 @ray.remote(resources={"pin_worker": 1}) def g(x): borrowed_ref = x[0] f_ref = f.remote(borrowed_ref) f_result = ray.get(f_ref) # borrowed_ref should be inlined on future resolution and shouldn't be # in Plasma. 
assert ray.worker.global_worker.core_worker.object_exists( borrowed_ref, memory_store_only=True ) return f_result * 2 one = f.remote(0) g_ref = g.remote([one]) assert ray.get(g_ref) == 4 def test_task_output_inline_bytes_limit(ray_start_cluster): cluster = ray_start_cluster # Disable worker caching so worker leases are not reused; set object # inlining size threshold and enable storing of small objects in in-memory # object store so the borrowed ref is inlined. # set task_rpc_inlined_bytes_limit which only allows inline 20 bytes. cluster.add_node( num_cpus=1, resources={"pin_head": 1}, _system_config={ "worker_lease_timeout_milliseconds": 0, "max_direct_call_object_size": 100 * 1024, "task_rpc_inlined_bytes_limit": 20, }, ) cluster.add_node(num_cpus=1, resources={"pin_worker": 1}) ray.init(address=cluster.address) @ray.remote(num_returns=5, resources={"pin_head": 1}) def f(): return list(range(5)) @ray.remote(resources={"pin_worker": 1}) def sum(): numbers = f.remote() result = 0 for i, ref in enumerate(numbers): result += ray.get(ref) inlined = ray.worker.global_worker.core_worker.object_exists( ref, memory_store_only=True ) if i < 2: assert inlined else: assert not inlined return result assert ray.get(sum.remote()) == 10 def test_task_arguments_inline_bytes_limit(ray_start_cluster): cluster = ray_start_cluster cluster.add_node( num_cpus=1, resources={"pin_head": 1}, _system_config={ "max_direct_call_object_size": 100 * 1024, # if task_rpc_inlined_bytes_limit is greater than # max_grpc_message_size, this test fails. "task_rpc_inlined_bytes_limit": 18 * 1024, "max_grpc_message_size": 20 * 1024, }, ) cluster.add_node(num_cpus=1, resources={"pin_worker": 1}) ray.init(address=cluster.address) @ray.remote(resources={"pin_worker": 1}) def foo(ref1, ref2, ref3): return ref1 == ref2 + ref3 @ray.remote(resources={"pin_head": 1}) def bar(): # if the refs are inlined, the test fails. # refs = [ray.put(np.random.rand(1024) for _ in range(3))] # return ray.get( # foo.remote(refs[0], refs[1], refs[2])) return ray.get( foo.remote( np.random.rand(1024), # 8k np.random.rand(1024), # 8k np.random.rand(1024), ) ) # 8k ray.get(bar.remote()) # This case tests whether gcs-based actor scheduler works properly with # a normal task co-existed. @pytest.mark.skip(reason="The resource update of normal task has been broken.") def test_schedule_actor_and_normal_task(ray_start_cluster): cluster = ray_start_cluster cluster.add_node( memory=1024 ** 3, _system_config={"gcs_actor_scheduling_enabled": True} ) ray.init(address=cluster.address) cluster.wait_for_nodes() @ray.remote(memory=600 * 1024 ** 2, num_cpus=0.01) class Foo: def method(self): return 2 @ray.remote(memory=600 * 1024 ** 2, num_cpus=0.01) def fun(singal1, signal_actor2): signal_actor2.send.remote() ray.get(singal1.wait.remote()) return 1 singal1 = SignalActor.remote() signal2 = SignalActor.remote() o1 = fun.remote(singal1, signal2) # Make sure the normal task is executing. ray.get(signal2.wait.remote()) # The normal task is blocked now. # Try to create actor and make sure this actor is not created for the time # being. foo = Foo.remote() o2 = foo.method.remote() ready_list, remaining_list = ray.wait([o2], timeout=2) assert len(ready_list) == 0 and len(remaining_list) == 1 # Send a signal to unblock the normal task execution. ray.get(singal1.send.remote()) # Check the result of normal task. assert ray.get(o1) == 1 # Make sure the actor is created. assert ray.get(o2) == 2 # This case tests whether gcs-based actor scheduler works properly # in a large scale. 
@pytest.mark.skip(reason="The resource update of normal task has been broken.") def test_schedule_many_actors_and_normal_tasks(ray_start_cluster): cluster = ray_start_cluster node_count = 10 actor_count = 50 each_actor_task_count = 50 normal_task_count = 1000 node_memory = 2 * 1024 ** 3 for i in range(node_count): cluster.add_node( memory=node_memory, _system_config={"gcs_actor_scheduling_enabled": True} if i == 0 else {}, ) ray.init(address=cluster.address) cluster.wait_for_nodes() @ray.remote(memory=100 * 1024 ** 2, num_cpus=0.01) class Foo: def method(self): return 2 @ray.remote(memory=100 * 1024 ** 2, num_cpus=0.01) def fun(): return 1 normal_task_object_list = [fun.remote() for _ in range(normal_task_count)] actor_list = [Foo.remote() for _ in range(actor_count)] actor_object_list = [ actor.method.remote() for _ in range(each_actor_task_count) for actor in actor_list ] for object in ray.get(actor_object_list): assert object == 2 for object in ray.get(normal_task_object_list): assert object == 1 # This case tests whether gcs-based actor scheduler distributes actors # in a balanced way. By default, it uses the `SPREAD` strategy of # gcs resource scheduler. @pytest.mark.skip(reason="The resource update of normal task has been broken.") @pytest.mark.parametrize("args", [[5, 20], [5, 3]]) def test_actor_distribution_balance(ray_start_cluster, args): cluster = ray_start_cluster node_count = args[0] actor_count = args[1] for i in range(node_count): cluster.add_node( memory=1024 ** 3, _system_config={"gcs_actor_scheduling_enabled": True} if i == 0 else {}, ) ray.init(address=cluster.address) cluster.wait_for_nodes() @ray.remote(memory=100 * 1024 ** 2, num_cpus=0.01) class Foo: def method(self): return ray.worker.global_worker.node.unique_id actor_distribution = {} actor_list = [Foo.remote() for _ in range(actor_count)] for actor in actor_list: node_id = ray.get(actor.method.remote()) if node_id not in actor_distribution.keys(): actor_distribution[node_id] = [] actor_distribution[node_id].append(actor) if node_count >= actor_count: assert len(actor_distribution) == actor_count for node_id, actors in actor_distribution.items(): assert len(actors) == 1 else: assert len(actor_distribution) == node_count for node_id, actors in actor_distribution.items(): assert len(actors) <= int(actor_count / node_count) # This case tests whether RequestWorkerLeaseReply carries normal task resources # when the request is rejected (due to resource preemption by normal tasks). @pytest.mark.skip(reason="The resource update of normal task has been broken.") def test_worker_lease_reply_with_resources(ray_start_cluster): cluster = ray_start_cluster cluster.add_node( memory=2000 * 1024 ** 2, _system_config={ "gcs_resource_report_poll_period_ms": 1000000, "gcs_actor_scheduling_enabled": True, }, ) node2 = cluster.add_node(memory=1000 * 1024 ** 2) ray.init(address=cluster.address) cluster.wait_for_nodes() @ray.remote(memory=1500 * 1024 ** 2) def fun(signal): signal.send.remote() time.sleep(30) return 0 signal = SignalActor.remote() fun.remote(signal) # Make sure that the `fun` is running. ray.get(signal.wait.remote()) @ray.remote(memory=800 * 1024 ** 2) class Foo: def method(self): return ray.worker.global_worker.node.unique_id foo1 = Foo.remote() o1 = foo1.method.remote() ready_list, remaining_list = ray.wait([o1], timeout=10) # If RequestWorkerLeaseReply carries normal task resources, # GCS will then schedule foo1 to node2. Otherwise, # GCS would keep trying to schedule foo1 to # node1 and getting rejected. 
    assert len(ready_list) == 1 and len(remaining_list) == 0
    assert ray.get(o1) == node2.unique_id


if __name__ == "__main__":
    import pytest

    sys.exit(pytest.main(["-v", __file__]))
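
# Illustrative sketch (not part of the original test suite): a minimal,
# self-contained reminder of the ray.wait() semantics exercised above.
# ray.wait() splits a list of object refs into the refs that became ready
# within the timeout and the refs that are still pending; slow_echo below
# is a hypothetical helper introduced only for this sketch.
def _wait_semantics_sketch():
    import time

    import ray

    @ray.remote
    def slow_echo(value, delay_s):
        time.sleep(delay_s)
        return value

    ray.init(ignore_reinit_error=True)
    refs = [slow_echo.remote(i, delay_s=i) for i in range(4)]
    # Ask for two results: the fastest tasks land in `ready`, the rest stay
    # in `pending` until a later wait() or get() call.
    ready, pending = ray.wait(refs, num_returns=2, timeout=30)
    assert len(ready) + len(pending) == 4
    return ray.get(ready)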
run.py
#! /usr/bin/env python3 # # Copyright (C) 2017-2020 Open Information Security Foundation # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import print_function import sys import os import os.path import subprocess import threading import shutil import string import argparse import yaml import glob import re import json import unittest import multiprocessing as mp from collections import namedtuple import threading import yaml WIN32 = sys.platform == "win32" LINUX = sys.platform.startswith("linux") suricata_bin = "src\suricata.exe" if WIN32 else "./src/suricata" suricata_yaml = "suricata.yaml" if WIN32 else "./suricata.yaml" if LINUX: manager = mp.Manager() lock = mp.Lock() failedLogs = manager.list() count_dict = manager.dict() check_args = manager.dict() else: failedLogs = [] count_dict = {} check_args = {} # Bring in a lock from threading to satisfy the MP semantics when # not using MP. 
lock = threading.Lock() count_dict['passed'] = 0 count_dict['failed'] = 0 count_dict['skipped'] = 0 check_args['fail'] = 0 class SelfTest(unittest.TestCase): def test_parse_suricata_version(self): version = parse_suricata_version("4.0.0") self.assertEqual( (4, 0, 0), (version.major, version.minor, version.patch)) version = parse_suricata_version("444.444.444") self.assertEqual( (444, 444, 444), (version.major, version.minor, version.patch)) version = parse_suricata_version("4.1.0-dev") self.assertEqual( (4, 1, 0), (version.major, version.minor, version.patch)) version = parse_suricata_version("4") self.assertEqual( (4, 0, 0), (version.major, version.minor, version.patch)) version = parse_suricata_version("4.0.3") self.assertEqual( (4, 0, 3), (version.major, version.minor, version.patch)) def test_version_equal(self): self.assertTrue(Version().is_equal(SuricataVersion(5, 0, 0), SuricataVersion(5, 0, 0))) self.assertTrue(Version().is_equal(SuricataVersion(5, 1, 0), SuricataVersion(5, None, None))) self.assertFalse(Version().is_equal(SuricataVersion(4, 1, 0), SuricataVersion(5, None, None))) def test_version_lt(self): comp = Version() self.assertTrue(comp.is_lt(SuricataVersion(5, 0, 3), SuricataVersion(6, None, None))) self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(6, 0, 1))) self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(6, 1, 1))) self.assertFalse(comp.is_lt(SuricataVersion(6, 1, 2), SuricataVersion(6, 1, 1))) self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(7, 0, 0))) class TestError(Exception): pass class UnsatisfiedRequirementError(Exception): pass class TerminatePoolError(Exception): pass SuricataVersion = namedtuple( "SuricataVersion", ["major", "minor", "patch"]) def parse_suricata_version(buf): m = re.search("(?:Suricata version |^)(\d+)\.?(\d+)?\.?(\d+)?.*", str(buf).strip()) if m: major = int(m.group(1)) if m.group(1) else 0 minor = int(m.group(2)) if m.group(2) else 0 patch = int(m.group(3)) if m.group(3) else 0 return SuricataVersion( major=major, minor=minor, patch=patch) return None def get_suricata_version(): output = subprocess.check_output([suricata_bin, "-V"]) return parse_suricata_version(output) def pipe_reader(fileobj, output=None, verbose=False): for line in fileobj: line = line.decode() if output: output.write(line) output.flush() if verbose: print(line.strip()) def handle_exceptions(func): def applicator(*args, **kwargs): result = False try: result = func(*args,**kwargs) except TestError as te: print("===> {}: Sub test #{}: FAIL : {}".format(kwargs["test_name"], kwargs["test_num"], te)) check_args_fail() kwargs["count"]["failure"] += 1 except UnsatisfiedRequirementError as ue: print("===> {}: Sub test #{}: SKIPPED : {}".format(kwargs["test_name"], kwargs["test_num"], ue)) kwargs["count"]["skipped"] += 1 else: if result: kwargs["count"]["success"] += 1 else: print("\n===> {}: Sub test #{}: FAIL : {}".format(kwargs["test_name"], kwargs["test_num"], kwargs["check"]["args"])) kwargs["count"]["failure"] += 1 return kwargs["count"] return applicator class Version: """ Class to compare Suricata versions. """ def is_equal(self, a, b): """Check if version a and version b are equal in a semantic way. For example: - 4 would match 4, 4.x and 4.x.y. - 4.0 would match 4.0.x. - 4.0.3 would match only 4.0.3. 
""" if not a.major == b.major: return False if a.minor is not None and b.minor is not None: if a.minor != b.minor: return False if a.patch is not None and b.patch is not None: if a.patch != b.patch: return False return True def is_gte(self, v1, v2): """Return True if v1 is great than or equal to v2.""" if v1.major < v2.major: return False elif v1.major > v2.major: return True if v1.minor < v2.minor: return False elif v1.minor > v2.minor: return True if v1.patch < v2.patch: return False return True def is_lt(self, v1, v2): """Return True if v1 is less than v2.""" if v1.major < v2.major: return True elif v1.minor < v2.minor: return True elif v1.patch < v2.patch: return True return False class SuricataConfig: def __init__(self, version): self.version = version self.features = set() self.config = {} self.load_build_info() def load_build_info(self): output = subprocess.check_output([suricata_bin, "--build-info"]) start_support = False for line in output.splitlines(): if line.decode().startswith("Features:"): self.features = set(line.decode().split()[1:]) if "Suricata Configuration" in line.decode(): start_support = True if start_support and "support:" in line.decode(): (fkey, val) = line.decode().split(" support:") fkey = fkey.strip() val = val.strip() if val.startswith("yes"): self.features.add(fkey) def load_config(self, config_filename): output = subprocess.check_output([ suricata_bin, "-c", config_filename, "--dump-config"]) self.config = {} for line in output.decode("utf-8").split("\n"): parts = [p.strip() for p in line.split("=", 1)] if parts and parts[0]: if len(parts) > 1: val = parts[1] else: val = "" self.config[parts[0]] = val def has_feature(self, feature): return feature in self.features def find_value(name, obj): """Find the value in an object for a field specified by name. Example names: event_type alert.signature_id smtp.rcpt_to[0] """ parts = name.split(".") for part in parts: if part == "__len": # Get the length of the object. Return -1 if the object is # not a type that has a length (numbers). 
try: return len(obj) except: return -1 name = None index = None m = re.match("^(.*)\[(\d+)\]$", part) if m: name = m.group(1) index = m.group(2) else: name = part if not name in obj: return None obj = obj[name] if index is not None: try: obj = obj[int(index)] except: return None return obj def is_version_compatible(version, suri_version, expr): config_version = parse_suricata_version(version) version_obj = Version() func = getattr(version_obj, "is_{}".format(expr)) if not func(suri_version, config_version): return False return True class ShellCheck: def __init__(self, config): self.config = config def run(self): if not self.config or "args" not in self.config: raise TestError("shell check missing args") try: if WIN32: print("skipping shell check on windows") return True; output = subprocess.check_output(self.config["args"], shell=True) if "expect" in self.config: return str(self.config["expect"]) == output.decode().strip() return True except subprocess.CalledProcessError as err: raise TestError(err) class StatsCheck: def __init__(self, config, outdir): self.config = config self.outdir = outdir def run(self): stats = None with open("eve.json", "r") as fileobj: for line in fileobj: event = json.loads(line) if event["event_type"] == "stats": stats = event["stats"] for key in self.config: val = find_value(key, stats) if val != self.config[key]: raise TestError("stats.%s: expected %s; got %s" % ( key, str(self.config[key]), str(val))) return True class FilterCheck: def __init__(self, config, outdir, suricata_config): self.config = config self.outdir = outdir self.suricata_config = suricata_config self.suri_version = suricata_config.version def run(self): req_version = self.config.get("version") min_version = self.config.get("min-version") expr = "equal" if req_version else "gte" if (req_version == None) ^ (min_version == None): version = req_version or min_version if not is_version_compatible(version=version, suri_version=self.suri_version, expr=expr): raise UnsatisfiedRequirementError( "Suricata v{} not found".format(version)) elif req_version and min_version: raise TestError("Specify either min-version or version") feature = self.config.get("feature") if feature != None: if not self.suricata_config.has_feature(feature): raise UnsatisfiedRequirementError( "Suricata feature {} not present".format(feature)) if "filename" in self.config: json_filename = self.config["filename"] else: json_filename = "eve.json" if not os.path.exists(json_filename): raise TestError("%s does not exist" % (json_filename)) count = 0 with open(json_filename, "r") as fileobj: for line in fileobj: event = json.loads(line) if self.match(event): count += 1 if count == self.config["count"]: return True if "comment" in self.config: raise TestError("%s: expected %d, got %d" % ( self.config["comment"], self.config["count"], count)) raise TestError("expected %d matches; got %d for filter %s" % ( self.config["count"], count, str(self.config))) def match(self, event): for key, expected in self.config["match"].items(): if key == "has-key": val = find_value(expected, event) if val is None: return False elif key == "not-has-key": val = find_value(expected, event) if val is not None: return False else: val = find_value(key, event) if val != expected: if str(val) == str(expected): print("Different types but same string", type(val), val, type(expected), expected) return False return False return True class TestRunner: def __init__(self, cwd, directory, outdir, suricata_config, verbose=False, force=False): self.cwd = cwd self.directory = 
directory self.suricata_config = suricata_config self.verbose = verbose self.force = force self.output = outdir # The name is just the directory name. self.name = os.path.basename(self.directory) # List of thread readers. self.readers = [] # Load the test configuration. self.config = None self.load_config() self.suricata_config.load_config(self.get_suricata_yaml_path()) def load_config(self): if os.path.exists(os.path.join(self.directory, "test.yaml")): self.config = yaml.safe_load( open(os.path.join(self.directory, "test.yaml"), "rb")) if self.config is None: self.config = {} def setup(self): if "setup" in self.config: for setup in self.config["setup"]: for command in setup: if command == "script": subprocess.check_call( "%s" % setup[command], shell=True, cwd=self.output) def check_skip(self): if not "skip" in self.config: return if isinstance(self.config["skip"], bool): if self.config["skip"]: raise UnsatisfiedRequirementError("skipped by default") return for skip in self.config["skip"]: if "uid" in skip: if WIN32: raise UnsatisfiedRequirementError("uid based skip not supported on Windows") if os.getuid() == skip["uid"]: if "msg" in skip: msg = skip["msg"] else: msg = "not for uid %d" % (skip["uid"]) raise UnsatisfiedRequirementError(msg) if "feature" in skip: if self.suricata_config.has_feature(skip["feature"]): if "msg" in skip: msg = skip["msg"] else: msg = "not for feature %s" % (skip["feature"]) raise UnsatisfiedRequirementError(msg) if "config" in skip: for pattern, need_val in skip["config"].items(): for key, val in self.suricata_config.config.items(): if re.match(pattern, key): if str(need_val) == str(val): raise UnsatisfiedRequirementError( "not for %s = %s" % ( key, need_val)) def check_requires(self): if "requires" in self.config: requires = self.config["requires"] if not requires: return True else: requires = {} suri_version = self.suricata_config.version for key in requires: if key == "min-version": min_version = requires["min-version"] if not is_version_compatible(version=min_version, suri_version=suri_version, expr="gte"): raise UnsatisfiedRequirementError( "requires at least version {}".format(min_version)) elif key == "lt-version": lt_version = requires["lt-version"] if not is_version_compatible(version=lt_version, suri_version=suri_version, expr="lt"): raise UnsatisfiedRequirementError( "for version less than {}".format(lt_version)) elif key == "version": req_version = requires["version"] if not is_version_compatible(version=req_version, suri_version=suri_version, expr="equal"): raise UnsatisfiedRequirementError( "only for version {}".format(req_version)) elif key == "features": for feature in requires["features"]: if not self.suricata_config.has_feature(feature): raise UnsatisfiedRequirementError( "requires feature %s" % (feature)) elif key == "env": for env in requires["env"]: if not env in os.environ: raise UnsatisfiedRequirementError( "requires env var %s" % (env)) elif key == "files": for filename in requires["files"]: if not os.path.exists(filename): raise UnsatisfiedRequirementError( "requires file %s" % (filename)) elif key == "script": for script in requires["script"]: try: subprocess.check_call("%s" % script, shell=True) except: raise UnsatisfiedRequirementError( "requires script returned false") elif key == "config": for pattern, need_val in requires["config"].items(): found = False for key, val in self.suricata_config.config.items(): if re.match(pattern, key): print("%s -> %s" % (pattern, key)) if str(need_val) != str(val): raise UnsatisfiedRequirementError( 
"requires %s = %s" % ( key, need_val)) print(found) if not found: raise UnsatisfiedRequirementError( "requires %s = %s" % (pattern, need_val)) elif key == "pcap": # Handle below... pass else: raise Exception("unknown requires types: %s" % (key)) # Check if a pcap is required or not. By default a pcap is # required unless a "command" has been provided. if not "command" in self.config: if "pcap" in requires: pcap_required = requires["pcap"] else: pcap_required = True if pcap_required and not "pcap" in self.config: if not glob.glob(os.path.join(self.directory, "*.pcap")) + \ glob.glob(os.path.join(self.directory, "*.pcapng")): raise UnsatisfiedRequirementError("No pcap file found") def run(self): if not self.force: self.check_requires() self.check_skip() if WIN32 and os.path.exists(os.path.join(self.directory, "check.sh")): raise UnsatisfiedRequirementError("check.sh tests are not supported on Windows") if WIN32 and "setup" in self.config: raise UnsatisfiedRequirementError("test \"setup\" not supported on Windows") shell = False if "command" in self.config: # on Windows skip 'command' tests if WIN32: raise UnsatisfiedRequirementError("\"command\" tests are not supported on Windows") args = self.config["command"] shell = True else: args = self.default_args() extraenv = { # The suricata source directory. "SRCDIR": self.cwd, "TZ": "UTC", "TEST_DIR": self.directory, "OUTPUT_DIR": self.output, "ASAN_OPTIONS": "detect_leaks=0", } env = os.environ.copy() env.update(extraenv) if "count" in self.config: count = self.config["count"] else: count = 1 if "exit-code" in self.config: expected_exit_code = self.config["exit-code"] else: expected_exit_code = 0 for _ in range(count): # Cleanup the output directory. if os.path.exists(self.output): shutil.rmtree(self.output) os.makedirs(self.output) self.setup() stdout = open(os.path.join(self.output, "stdout"), "w") stderr = open(os.path.join(self.output, "stderr"), "w") if shell: template = string.Template(args) cmdline = template.substitute(env) else: cmdline = " ".join(args) + "\n" open(os.path.join(self.output, "cmdline"), "w").write(cmdline) p = subprocess.Popen( args, shell=shell, cwd=self.directory, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.start_reader(p.stdout, stdout) self.start_reader(p.stderr, stderr) for r in self.readers: r.join() r = p.wait() if r != expected_exit_code: raise TestError("got exit code %d, expected %d" % ( r, expected_exit_code)); check_value = self.check() if check_value["check_sh"]: return check_value if not check_value["failure"] and not check_value["skipped"]: print("===> %s: OK%s" % (os.path.basename(self.directory), " (%dx)" % count if count > 1 else "")) elif not check_value["failure"]: print("===> {}: OK (checks: {}, skipped: {})".format(os.path.basename(self.directory), sum(check_value.values()), check_value["skipped"])) return check_value def pre_check(self): if "pre-check" in self.config: subprocess.call(self.config["pre-check"], shell=True) @handle_exceptions def perform_filter_checks(self, check, count, test_num, test_name): count = FilterCheck(check, self.output, self.suricata_config).run() return count @handle_exceptions def perform_shell_checks(self, check, count, test_num, test_name): count = ShellCheck(check).run() return count @handle_exceptions def perform_stats_checks(self, check, count, test_num, test_name): count = StatsCheck(check, self.output).run() return count def reset_count(self, dictionary): for k in dictionary.keys(): dictionary[k] = 0 def check(self): pdir = os.getcwd() 
os.chdir(self.output) count = { "success": 0, "failure": 0, "skipped": 0, "check_sh": 0, } try: self.pre_check() if "checks" in self.config: self.reset_count(count) for check_count, check in enumerate(self.config["checks"]): for key in check: if key in ["filter", "shell", "stats"]: func = getattr(self, "perform_{}_checks".format(key)) count = func(check=check[key], count=count, test_num=check_count + 1, test_name=os.path.basename(self.directory)) else: print("FAIL: Unknown check type: {}".format(key)) finally: os.chdir(pdir) if count["failure"] or count["skipped"]: return count # Old style check script. pdir = os.getcwd() os.chdir(self.output) try: if not os.path.exists(os.path.join(self.directory, "check.sh")): success_c = count["success"] # Covering cases like "tests/show-help" which do not have # check.sh and/or no checks in test.yaml should be counted # successful count["success"] = 1 if not success_c else success_c return count extraenv = { # The suricata source directory. "SRCDIR": self.cwd, "TZ": "UTC", "TEST_DIR": self.directory, "OUTPUT_DIR": self.output, "TOPDIR": TOPDIR, } env = os.environ.copy() env.update(extraenv) r = subprocess.call( [os.path.join(self.directory, "check.sh")], env=env) if r != 0: print("FAILED: verification failed") count["failure"] = 1 count["check_sh"] = 1 return count else: count["success"] = 1 return count finally: os.chdir(pdir) def default_args(self): args = [] if self.suricata_config.valgrind: suppression_opt = "--suppressions=%s" % os.path.join(self.cwd, "qa/valgrind.suppress") args += [ "valgrind", "-v", "--error-exitcode=255", suppression_opt ] args += [ os.path.join(self.cwd, "src/suricata"), ] # Load args from config file. if "args" in self.config: assert(type(self.config["args"]) == type([])) for arg in self.config["args"]: args += re.split("\s", arg) # In Suricata 5.0 the classification.config and # reference.config were moved into the etc/ directory. For now # check there and the top level directory to still support # 4.1. classification_configs = [ os.path.join(self.cwd, "etc", "classification.config"), os.path.join(self.cwd, "classification.config"), ] for config in classification_configs: if os.path.exists(config): args += ["--set", "classification-file=%s" % config] break reference_configs = [ os.path.join(self.cwd, "etc", "reference.config"), os.path.join(self.cwd, "reference.config"), ] for config in reference_configs: if os.path.exists(config): args += ["--set", "reference-config-file=%s" % config] break # Add other fixed arguments. args += [ "--init-errors-fatal", "-l", self.output, ] if "ips" in self.name: args.append("--simulate-ips") args += ["-c", self.get_suricata_yaml_path()] # Find pcaps. if "pcap" in self.config: args += ["-r", self.config["pcap"]] else: pcaps = glob.glob(os.path.join(self.directory, "*.pcap")) pcaps += glob.glob(os.path.join(self.directory, "*.pcapng")) if len(pcaps) > 1: raise TestError("More than 1 pcap file found") if pcaps: args += ["-r", pcaps[0]] # Find rules. 
rules = glob.glob(os.path.join(self.directory, "*.rules")) if not rules: args.append("--disable-detection") elif len(rules) == 1: args += ["-S", rules[0]] else: raise TestError("More than 1 rule file found") return args def get_suricata_yaml_path(self): """Return the path to the suricata.yaml that will be used for this test.""" if os.path.exists(os.path.join(self.directory, "suricata.yaml")): return os.path.join(self.directory, "suricata.yaml") return os.path.join(self.cwd, "suricata.yaml") def start_reader(self, input, output): t = threading.Thread( target=pipe_reader, args=(input, output, self.verbose)) t.start() self.readers.append(t) def check_args_fail(): if args.fail: with lock: check_args['fail'] = 1 def check_deps(): try: cmd = "jq --version > nil" if WIN32 else "jq --version > /dev/null 2>&1" subprocess.check_call(cmd, shell=True) except: print("error: jq is required") return False try: cmd = "echo suricata | xargs > nil" if WIN32 else "echo | xargs > /dev/null 2>&1" subprocess.check_call(cmd, shell=True) except: print("error: xargs is required") return False return True def run_test(dirpath, args, cwd, suricata_config): with lock: if check_args['fail'] == 1: raise TerminatePoolError() name = os.path.basename(dirpath) outdir = os.path.join(dirpath, "output") if args.outdir: outdir = os.path.join(os.path.realpath(args.outdir), name, "output") test_runner = TestRunner( cwd, dirpath, outdir, suricata_config, args.verbose, args.force) try: results = test_runner.run() if results["failure"] > 0: with lock: count_dict["failed"] += 1 failedLogs.append(dirpath) elif results["skipped"] > 0 and results["success"] == 0: with lock: count_dict["skipped"] += 1 elif results["success"] > 0: with lock: count_dict["passed"] += 1 except UnsatisfiedRequirementError as ue: print("===> {}: SKIPPED: {}".format(os.path.basename(dirpath), ue)) with lock: count_dict["skipped"] += 1 except TestError as te: print("===> {}: FAILED: {}".format(os.path.basename(dirpath), te)) check_args_fail() with lock: count_dict["failed"] += 1 def run_mp(jobs, tests, dirpath, args, cwd, suricata_config): print("Number of concurrent jobs: %d" % jobs) pool = mp.Pool(jobs) try: for dirpath in tests: pool.apply_async(run_test, args=(dirpath, args, cwd, suricata_config)) except TerminatePoolError: pool.terminate() pool.close() pool.join() def run_single(tests, dirpath, args, cwd, suricata_config): try: for dirpath in tests: run_test(dirpath, args, cwd, suricata_config) except TerminatePoolError: sys.exit(1) def main(): global TOPDIR global args if not check_deps(): return 1 parser = argparse.ArgumentParser(description="Verification test runner.") parser.add_argument("-v", dest="verbose", action="store_true") parser.add_argument("--force", dest="force", action="store_true", help="Force running of skipped tests") parser.add_argument("--fail", action="store_true", help="Exit on test failure") parser.add_argument("--testdir", action="store", help="Runs tests from custom directory") parser.add_argument("--exact", dest="exact", action="store_true", help="Use supplied name to make an exact match") parser.add_argument("--skip-tests", nargs="?", default=None, help="Skip tests with a given pattern") parser.add_argument("--outdir", action="store", help="Outputs to custom directory") parser.add_argument("--valgrind", dest="valgrind", action="store_true", help="Run tests in with valgrind") parser.add_argument("--self-test", action="store_true", help="Run self tests") parser.add_argument("--debug-failed", dest="debugfailed", 
action="store_true", help="Prints debug output for failed tests") parser.add_argument("patterns", nargs="*", default=[]) if LINUX: parser.add_argument("-j", type=int, default=min(8, mp.cpu_count()), help="Number of jobs to run") args = parser.parse_args() if args.self_test: return unittest.main(argv=[sys.argv[0]]) TOPDIR = os.path.abspath(os.path.dirname(sys.argv[0])) skipped = 0 passed = 0 failed = 0 # Get the current working directory, which should be the top # suricata source directory. cwd = os.getcwd() if not (os.path.exists(suricata_yaml) and os.path.exists(suricata_bin)): print("error: this is not a suricata source directory or " + "suricata is not built") return 1 # Create a SuricataConfig object that is passed to all tests. suricata_config = SuricataConfig(get_suricata_version()) suricata_config.valgrind = args.valgrind tdir = os.path.join(TOPDIR, "tests") if args.testdir: tdir = os.path.abspath(args.testdir) # First gather the tests so we can run them in alphabetic order. tests = [] for dirpath, dirnames, filenames in os.walk(tdir, followlinks = True): # The top directory is not a test... if dirpath == os.path.join(TOPDIR, "tests"): continue if dirpath == tdir: continue basename = os.path.basename(dirpath) if args.skip_tests: skip_tests_opt = False patterns = args.skip_tests.split(",") for pattern in patterns: if args.exact: if pattern == basename: skip_tests_opt = True break elif basename.find(pattern) > -1: skip_tests_opt = True break if skip_tests_opt: continue # Check if there are sub-test directories if "test.yaml" in filenames or "check.sh" in filenames: # gets used by os.walk in this for loop dirnames[0:] = [] else: continue if not args.patterns: tests.append(dirpath) else: for pattern in args.patterns: if args.exact: if pattern == basename: tests.append(dirpath) elif basename.find(pattern) > -1: tests.append(dirpath) # Sort alphabetically. tests.sort() if LINUX: run_mp(args.j, tests, dirpath, args, cwd, suricata_config) else: run_single(tests, dirpath, args, cwd, suricata_config) passed = count_dict["passed"] failed = count_dict["failed"] skipped = count_dict["skipped"] print("") print("PASSED: %d" % (passed)) print("FAILED: %d" % (failed)) print("SKIPPED: %d" % (skipped)) if args.debugfailed: if len(failedLogs) > 0: print("") print("Failed tests debug output:") for dirpath in failedLogs: print("- Test %s:" % os.path.basename(dirpath)) for r, d, f in os.walk(dirpath+"/output"): for fname in f: path = os.path.join(r, fname) print(" - %s" % path) try: with open(path, "r") as fcontents: try: buf = fcontents.read().decode() print(buf) except: print(" - [Not dumping file that won't utf-8 decode]") except Exception as err: print("Failed to open %s: %s" % (path, str(err))) if failed > 0: return 1 return 0 if __name__ == "__main__": sys.exit(main())
image_introspect.py
"""introspect an image""" import json import re import subprocess import sys import multiprocessing from types import SimpleNamespace from typing import Any from typing import Callable, List from typing import Dict from typing import Union from queue import Queue # pylint: disable=broad-except PROCESSES = (multiprocessing.cpu_count() - 1) or 1 class Command(SimpleNamespace): # pylint: disable=too-few-public-methods """command obj holder""" id: str command: str parse: Callable stdout: str = "" stderr: str = "" details: Union[List, Dict, str] = "" errors: List = [] def run_command(command: Command) -> None: """run a command""" try: proc_out = subprocess.run( command.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, universal_newlines=True, shell=True, ) command.stdout = proc_out.stdout except subprocess.CalledProcessError as exc: command.stderr = str(exc.stderr) command.errors = [str(exc.stderr)] def worker(pending_queue: multiprocessing.Queue, completed_queue: multiprocessing.Queue) -> None: """a worker, pulls from pending, runs & processes places in completed""" while True: command = pending_queue.get() if command is None: break run_command(command) try: command.parse(command) except Exception as exc: command.errors = command.errors + [str(exc)] completed_queue.put(command) class CommandRunner: """runs commands""" def __init__(self): self._completed_queue: Union[Queue, None] = None self._pending_queue: Union[Queue, None] = None @staticmethod def run_sproc(cmd_clss: Any): """run with a single proc""" all_commands = tuple(cmd for cmd_cls in cmd_clss for cmd in cmd_cls.commands) results = [] for command in all_commands: run_command(command) try: command.parse(command) except Exception as exc: command.errors = command.errors + [str(exc)] results.append(command) return results def run_mproc(self, cmd_clss): """start the workers, unload the completed queue""" if self._completed_queue is None: self._completed_queue = multiprocessing.Manager().Queue() if self._pending_queue is None: self._pending_queue = multiprocessing.Manager().Queue() results = {} all_commands = tuple(cmd for cmd_cls in cmd_clss for cmd in cmd_cls.commands) self.start_workers(all_commands) results = [] while len(results) != len(all_commands): results.append(self._completed_queue.get()) return results def start_workers(self, jobs): """start workers, submit jobs to pending queue""" worker_count = min(len(jobs), PROCESSES) processes = [] for _proc in range(worker_count): proc = multiprocessing.Process( target=worker, args=(self._pending_queue, self._completed_queue) ) processes.append(proc) proc.start() for job in jobs: self._pending_queue.put(job) for _proc in range(worker_count): self._pending_queue.put(None) for proc in processes: proc.join() class CmdParser: """A base class for command parsers with common parsing functions.""" @staticmethod def _strip(value: str) -> str: """strip off spaces and quotes""" return value.strip('"').strip("'").strip() @staticmethod def re_partition(content, separator): """like partition, but uses an re""" separator_match = re.search(separator, content) if not separator_match: return content, "", "" matched_separator = separator_match.group(0) parts = re.split(matched_separator, content, 1) return parts[0], matched_separator, parts[1] def splitter(self, lines, delimiter): """split sections of delimited results""" results = [] result = {} while lines: line = lines.pop() left, delim, right = self.re_partition(line, delimiter) right = self._strip(right) if not delim: if result: 
results.append(result) result = {} continue key = left.lower().replace("_", "-").strip() value = right result[key] = value if result: results.append(result) return results class AnsibleCollections(CmdParser): """collect ansible collections""" @property def commands(self): """The command to run for listing ansible collections.""" command = "ansible-galaxy collection list" return [ Command( id="ansible_collections", command=command, parse=self.parse, ) ] @staticmethod def parse(command: Command): """parse""" collections = {} for line in command.stdout.splitlines(): parts = line.split() if len(parts) == 2 and parts[1][0].isdigit(): collections[parts[0].strip()] = parts[1].strip() command.details = collections class AnsibleVersion(CmdParser): """collect ansible version""" @property def commands(self) -> List[Command]: """generate the command""" return [Command(id="ansible_version", command="ansible --version", parse=self.parse)] @staticmethod def parse(command: Command) -> None: """parse""" version = command.stdout.splitlines()[0].split(" ", 1)[1].strip()[1:-1] command.details = version class OsRelease(CmdParser): """collect os release info""" @property def commands(self) -> List[Command]: """generate the command""" return [Command(id="os_release", command="cat /etc/os-release", parse=self.parse)] def parse(self, command) -> None: """parse""" parsed = self.splitter(command.stdout.splitlines(), "=") command.details = parsed class PythonPackages(CmdParser): """collect python packages""" @property def commands(self) -> List[Command]: """generate the command""" pre = Command(id="pip_freeze", command="python3 -m pip freeze", parse=self.parse_freeze) run_command(pre) pre.parse(pre) pkgs = " ".join(pkg for pkg in pre.details[0]) return [ Command(id="python_packages", command=f"python3 -m pip show {pkgs}", parse=self.parse) ] def parse(self, command): """parse""" parsed = self.splitter(command.stdout.splitlines(), ":") for pkg in parsed: for entry in ["required-by", "requires"]: if pkg[entry]: pkg[entry] = [p.strip() for p in pkg[entry].split(",")] else: pkg[entry] = [] command.details = parsed def parse_freeze(self, command): """parse pip freeze""" # skip the editables lines = [line for line in command.stdout.splitlines() if not line.startswith("-e")] parsed = self.splitter(lines, "(==|@)") command.details = parsed class RedhatRelease(CmdParser): """collect rh release""" @property def commands(self) -> List[Command]: """generate the command""" return [Command(id="redhat_release", command="cat /etc/redhat-release", parse=self.parse)] @staticmethod def parse(command): """parse""" parsed = command.stdout command.details = parsed class SystemPackages(CmdParser): """collect system pkgs""" @property def commands(self) -> List[Command]: """generate the command""" return [Command(id="system_packages", command="rpm -qai", parse=self.parse)] def parse(self, command): """parse""" packages = [] package = [] for line in command.stdout.splitlines(): if re.match(r"^Name\s{2,}:", line) and package: packages.append(package) package = [line] else: package.append(line) if package: packages.append(package) parsed = [] for package in packages: entry = {} while package: line = package.pop(0) left, _delim, right = self.re_partition(line, ":") key = left.lower().replace("_", "-").strip() # Description is at the end of the package section # read until package is empty if key == "description": description = [] while package: description.append(package.pop(0)) # Normalize the data, in the case description is totally empty if 
description: entry[key] = "\n".join(description) else: entry[key] = "No description available" parsed.append(entry) # other package details are 1 line each else: value = self._strip(right) entry[key] = value command.details = parsed def main(): """start here""" response = {"errors": []} response["python_version"] = {"details": {"version": " ".join(sys.version.splitlines())}} try: command_runner = CommandRunner() commands = [ AnsibleCollections(), AnsibleVersion(), OsRelease(), RedhatRelease(), PythonPackages(), SystemPackages(), ] results = command_runner.run_mproc(commands) for result in results: dicted = vars(result) dicted.pop("parse") for key in list(dicted.keys()): if key not in ["details", "errors"]: dicted[f"__{key}"] = dicted[key] dicted.pop(key) response[dicted["__id"]] = dicted except Exception as exc: response["errors"].append(str(exc)) print(json.dumps(response)) if __name__ == "__main__": main()
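
# Illustrative sketch (not part of the original module): how the
# CmdParser.splitter() helper is typically used. It partitions each line on
# a delimiter regex, normalizes the key (lower-cased, "_" replaced by "-"),
# and starts a new record whenever a line without the delimiter is seen.
# The sample lines below are made up for the sketch.
def _splitter_sketch():
    parser = CmdParser()
    # splitter() consumes lines with pop() from the end of the list, so pass
    # a copy if the caller still needs the original. Expected result here:
    # [{"version": "1.0", "name": "example"}]
    lines = ["Name: example", "Version: 1.0"]
    return parser.splitter(list(lines), ":")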
camera.py
#!/usr/bin/env python3 """ MIT License Copyright (c) 2021 Marcin Sielski <marcin.sielski@gmail.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # picamera module must be imported before gi module, # otherwise stack corruption occurs. import picamera import gi gi.require_version('Gst', '1.0') gi.require_version('GstBase', '1.0') gi.require_version('GstRtspServer', '1.0') from gi.repository import Gst, GstBase, GstRtspServer, GLib from signal import pause from subprocess import call from os import system #from netifaces import ifaddresses import threading import time import falcon from wsgiserver import WSGIServer import json import logging import inspect from argparse import ArgumentParser, ArgumentTypeError import signal import os import shutil import psutil from gpiozero import DiskUsage, CPUTemperature import subprocess import datetime import arducam_mipicamera as arducam import sys #import tracemalloc #tracemalloc.start() def camera_revision(): stdout_bk = os.dup(sys.stderr.fileno()) pipefd = os.pipe2(0) os.dup2(pipefd[1], sys.stderr.fileno()) arducam.mipi_camera().init_camera() os.close(pipefd[1]) os.dup2(stdout_bk, sys.stderr.fileno()) i = 0 revision = '' while True: ch = os.read(pipefd[0],1) if i >= 13 and ch != b' ': revision = revision + ch.decode("utf-8") i = i + 1 if ch == b' ' and i > 13 and i < 30: break return revision def name(obj): """ Utility function that adds space before 'Serv' in the name of the object Args: obj (any): object Returns: str: returns human readable name of name of the specified object """ return type(obj).__name__.replace("Serv", " Serv") class Server(object): """ Server Interface Args: object (object): base object """ def init(self): """ Initialize the Server """ pass def start(self): """ Starts execution of the Server """ pass def stop(self): """ Stops execution of the Server """ pass def restart(self): """ Restarts the Server """ self.stop() self.init() self.start() class RTSPServer(Server): """ Real Time Streaming Protocol (RTSP) Server """ def __init__( self, camera_server, address='0.0.0.0', port='8000', path='/pi'): """ Initialize RTSP Server Args: address (str): ip address port (str): port path (str): path """ self.__address__ = address self.__port__ = port self.__path__ = path self.__camera_server__ = camera_server self.__main_loop__ = GLib.MainLoop() server = GstRtspServer.RTSPServer.new() #address = ifaddresses('wlan1')[2][0]['addr'] launch_description = ( '( udpsrc port=3141 ! application/x-rtp, ' 'media=video, encoding-name=H264, clock-rate=90000 ! rtph264depay ' '! 
rtph264pay name=pay0 )') server.set_address(self.__address__) server.set_service(self.__port__) server.connect('client-connected', self.client_connected) factory = GstRtspServer.RTSPMediaFactory.new() factory.set_launch(launch_description) factory.set_shared(True) factory.set_transport_mode(GstRtspServer.RTSPTransportMode.PLAY) mount_points = server.get_mount_points() mount_points.add_factory(self.__path__, factory) server.attach(None) def client_connected(self, server, client): """ Callback method executed upon client connection Args: server (RTSPServer): RTSP server client (RTSPClient): RTSP client """ logging.info( "RTSP Client connected from " + client.get_connection().get_ip()) self.__camera_server__.send_keyframe() def start(self): """ Start RTSP Server """ logging.info( name(self) + " started at rtsp://" + self.__address__ + ":" + self.__port__ + self.__path__) self.__main_loop__.run() def stop(self): """ Stop RTSP Server """ self.__main_loop__.quit() logging.info(name(self) + " stopped") class CORSComponent(object): """ Cross-Origin Resource Sharing (CORS) Component as defined at https://falcon.readthedocs.io/en/stable/user/faq.html#how-do-i-implement-cors-with-falcon """ def process_response(self, req, resp, resource, req_succeeded): """ Process response from HTTPS Server and add CORS headers Args: req (Request): request resp (Response): response resource (HTTPSServer): HTTPS Server req_succeeded (bool): indicated if request succeeded """ resp.set_header('Access-Control-Allow-Origin', '*') if ( req_succeeded and req.method == 'OPTIONS' and req.get_header('Access-Control-Request-Method') ): # NOTE(kgriffs): This is a CORS preflight request. Patch the # response accordingly. allow = resp.get_header('Allow') resp.delete_header('Allow') allow_headers = req.get_header( 'Access-Control-Request-Headers', default='*' ) resp.set_headers(( ('Access-Control-Allow-Methods', allow), ('Access-Control-Allow-Headers', allow_headers), ('Access-Control-Max-Age', '86400'), # 24 hours )) class HTTPSServer(WSGIServer): """ HyperText Transfer Protocol Secure (HTTPS) Server """ def __init__( self, camera_server, address='0.0.0.0', port=8888, path='/', keyfile='/opt/camera/bin/key.pem', certfile='/opt/camera/bin/cert.pem'): """ Initialize HTTPS Server Args: camera_server (CameraServer): camera server address (str): address port (str): port path (str): path keyfile (str): path to keyfile certfile (str): path to certfile """ self.__camera_server__ = camera_server self.__address__ = address self.__port__ = port self.__path__ = path app = falcon.API(middleware=[CORSComponent()]) #app.add_route('/pi', self) app.add_route(path, self) super().__init__( app, host=self.__address__, port=self.__port__, keyfile=keyfile, certfile=certfile) def start(self): """ Start HTTPS Server """ logging.info( name(self) + " started at https://" + self.__address__ + ":" + str(self.__port__) + self.__path__) super().start() def stop(self): """ Stop HTTPS Server """ super().stop() logging.info(name(self) + " stopped") def error_log(self, msg, level, traceback): """ Log error message Args: msg (str): error message level (int): log level traceback (bool): indicate whether to enable backtrace """ if msg != 'Error in HTTPServer.tick': logging.log(level, msg) if traceback: logging.log(level, logging.traceback.format_exc()) def on_get(self, req, resp): """ Handle HTTP GET request Args: req (Request): request resp (Response): response """ logging.info(req.params) resp.status = falcon.HTTP_200 # Quality if 'width' in req.params and 
'height' in req.params: self.__camera_server__.set_resolution(int(req.params['width']), int(req.params['height'])) if 'framerate' in req.params: self.__camera_server__.set_framerate(int(req.params['framerate'])) if 'bitrate_mode' in req.params: self.__camera_server__.set_bitrate_mode( int(req.params['bitrate_mode'])) if 'bitrate' in req.params: self.__camera_server__.set_bitrate(int(req.params['bitrate'])) if 'sensor_mode' in req.params: self.__camera_server__.set_sensor_mode( int(req.params['sensor_mode'])) # Effects if 'brightness' in req.params: self.__camera_server__.set_brightness( int(req.params['brightness'])) if 'contrast' in req.params: self.__camera_server__.set_contrast(int(req.params['contrast'])) if 'saturation' in req.params: self.__camera_server__.set_saturation(int(req.params['saturation'])) if 'sharpness' in req.params: self.__camera_server__.set_sharpness(int(req.params['sharpness'])) if 'drc' in req.params: self.__camera_server__.set_drc(int(req.params['drc'])) if 'image_effect' in req.params: self.__camera_server__.set_image_effect( int(req.params['image_effect'])) if 'awb_mode' in req.params: self.__camera_server__.set_awb_mode(int(req.params['awb_mode'])) if 'awb_gain_blue' in req.params: self.__camera_server__.set_awb_gain_blue( int(req.params['awb_gain_blue'])) if 'awb_gain_red' in req.params: self.__camera_server__.set_awb_gain_red( int(req.params['awb_gain_red'])) # Controls if 'exposure_mode' in req.params: self.__camera_server__.set_exposure_mode( int(req.params['exposure_mode'])) if 'exposure_compensation' in req.params: self.__camera_server__.set_exposure_compensation( int(req.params['exposure_compensation'])) if 'metering_mode' in req.params: self.__camera_server__.set_metering_mode( int(req.params['metering_mode'])) if 'iso' in req.params: self.__camera_server__.set_iso(int(req.params['iso'])) if 'shutter_speed' in req.params: self.__camera_server__.set_shutter_speed( int(req.params['shutter_speed'])) if 'video_stabilisation' in req.params: self.__camera_server__.set_video_stabilisation( req.params['video_stabilisation'] == '1') if 'gain' in req.params: self.__camera_server__.set_gain(int(req.params['gain'])) if 'awb' in req.params: self.__camera_server__.set_awb(int(req.params['awb'])) # Orientation if 'rotation' in req.params: self.__camera_server__.set_rotation(int(req.params['rotation'])) if 'hflip' in req.params: self.__camera_server__.set_hflip(req.params['hflip'] == '1') if 'vflip' in req.params: self.__camera_server__.set_vflip(req.params['vflip'] == '1') if 'video_direction' in req.params: self.__camera_server__.set_video_direction( int(req.params['video_direction'])) # Controls if 'logging_level' in req.params: self.__camera_server__.set_logging_level(int(req.params['logging_level'])) if 'stats' in req.params: self.__camera_server__.set_stats(int(req.params['stats'],16)) if 'rtsp' in req.params: self.__camera_server__.set_rtsp(req.params['rtsp'] == '1') if 'record' in req.params: self.__camera_server__.set_record(req.params['record'] == '1') if 'format' in req.params: self.__camera_server__.set_format(req.params['format'] == '1') if 'max_files' in req.params: self.__camera_server__.set_max_files(int(req.params['max_files'])) if 'max_size_bytes' in req.params: self.__camera_server__.set_max_size_bytes( int(req.params['max_size_bytes'])) if 'max_size_time' in req.params: self.__camera_server__.set_max_size_time( int(req.params['max_size_time'])) if 'persistent' in req.params: self.__camera_server__.set_persistent(int(req.params['persistent'])) if 
'continuation' in req.params: self.__camera_server__.set_continuation(req.params['continuation'] == '1') if 'media' in req.params: resp.body = (self.__camera_server__.get_media()) return if 'restart' in req.params: self.__camera_server__.restart() if 'remove' in req.params: self.__camera_server__.remove(req.params['remove']) resp.body = (self.__camera_server__.get_media()) return if 'time' in req.params: self.__camera_server__.set_time(int(req.params['time'])) resp.body = (self.__camera_server__.get_parameters()) class CameraServer(Server): """ Camera Server """ def __init__(self, args): """ Initialize Camera Server """ self.__camera_timeout__ = args.camera_timeout self.__throughput__ = args.throughput self.__default_logging_level__ = getattr(logging, args.debug.upper()) self.__error_lock__ = threading.Lock() self.__main_lock__ = threading.Lock() self.__restart_lock__ = threading.Lock() self.__image_effect_lock__ = threading.Lock() try: with picamera.PiCamera() as camera: self.__model__ = camera.revision except: self.__model__ = camera_revision() self.__stats_id__ = 0 self.__extra_controls__ = 'encode,video_bitrate_mode={},h264_profile=0,\ h264_level=11,video_bitrate={},h264_i_frame_period={}' parameters = None try: with open('camera.json', 'r') as config: parameters = json.load(config) except: logging.warning("'camera.json' not found") if parameters is not None and 'persistent' in parameters and \ parameters['persistent'] == 1: logging.info("Loading parameters from 'camera.json'") # Quality if 'width' in parameters: self.__width__ = parameters['width'] else: if self.__model__ == 'imx219': self.__width__ = 800 if self.__model__ == 'ov9281': self.__width__ = 1280 if 'height' in parameters: self.__height__ = parameters['height'] else: if self.__model__ == 'imx219': self.__height__ = 608 if self.__model__ == 'ov9281': self.__width__ = 800 if 'framerate' in parameters: self.__framerate__ = parameters['framerate'] else: self.__framerate__ = 30 if 'bitrate_mode' in parameters: self.__bitrate_mode__ = parameters['bitrate_mode'] else: self.__bitrate_mode__ = 0 if 'bitrate' in parameters: self.__bitrate__ = parameters['bitrate'] else: self.__bitrate__ = 3000000 if 'sensor_mode' in parameters: self.__sensor_mode__ = parameters['sensor_mode'] else: self.__sensor_mode__ = 0 # Effects if 'brightness' in parameters: self.__brightness__ = parameters['brightness'] else: self.__brightness__ = 50 if 'contrast' in parameters: self.__contrast__ = parameters['contrast'] else: self.__contrast__ = 0 if 'saturation' in parameters: self.__saturation__ = parameters['saturation'] else: self.__saturation__ = 0 if 'sharpness' in parameters: self.__sharpness__ = parameters['sharpness'] else: self.sharpness = 0 if 'drc' in parameters: self.__drc__ = parameters['drc'] else: self.__drc = 0 if 'image_effect' in parameters: self.__image_effect__ = parameters['image_effect'] else: self.__image_effect__ = 0 if 'awb_mode' in parameters: self.__awb_mode__ = parameters['awb_mode'] else: self.__awb_mode__ = 1 if 'awb_gain_blue' in parameters: self.__awb_gain_blue__ = parameters['awb_gain_blue'] else: self.__awb_gain_blue__ = 0 if 'awb_gain_red' in parameters: self.__awb_gain_red__ = parameters['awb_gain_red'] else: self.__awb_gain_red__ = 0 # Settings if 'exposure_mode' in parameters: self.__exposure_mode__ = parameters['exposure_mode'] else: self.__exposure_mode__ = 1 if 'metering_mode' in parameters: self.__metering_mode__ = parameters['metering_mode'] else: self.__metering_mode__ = 0 if 'exposure_compensation' in parameters: 
self.__exposure_compensation__ = \ parameters['exposure_compensation'] else: self.__exposure_compensation__ = 0 if 'iso' in parameters: self.__iso__ = parameters['iso'] else: self.__iso__ = 0 if 'shutter_speed' in parameters: self.__shutter_speed__ = parameters['shutter_speed'] else: self.__shutter_speed__ = 0 if 'video_stabilisation' in parameters: self.__video_stabilisation__ = \ (parameters['video_stabilisation'] == 1) else: self.__video_stabilisation__ = False if 'gain' in parameters: self.__gain__ = parameters['gain'] else: self.__gain__ = 1 if 'awb' in parameters: self.__awb__ = parameters['awb'] else: self.__awb__ = 4 # Orientation if 'rotation' in parameters: self.__rotation__ = parameters['rotation'] else: self.__rotation__ = 0 if 'hflip' in parameters: self.__hflip__ = (parameters['hflip'] == 1) else: self.__hflip__ = False if 'vflip' in parameters: self.__vflip__ = (parameters['vflip'] == 1) else: self.__vflip__ = False if 'video_direction' in parameters: self.__video_direction__ = parameters['video_direction'] else: self.__video_direction__ = 0 # Controls if 'logging_level' in parameters: self.__logging_level__ = parameters['logging_level'] else: self.__logging_level__ = 0 if 'stats' in parameters: self.__stats__ = int(parameters['stats'], 16) else: self.__stats__ = 0x00000000 if 'rtsp' in parameters: self.__rtsp__ = (parameters['rtsp'] == 1) else: self.__rtsp__ = False if 'record' in parameters: self.__record__ = (parameters['record'] == 1) else: self.__record__ = False if 'format' in parameters: self.__format__ = (parameters['format'] == 1) else: self.__format__ = False if 'max_files' in parameters: self.__max_files__ = parameters['max_files'] else: self.__max_files__ = 0 if 'max_size_bytes' in parameters: self.__max_size_bytes__ = parameters['max_size_bytes'] else: self.__max_size_bytes__ = 0 if 'max_size_time' in parameters: self.__max_size_time__ = parameters['max_size_time'] else: self.__max_size_time__ = 0 if 'continuation' in parameters: self.__continuation__ = (parameters['continuation'] == 1) if self.__continuation__: self.__record__ = True else: self.__continuation__ = False if self.__continuation__: self.__fragment_id__ = parameters['fragment_id'] else: self.__fragment_id__ = 0 self.__persistent__ = (parameters['persistent'] == 1) else: # Quality if self.__model__ == 'imx219': self.__width__ = 800 self.__height__ = 608 if self.__model__ == 'ov9281': self.__width__ = 1280 self.__height__ = 800 self.__framerate__ = 30 self.__bitrate_mode__ = 0 self.__bitrate__ = 3000000 self.__sensor_mode__ = 0 # Effects self.__brightness__ = 50 self.__contrast__ = 0 self.__saturation__ = 0 self.__sharpness__ = 0 self.__drc__ = 0 self.__image_effect__ = 0 self.__awb_mode__ = 1 self.__awb_gain_blue__ = 0 self.__awb_gain_red__ = 0 # Settings self.__exposure_mode__ = 1 self.__metering_mode__ = 0 self.__exposure_compensation__ = 0 self.__iso__ = 0 self.__shutter_speed__ = 0 # TODO(marcin.sielski): Change it to True self.__video_stabilisation__ = False self.__gain__ = 1 self.__awb__ = 4 # Orientation self.__rotation__ = 0 self.__hflip__ = False self.__vflip__ = False self.__video_direction__ = 0 # Controls self.__logging_level__ = 0 self.__stats__ = 0x00000000 self.__rtsp__ = False self.__record__ = False self.__format__ = False self.__max_files__ = 0 self.__max_size_bytes__ = 0 self.__max_size_time__ = 0 self.__fragment_id__ = 0 self.__continuation__ = False self.__persistent__ = False self.init() def set_time(self, time): """ Sets system time to specified time Args: time (int): time to 
set """ os.system('sudo timedatectl set-time @' + str(time)) os.system('sudo fake-hwclock') os.sync() def send_keyframe(self): """ Forces to send key frame """ srcpad = self.__encoder__.get_static_pad( "src") structure = Gst.Structure.new_empty("GstForceKeyUnit") structure.set_value('all-headers', True) srcpad.send_event( Gst.Event.new_custom(Gst.EventType.CUSTOM_UPSTREAM, structure)) def get_parameters(self): """ Return Camera Server parameters set Returns: json: Camera Server parameters set """ self.send_keyframe() return json.dumps( { 'model': self.__model__, # Quality 'width': self.__width__, 'height': self.__height__, 'framerate': self.__framerate__, 'bitrate_mode': self.__bitrate_mode__, 'bitrate': self.__bitrate__, 'sensor_mode': self.__sensor_mode__, # Effects 'brightness': self.__brightness__, 'contrast': self.__contrast__, 'saturation': self.__saturation__, 'sharpness': self.__sharpness__, 'drc': self.__drc__, 'image_effect': self.__image_effect__, 'awb_mode': self.__awb_mode__, 'awb_gain_blue': self.__awb_gain_blue__, 'awb_gain_red': self.__awb_gain_red__, # Settings 'exposure_mode': self.__exposure_mode__, 'metering_mode': self.__metering_mode__, 'exposure_compensation': self.__exposure_compensation__, 'iso': self.__iso__, 'shutter_speed': self.__shutter_speed__, 'video_stabilisation': int(self.__video_stabilisation__), 'gain': self.__gain__, 'awb': self.__awb__, # Orientation 'rotation': self.__rotation__, 'hflip': int(self.__hflip__), 'vflip': int(self.__vflip__), 'video_direction': self.__video_direction__, # Controls 'logging_level': int(self.__logging_level__), 'stats': '{0:#0{1}x}'.format(self.__stats__,10), 'rtsp': int(self.__rtsp__), 'record': int(self.__record__), 'format': int(self.__format__), 'max_files': self.__max_files__, 'max_size_bytes': self.__max_size_bytes__, 'max_size_time': self.__max_size_time__, 'persistent': int(self.__persistent__), 'fragment_id': self.__fragment_id__, 'continuation': int(self.__continuation__) }, sort_keys=True) def __get_key__(self, e): """ Obtain key """ return e[0] def get_media(self): """ Obtain names of media files from the media folder Returns: json: list of media files from the media folder """ media = [] _, _, free = shutil.disk_usage('/') media.append([str(free // (2**30))]) _, _, filenames = next(os.walk('.')) for filename in filenames: if filename.endswith('.mkv') or filename.endswith('.mp4'): media.append( [filename, datetime.datetime.fromtimestamp(os.path.getmtime(filename)).strftime("%Y-%m-%d, %H:%M")]) media.sort(key=self.__get_key__) return json.dumps(media, sort_keys=True) def init(self): """ Initialize streaming pipeline """ self.__pipeline__ = Gst.Pipeline('camera-server-pipeline') self.__source__ = self.__get_source__() self.__source_caps__ = Gst.Caps.new_empty_simple('video/x-raw') self.__source_caps__.set_value('width', self.__width__) self.__source_caps__.set_value('height', self.__height__) if self.__model__ == 'imx219': self.__source_caps__.set_value('format', 'I420') if self.__model__ == 'ov9281': self.__source_caps__.set_value('format', 'GRAY8') self.__source_caps__.set_value( 'framerate', Gst.Fraction(self.__framerate__, 1)) self.__source_capsfilter__ = Gst.ElementFactory.make( 'capsfilter', 'source-capsfilter') self.__source_capsfilter__.set_property('caps', self.__source_caps__) if self.__model__ == 'ov9281': self.__overlay__ = Gst.ElementFactory.make( 'textoverlay', 'text-overlay') self.__overlay__.set_property('shaded-background', True) self.__overlay__.set_property('valignment','top') 
self.__overlay__.set_property('font-desc', 'Arial, 12') self.__raw_tee__ = Gst.ElementFactory.make('tee', 'raw-tee') if self.__model__ == 'ov9281': self.__converter__ = Gst.ElementFactory.make( 'videoconvert', 'converter') self.__converter_caps__ = Gst.Caps.new_empty_simple('video/x-raw') self.__converter_caps__.set_value('width', self.__width__) self.__converter_caps__.set_value('height', self.__height__) self.__converter_caps__.set_value( 'framerate', Gst.Fraction(self.__framerate__, 1)) self.__converter_caps__.set_value('format', 'RGB') self.__converter_capsfilter__ = Gst.ElementFactory.make( 'capsfilter', 'converter-capsfilter') self.__converter_capsfilter__.set_property('caps', self.__converter_caps__) self.__encoder__ = Gst.ElementFactory.make('v4l2h264enc', 'encoder') self.__encoder__.set_property( 'extra-controls', Gst.Structure.new_from_string( self.__extra_controls__.format(self.__bitrate_mode__, self.__bitrate__, self.__framerate__))) self.__encoder_caps__ = Gst.Caps.new_empty_simple('video/x-h264') self.__encoder_caps__.set_value('profile', 'baseline') self.__encoder_caps__.set_value('level', '4') self.__encoder_capsfilter__ = Gst.ElementFactory.make( 'capsfilter', 'encoder-capsfilter') self.__encoder_capsfilter__.set_property('caps', self.__encoder_caps__) self.__parser__ = Gst.ElementFactory.make('h264parse', 'parser') GstBase.BaseParse.set_infer_ts(self.__parser__, True) GstBase.BaseParse.set_pts_interpolation(self.__parser__, True) self.__parser__.set_property('config-interval', -1) self.__h264_tee__ = Gst.ElementFactory.make('tee', 'h264-tee') self.__payloader__ = Gst.ElementFactory.make('rtph264pay', 'payloader') self.__payloader__.set_property('config-interval', -1) self.__rtsp_tee__ = Gst.ElementFactory.make('tee', 'rtsp-tee') self.__sink_queue__ = Gst.ElementFactory.make('queue', 'sink-queue') self.__sink_queue__.set_property( 'max-size-buffers', 0) self.__sink_queue__.set_property( 'max-size-bytes', 0) self.__sink_queue__.set_property('max-size-time', 0) self.__sink__ = Gst.ElementFactory.make('udpsink', 'sink') self.__sink__.set_property('host', '127.0.0.1') self.__sink__.set_property('port', 31415) self.__sink__.set_property('sync', False) self.__pipeline__.add(self.__source__) self.__pipeline__.add(self.__source_capsfilter__) if self.__model__ == 'ov9281': self.__pipeline__.add(self.__overlay__) self.__pipeline__.add(self.__raw_tee__) if self.__model__ == 'ov9281': self.__pipeline__.add(self.__converter__) self.__pipeline__.add(self.__converter_capsfilter__) self.__pipeline__.add(self.__encoder__) self.__pipeline__.add(self.__encoder_capsfilter__) self.__pipeline__.add(self.__parser__) self.__pipeline__.add(self.__h264_tee__) self.__pipeline__.add(self.__payloader__) self.__pipeline__.add(self.__rtsp_tee__) self.__pipeline__.add(self.__sink_queue__) self.__pipeline__.add(self.__sink__) self.__source__.link(self.__source_capsfilter__) if self.__model__ == 'imx219': self.__source_capsfilter__.link(self.__raw_tee__) self.__raw_tee__.link(self.__encoder__) if self.__model__ == 'ov9281': self.__source_capsfilter__.link(self.__overlay__) self.__overlay__.link(self.__raw_tee__) self.__raw_tee__.link(self.__converter__) self.__converter__.link(self.__converter_capsfilter__) self.__converter_capsfilter__.link(self.__encoder__) self.__encoder__.link(self.__encoder_capsfilter__) self.__encoder_capsfilter__.link(self.__parser__) self.__parser__.link(self.__h264_tee__) self.__h264_tee__.link(self.__payloader__) self.__payloader__.link(self.__rtsp_tee__) 
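        # The static part of the pipeline assembled in this method is:
        #   imx219: rpicamsrc -> capsfilter(I420) -> raw-tee -> v4l2h264enc
        #   ov9281: arducamsrc -> capsfilter(GRAY8) -> textoverlay -> raw-tee
        #           -> videoconvert -> capsfilter(RGB) -> v4l2h264enc
        # followed in both cases by:
        #   -> capsfilter(h264 baseline) -> h264parse -> h264-tee
        #   -> rtph264pay -> rtsp-tee -> queue -> udpsink(127.0.0.1:31415)
        # The recording and RTSP branches are attached to raw-tee/h264-tee and
        # rtsp-tee at runtime from __enable_disable_record__ and
        # __enable_disable_rtsp__ using blocking pad probes.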
self.__rtsp_tee__.link(self.__sink_queue__) self.__sink_queue__.link(self.__sink__) self.bus = self.__pipeline__.get_bus() self.bus.set_sync_handler(self.__on_message__) self.__file_queue__ = None self.__file_rate__ = None self.__file_converter__ = None self.__file_encoder__ = None self.__file_sink__ = None self.__raw_framerate__ = 0 def start(self): """ Start Camera Server """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") logging.info(name(self) + " started") self.set_logging_level(self.__logging_level__) self.__pipeline__.set_state(Gst.State.PLAYING) self.set_stats(self.__stats__) # if streaming is configured if self.__rtsp__: self.__rtsp__ = False # start streaming during startup logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) self.set_rtsp(True, True) logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) self.__restart_lock__.release() logging.debug(function_name + ": self.__restart_lock__.release()") # if recording is configured if self.__record__: self.__record__ = False # start recording during startup logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) self.set_record(True, True) logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) self.__restart_lock__.release() logging.debug(function_name + ": self.__restart_lock__.release()") logging.debug(function_name + ": exit") def stop(self): """ Stop Camera Server """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' 
+ inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") if not self.__record__ and self.__stats__ == 0x0000040C: self.__stats__ = 0x000000000 if self.__persistent__: logging.info("Writing parameters to 'camera.json' file") with open('camera.json', 'w') as config: config.write(self.get_parameters()) os.system('sudo fake-hwclock') os.sync() else: parameters = None try: with open('camera.json', 'r') as config: parameters = json.load(config) except: pass if parameters is not None: parameters['persistent'] = self.__persistent__ with open('camera.json', 'w') as config: json.dump(parameters, config) if self.__stats_id__ != 0: GLib.source_remove(self.__stats_id__) self.__stats_id__ = 0 # if still streaming during shutdown if self.__rtsp__: # stop streaming during shutdown logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) self.set_rtsp(False, True) logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) self.__restart_lock__.release() logging.debug(function_name + ": self.__restart_lock__.release()") # if still recording during shutdown if self.__record__: # stop recording during shutdown logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) self.set_record(False, True) logging.debug( function_name + ": __restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) self.__restart_lock__.release() logging.debug(function_name + ": __restart_lock__.release()") # NOTE(marcin.sielski): Make sure pipeline elements are set to # Gst.State.NULL so that the object can be safely disposed. # if self.__source__ is not None: # self.__source__.set_state(Gst.State.NULL) # self.__source__.unlink(self.__source_capsfilter__) # self.__pipeline__.remove(self.__source__) # self.__source__ = None # del self.__source__ # if self.__source_capsfilter__ is not None: # self.__source_capsfilter__.set_state(Gst.State.NULL) # self.__source_capsfilter__.unlink(self.__raw_tee__) # self.__pipeline__.remove(self.__source_capsfilter__) # self.__source_capsfilter__ = None # self.__source_caps__ = None # del self.__source_caps__ # if self.__raw_tee__ is not None: # self.__raw_tee__.set_state(Gst.State.NULL) # self.__raw_tee__.unlink(self.__converter__) # self.__pipeline__.remove(self.__raw_tee__) # self.__raw_tee__ = None # del self.__raw_tee__ # if self.__converter__ is not None: # self.__converter__.set_state(Gst.State.NULL) # self.__converter__.unlink(self.__converter_capsfilter__) # self.__pipeline__.remove(self.__converter__) # self.__converter__ = None # del self.__converter__ # if self.__converter_capsfilter__ is not None: # self.__converter_capsfilter__.set_state(Gst.State.NULL) # self.__converter_capsfilter__.unlink(self.__encoder__) # self.__pipeline__.remove(self.__converter_capsfilter__) # self.__converter_capsfilter__ = None # self.__converter_caps__ = None # del self.__converter_caps__ # if self.__encoder__ is not None: # self.__encoder__.set_state(Gst.State.NULL) # self.__encoder__.unlink(self.__encoder_capsfilter__) # self.__pipeline__.remove(self.__encoder__) # self.__encoder__ = None # del self.__encoder__ # if self.__encoder_capsfilter__ is not None: # self.__encoder_capsfilter__.set_state(Gst.State.NULL) # self.__encoder_capsfilter__.unlink(self.__parser__) # self.__pipeline__.remove(self.__encoder_capsfilter__) # self.__encoder_capsfilter__ = None # 
self.__encoder_caps__ = None # del self.__encoder_caps__ # if self.__parser__ is not None: # self.__parser__.set_state(Gst.State.NULL) # self.__parser__.unlink(self.__h264_tee__) # self.__pipeline__.remove(self.__parser__) # self.__parser__ = None # del self.__parser__ # if self.__h264_tee__ is not None: # self.__h264_tee__.set_state(Gst.State.NULL) # self.__h264_tee__.unlink(self.__payloader__) # self.__pipeline__.remove(self.__h264_tee__) # self.__h264_tee__ = None # del self.__h264_tee__ # if self.__payloader__ is not None: # self.__payloader__.set_state(Gst.State.NULL) # self.__payloader__.unlink(self.__rtsp_tee__) # self.__pipeline__.remove(self.__payloader__) # self.__payloader__ = None # del self.__payloader__ # if self.__rtsp_tee__ is not None: # self.__rtsp_tee__.set_state(Gst.State.NULL) # self.__rtsp_tee__.unlink(self.__sink_queue__) # self.__pipeline__.remove(self.__rtsp_tee__) # self.__rtsp_tee__ = None # del self.__rtsp_tee__ # if self.__sink_queue__ is not None: # self.__sink_queue__.set_state(Gst.State.NULL) # self.__sink_queue__.unlink(self.__sink__) # self.__pipeline__.remove(self.__sink_queue__) # self.__sink_queue__ = None # del self.__sink_queue__ # if self.__sink__ is not None: # self.__sink__.set_state(Gst.State.NULL) # self.__pipeline__.remove(self.__sink__) # self.__sink__ = None # del self.__sink__ # # if self.__file_queue__ is not None: # self.__file_queue__.set_state(Gst.State.NULL) # self.__file_queue__ = None # if self.__file_rate__ is not None: # self.__file_rate__.set_state(Gst.State.NULL) # self.__file_rate__ = None # if self.__file_converter__ is not None: # self.__file_converter__.set_state(Gst.State.NULL) # self.__file_converter__ = None # if self.__file_encoder__ is not None: # self.__file_encoder__.set_state(Gst.State.NULL) # self.__file_encoder__ = None # if self.__file_sink__ is not None: # self.__file_sink__.set_state(Gst.State.NULL) # self.__file_sink__ = None self.__pipeline__.set_state(Gst.State.NULL) logging.info(name(self) + " stopped") logging.debug(function_name + ": exit") def __on_error_lock_release__(self): """ Release self.__error_lock__ Returns: bool: False to indicate execute once """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") if self.__error_lock__.locked(): self.__error_lock__.release() logging.debug(function_name + ": __error_lock.release()") logging.debug(function_name + ": return False") return False def __on_restart__(self): """ Restart Camera Server to try to recover from error Returns: bool: False to indicate execute once """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") # NOTE(marcin.sielski): We are under error condition so try to restart # only streaming. self.__record__ = False self.__rtsp__ = False self.__image_effect__ = 0 self.restart(True) GLib.timeout_add_seconds( round((self.__camera_timeout__ + 500) / 1000), self.__on_error_lock_release__) logging.debug(function_name + ": return False") return False def __on_stop__(self): """ Finalize recording Returns: bool: False to indicate execute only once """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' 
+ inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") if self.__file_queue__ is not None: self.__file_queue__.set_state(Gst.State.NULL) self.__file_queue__ = None if self.__file_rate__ is not None: self.__file_rate__.set_state(Gst.State.NULL) self.__file_rate__ = None if self.__file_converter__ is not None: self.__file_converter__.set_state(Gst.State.NULL) self.__file_converter__ = None if self.__file_encoder__ is not None: self.__file_encoder__.set_state(Gst.State.NULL) self.__file_encoder__ = None if self.__file_sink__ is not None: self.__file_sink__.set_state(Gst.State.NULL) self.__file_sink__ = None self.__on_store__() if ( self.__model__ == 'imx219' and self.__source__.get_property('annotation-mode') == 0x0000040C ): self.__source__.set_property('annotation-mode', 0x00000000) if self.__model__ == 'ov9281': self.set_stats(self.__stats__) logging.info("Recording stopped") # if request was executed in unsafe context if not self.__safe__: # if not unblocked by an error if self.__main_lock__.locked(): # unblock concurrent requests self.__main_lock__.release() logging.debug(function_name + ": self.__main_lock__.release()") # if request was executed in safe context else: # if not unblocked by an error if self.__restart_lock__.locked(): # unblock execution self.__restart_lock__.release() logging.debug( function_name + ": self.__restart_lock__.release()") logging.debug(function_name + ": return False") return False def __on_message__(self, bus, message): """ Handle messages on the bus Returns: BusSyncReply: with decision what to do further with the message """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name t = message.type logging.debug(function_name + ": "+str(t)) if t == Gst.MessageType.EOS: logging.debug(function_name + ": Gst.MessageType.EOS") logging.info("EOS") GLib.timeout_add_seconds(0, self.__on_restart__) return Gst.BusSyncReply.DROP elif t == Gst.MessageType.ERROR: logging.debug(function_name + ": Gst.MessageType.ERROR") error, debug = message.parse_error() logging.error(str(error) + " at " + str(debug)) if ( str(error) == "gst-stream-error-quark: Internal data stream error. 
(1)" ): self.__pipeline__.set_state(Gst.State.PLAYING) sinkpad = self.__source__.get_static_pad("src").get_peer() sinkpad.send_event(Gst.Event.new_eos()) else: # if error already is pending if self.__error_lock__.locked(): # drop the message return Gst.BusSyncReply.DROP # notify that server has pending error logging.debug( function_name + ": self.__error_lock__.acquire(blocking=True)") self.__error_lock__.acquire(blocking=True) # release any existing locks to resume execution if self.__main_lock__.locked(): self.__main_lock__.release() logging.debug( function_name + ": self.__main_lock__.release()") if self.__restart_lock__.locked(): self.__restart_lock__.release() logging.debug( function_name + ": self.__restart_lock__.release()") elif t == Gst.MessageType.ELEMENT: logging.debug(function_name + ": Gst.MessageType.ELEMENT") s = message.get_structure() if s.has_name("GstBinForwarded"): forward_msg = s.get_value("message") if forward_msg.type == Gst.MessageType.EOS: logging.info( "EOS received from element " + forward_msg.src.name) if forward_msg.src.name == 'file-sink': srcpad = self.__file_queue__.get_static_pad("src") if ( srcpad is not None and (srcpad.is_blocking() or srcpad.is_blocked()) ): srcpad.remove_probe(self.probe_id) # destroy pipeline self.__pipeline__.remove(self.__file_queue__) if self.__file_rate__ is not None: self.__pipeline__.remove(self.__file_rate__) if self.__file_converter__ is not None: self.__pipeline__.remove(self.__file_converter__) if self.__file_encoder__ is not None: self.__pipeline__.remove(self.__file_encoder__) self.__pipeline__.remove(self.__file_sink__) GLib.timeout_add_seconds(0, self.__on_stop__) return Gst.BusSyncReply.DROP # if s.has_name("GstMultiFileSink"): # self.__index__ = self.__file_sink__.get_property('index') # logging.debug( # function_name + ": self.__index__=" + str(self.__index__)) # # NOTE(marcin.sielski): Workaround for index which cannot be set # # to -1. 
# if self.__index_changed__ and self.__index__ == 1: # self.__index__ = 0 # self.__file_sink__.set_property('index', self.__index__) # self.__index_changed__ = False # filename = 'v_' + str(self.__width__) + 'x' +\ # str(self.__height__) + '@' +\ # str(self.__raw_framerate__) + '_I420_%02d.raw' # logging.debug( # function_name + ": " + # filename.replace('_I420_%02d.raw','_I420_00.raw')) # self.__file_sink__.set_property('location', filename) # elif self.__index__ >= self.__max_files__ - 1: # self.__index__ = 0 # self.__file_sink__.set_property('index', self.__index__) # self.__index_changed__ = True # filename = 'v_' + str(self.__width__) + 'x' + \ # str(self.__height__) + '@' + str(self.__raw_framerate__) + \ # '_I420_00.raw' # logging.debug( # function_name + ": " + # filename.replace('_I420_00.raw','_I420_09.raw')) # self.__file_sink__.set_property('location', filename) # elif self.__index__ == 0: # filename = 'v_' + str(self.__width__) + 'x' + \ # str(self.__height__) + '@' + str(self.__raw_framerate__) + \ # '_I420_00.raw' # logging.debug(function_name + ": " + filename) # elif self.__index_changed__: # filename = 'v_' + str(self.__width__) + 'x' + \ # str(self.__height__) + '@' + str(self.__raw_framerate__) + \ # '_I420_{0:0{1}}.raw'.format(self.__index__-1, 2) # logging.debug(function_name + ": " + filename) # else: # filename = 'v_' + str(self.__width__) + 'x' + \ # str(self.__height__) + '@' + str(self.__raw_framerate__) + \ # '_I420_{0:0{1}}.raw'.format(self.__index__, 2) # logging.debug(function_name + ": " + filename) elif t == Gst.MessageType.STATE_CHANGED: # if rtsp-sink or file-sink if ( message.src.name == 'rtsp-sink' or message.src.name == 'file-sink' ): s = message.get_structure() # is PLAYING if ( s.get_value('new-state') == Gst.State.PLAYING and s.get_value('pending-state') == Gst.State.VOID_PENDING ): if message.src.name == 'rtsp-sink': logging.info("Streaming started") else: if ( self.__model__ == 'imx219' and self.__source__.get_property('annotation-mode') == 0x00000000 ): self.__source__.set_property( 'annotation-mode', 0x0000040C) if self.__model__ == 'ov9281': self.set_stats(self.__stats__) self.send_keyframe() logging.info("Recording started") # and request was executed in unsafe context if not self.__safe__: # if not unblocked by an error if self.__main_lock__.locked(): # unblock concurrent requests self.__main_lock__.release() logging.debug( function_name + ": self.__main_lock__.release()") # and request was executed in safe context else: # if not unblocked by an error if self.__restart_lock__.locked(): # unblock execution self.__restart_lock__.release() logging.debug( function_name + ": self.__restart_lock__.release()") return Gst.BusSyncReply.PASS def __get_source__(self): """ Return camera source Returns: GstRpiCamSrc: camera source """ if self.__model__ == 'imx219': source = Gst.ElementFactory.make('rpicamsrc', 'camera-source') source.set_property('preview', 0) source.set_property('annotation-mode', self.__stats__) source.set_property( 'annotation-text', 'Copyright (c) 2021 Marcin Sielski\n\n' + self.__model__ + ' ') # NOTE(marcin.sielski): camera-timeout property is not available in # regular GStreamer builds. 
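            # NOTE: __camera_timeout__ is expressed in milliseconds (the
            # --camera_timeout option defaults to 0, i.e. no timeout); the
            # same value is reused in __on_restart__ to delay releasing the
            # error lock after a recovery attempt.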
source.set_property('camera-timeout', self.__camera_timeout__) # Quality source.set_property('sensor-mode', self.__sensor_mode__) # Effects source.set_property('brightness', self.__brightness__) source.set_property('contrast', self.__contrast__) source.set_property('saturation', self.__saturation__) source.set_property('sharpness', self.__sharpness__) source.set_property('drc', self.__drc__) source.set_property('image_effect', self.__image_effect__) source.set_property('awb-mode', self.__awb_mode__) source.set_property('awb-gain-blue', self.__awb_gain_blue__) source.set_property('awb-gain-red', self.__awb_gain_red__) # Controls source.set_property('exposure-mode', self.__exposure_mode__) source.set_property('metering-mode', self.__metering_mode__) source.set_property('exposure-compensation', self.__exposure_compensation__) source.set_property('iso', self.__iso__) source.set_property('shutter-speed', self.__shutter_speed__) source.set_property('video-stabilisation', self.__video_stabilisation__) # Orientation source.set_property('rotation', self.__rotation__) source.set_property('hflip', self.__hflip__) source.set_property('vflip', self.__vflip__) source.set_property('video-direction', self.__video_direction__) if self.__model__ == 'ov9281': source = Gst.ElementFactory.make('arducamsrc', 'camera-source') # Controls source.set_property('exposure-mode', self.__exposure_mode__) source.set_property('shutter-speed', self.__shutter_speed__) source.set_property('gain', self.__gain__) source.set_property('awb', self.__awb__) # Orientation source.set_property('hflip', self.__hflip__) source.set_property('vflip', self.__vflip__) return source # Quality def set_resolution(self, width, height): """ Set resolution of the video Args: width (int): width of the video height (int): height of the video """ self.__width__ = width self.__height__ = height if ( self.__sensor_mode__ == 6 and self.__width__ == 1280 and self.__height__ == 720 and self.__framerate__ > 60 ): self.__framerate__ = 60 self.restart() def set_framerate(self, framerate): """ Set framerate of the video stream Args: framerate (int): framerate of the video stream """ self.__framerate__ = framerate self.restart() def set_bitrate_mode(self, bitrate_mode): """ Set desired bitrate mode of the video stream Args: bitrate_mode (int): desired bitrate mode of the video stream """ self.__bitrate_mode__ = bitrate_mode self.restart() def set_bitrate(self, bitrate): """ Set desired bitrate of the video stream Args: bitrate (int): desired bitrate of the video stream """ self.__bitrate__ = bitrate self.restart() def set_sensor_mode(self, sensor_mode): """ Set camera sensor mode Args: sensor_mode (int): camera sensor mode """ self.__sensor_mode__ = sensor_mode if self.__model__ == 'imx219': if self.__sensor_mode__ == 0: self.__framerate__ = 15 self.__width__ = 800 self.__height__ = 608 if self.__sensor_mode__ == 1: self.__framerate__ = 15 self.__width__ = 960 self.__height__ = 544 if self.__sensor_mode__ == 2: self.__framerate__ = 15 self.__width__ = 800 self.__height__ = 608 if self.__sensor_mode__ == 3: self.__framerate__ = 15 self.__width__ = 800 self.__height__ = 608 if self.__sensor_mode__ == 4: self.__framerate__ = 15 self.__width__ = 800 self.__height__ = 608 if self.__sensor_mode__ == 5: self.__framerate__ = 15 self.__width__ = 960 self.__height__ = 544 if self.__sensor_mode__ == 6: self.__framerate__ = 40 self.__width__ = 960 self.__height__ = 544 if self.__sensor_mode__ == 7: self.__framerate__ = 40 self.__width__ = 640 self.__height__ = 480 if 
self.__model__ == 'ov9281': if self.__sensor_mode__ == 0: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 if self.__sensor_mode__ == 1: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 720 if self.__sensor_mode__ == 2: self.__framerate__ = 30 self.__width__ = 640 self.__height__ = 400 if self.__sensor_mode__ == 3: self.__framerate__ = 30 self.__width__ = 320 self.__height__ = 200 if self.__sensor_mode__ == 4: self.__framerate__ = 30 self.__width__ = 160 self.__height__ = 100 if self.__sensor_mode__ == 5: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 if self.__sensor_mode__ == 6: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 if self.__sensor_mode__ == 7: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 if self.__sensor_mode__ == 8: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 720 if self.__sensor_mode__ == 9: self.__framerate__ = 30 self.__width__ = 640 self.__height__ = 400 if self.__sensor_mode__ == 10: self.__framerate__ = 30 self.__width__ = 320 self.__height__ = 200 if self.__sensor_mode__ == 11: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 if self.__sensor_mode__ == 12: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 if self.__sensor_mode__ == 13: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 720 if self.__sensor_mode__ == 14: self.__framerate__ = 30 self.__width__ = 640 self.__height__ = 400 if self.__sensor_mode__ == 15: self.__framerate__ = 30 self.__width__ = 320 self.__height__ = 200 if self.__sensor_mode__ == 16: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 if self.__sensor_mode__ == 17: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 720 if self.__sensor_mode__ == 18: self.__framerate__ = 60 self.__width__ = 640 self.__height__ = 400 if self.__sensor_mode__ == 19: self.__framerate__ = 80 self.__width__ = 320 self.__height__ = 200 if self.__sensor_mode__ == 20: self.__framerate__ = 80 self.__width__ = 160 self.__height__ = 100 if self.__sensor_mode__ == 21: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 if self.__sensor_mode__ == 22: self.__framerate__ = 30 self.__width__ = 1280 self.__height__ = 800 self.restart() # Effects def set_brightness(self, brightness): """ Set brightness of the video Args: brightness (int): brightness of the video """ self.__brightness__ = brightness self.__source__.set_property('brightness', self.__brightness__) def set_contrast(self, contrast): """ Set contrast of video Args: contrast (int): contrast of the video """ self.__contrast__ = contrast self.__source__.set_property('contrast', self.__contrast__) def set_saturation(self, saturation): """ Set saturation of the video Args: saturation (int): saturation of the video """ self.__saturation__ = saturation self.__source__.set_property('saturation', self.__saturation__) def set_sharpness(self, sharpness): """ Set sharpness of the video Args: sharpness (int): sharpness of the video """ self.__sharpness__ = sharpness self.__source__.set_property('sharpness', self.__sharpness__) def set_drc(self, drc): """ Set Dynamic Range Control (DRC) Args: drc (int): dynamic range control level """ self.__drc__ = drc self.__source__.set_property('drc', self.__drc__) def set_image_effect(self, image_effect): """ Set image effect filter Args: image_effect (int): image effect filter """ self.__image_effect__ = image_effect self.__source__.set_property('image-effect', 
self.__image_effect__) def set_awb_mode(self, awb_mode): """ Set Auto White Balance mode (AWB) Args: awb_mode (int): auto white balance mode """ self.__awb_mode__ = awb_mode self.__awb_gain_blue__ = 0 self.__awb_gain_red__ = 0 self.__source__.set_property('awb-gain-blue', self.__awb_gain_blue__) self.__source__.set_property('awb-gain-red', self.__awb_gain_red__) if self.__awb_mode__ == 0 or self.__awb_mode__ == 9: self.restart() else: self.__source__.set_property('awb-mode', self.__awb_mode__) def set_awb_gain_blue(self, awb_gain_blue): """ Set White Balance Gain for blue channel and disable AWB Args: awb_gain_blue (int): white balance gain for blue channel """ self.__awb_gain_blue__ = awb_gain_blue self.__awb_mode__ = 0 self.__source__.set_property('awb-mode', self.__awb_mode__) if self.__awb_gain_blue__ == 0: self.restart() else: self.__source__.set_property( 'awb-gain-blue', self.__awb_gain_blue__) def set_awb_gain_red(self, awb_gain_red): """ Set White Balance Gain for red channel and disable AWB Args: awb_gain_red (int): white balance gain for the red channel """ self.__awb_gain_red__ = awb_gain_red self.__awb_mode__ = 0 self.__source__.set_property('awb-mode', self.__awb_mode__) if self.__awb_gain_red__ == 0: self.restart() else: self.__source__.set_property('awb-gain-red', self.__awb_gain_red__) # Controls def set_exposure_mode(self, exposure_mode): """ Set exposure mode Args: exposure_mode (int): exposure mode """ self.__exposure_mode__ = exposure_mode if self.__exposure_mode__ == 1: self.__shutter_speed__ = 0 if self.__exposure_mode__ == 0 and self.__model__ == 'imx219': self.restart() else: self.__source__.set_property('exposure-mode', self.__exposure_mode__) def set_metering_mode(self, metering_mode): """ Set metering mode Args: metering_mode (int): metering mode """ self.__metering_mode__ = metering_mode self.__source__.set_property('metering-mode', self.__metering_mode__) def set_exposure_compensation(self, exposure_compensation): """ Set exposure compensation Args: exposure_compensation (int): exposure compensation """ self.__exposure_compensation__ = exposure_compensation self.__source__.set_property('exposure-compensation', self.__exposure_compensation__) def set_iso(self, iso): """ Set ISO value Args: iso (int): ISO value """ self.__iso__ = iso self.__source__.set_property('iso', self.__iso__) def set_shutter_speed(self, shutter_speed): """ Set shutter speed in microseconds Args: shutter_speed (int): shutter speed """ self.__shutter_speed__ = shutter_speed if self.__shutter_speed__ == 0: self.__exposure_mode__ = 1 else: self.__exposure_mode__ = 0 if self.__model__ == 'imx219': self.restart() if self.__model__ == 'ov9281': self.__source__.set_property( 'shutter-speed', self.__shutter_speed__) def set_video_stabilisation(self, video_stabilisation): """ Set video stabilisation Args: video_stabilisation (int): """ self.__video_stabilisation__ = video_stabilisation if self.__video_stabilisation__: self.__source__.set_property( 'video-stabilisation', self.__video_stabilisation__) else: self.restart() def set_gain(self, gain): """ Set gain Args: gain (int): gain """ self.__gain__ = gain self.__source__.set_property('gain', self.__gain__) def set_awb(self, awb): """ Set awb Args: awb (int): awb """ self.__awb__ = awb self.__source__.set_property('awb', self.__awb__) # Orientation def set_rotation(self, rotation): """ Set rotation of the video stream Args: rotation (int): rotation of the video stream """ self.__rotation__ = rotation self.__source__.set_property('rotation', 
self.__rotation__) self.__flip__() def __flip__(self): """ Calculate video orientation based on rotation, hflip and vflip """ if ( ( self.__rotation__ == 0 and self.__hflip__ == False and self.__vflip__ == False ) or ( self.__rotation__ == 180 and self.__hflip__ == True and self.__vflip__ == True ) ): self.__video_direction__ = 0 if ( ( self.__rotation__ == 90 and self.__hflip__ == False and self.__vflip__ == False ) or ( self.__rotation__ == 270 and self.__hflip__ == True and self.__vflip__ == True ) ): self.__video_direction__ = 1 if ( ( self.__rotation__ == 0 and self.__hflip__ == True and self.__vflip__ == True ) or ( self.__rotation__ == 180 and self.__hflip__ == False and self.__vflip__ == False ) ): self.__video_direction__ = 2 if ( ( self.__rotation__ == 90 and self.__hflip__ == True and self.__vflip__ == True ) or ( self.__rotation__ == 270 and self.__hflip__ == False and self.__vflip__ == False ) ): self.__video_direction__ = 3 if ( ( self.__rotation__ == 0 and self.__hflip__ == True and self.__vflip__ == False ) or ( self.__rotation__ == 180 and self.__hflip__ == False and self.__vflip__ == True ) ): self.__video_direction__ = 4 if ( ( self.__rotation__ == 0 and self.__hflip__ == False and self.__vflip__ == True ) or ( self.__rotation__ == 180 and self.__hflip__ == True and self.__vflip__ == False ) ): self.__video_direction__ = 5 if ( ( self.__rotation__ == 90 and self.__hflip__ == False and self.__vflip__ == True ) or ( self.__rotation__ == 270 and self.__hflip__ == True and self.__vflip__ == False ) ): self.__video_direction__ = 6 if ( ( self.__rotation__ == 90 and self.__hflip__ == True and self.__vflip__ == False ) or ( self.__rotation__ == 270 and self.__hflip__ == False and self.__vflip__ == True ) ): self.__video_direction__ = 7 def set_hflip(self, hflip): """ Set horizontal video stream flip Args: hflip (bool): True if video shall be horizontally flipped, False otherwise """ self.__hflip__ = hflip self.__source__.set_property('hflip', self.__hflip__) self.__flip__() def set_vflip(self, vflip): """ Set vertical video stream flip Args: vflip (bool): True if video stream shall be vertically flipped, False otherwise """ self.__vflip__ = vflip self.__source__.set_property('vflip', self.__vflip__) self.__flip__() def set_video_direction(self, video_direction): """ Set video stream direction Args: video_direction (int): direction of the video stream """ self.__video_direction__ = video_direction if self.__video_direction__ == 0: self.__rotation__ = 0 self.__hflip__ = False self.__vflip__ = False if self.__video_direction__ == 1: self.__rotation__ = 90 self.__hflip__ = False self.__vflip__ = False if self.__video_direction__ == 2: self.__rotation__ = 180 self.__hflip__ = False self.__vflip__ = False if self.__video_direction__ == 3: self.__rotation__ = 270 self.__hflip__ = False self.__vflip__ = False if self.__video_direction__ == 4: self.__rotation__ = 0 self.__hflip__ = True self.__vflip__ = False if self.__video_direction__ == 5: self.__rotation__ = 0 self.__hflip__ = False self.__vflip__ = True if self.__video_direction__ == 6: self.__rotation__ = 90 self.__hflip__ = False self.__vflip__ = True if self.__video_direction__ == 7: self.__rotation__ = 270 self.__hflip__ = False self.__vflip__ = True self.__source__.set_property( 'video-direction', self.__video_direction__) def __on_stats__(self): """ Callback function executed in the background to collect statistics """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' 
+ inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") if self.__model__ == 'imx219': if self.__stats__ == 0x0000040C or self.__stats__ == 0x00000000: self.__stats_id__ = 0 logging.debug(function_name + ": false") return False self.__source__.set_property( 'annotation-text', 'CPU: ' + str(psutil.cpu_percent()) + '% MEM: ' + str(psutil.virtual_memory().percent) + '% TMP: ' + str(round(CPUTemperature().temperature, 1)) + 'C DSK: ' + str(round(DiskUsage().usage, 1)) + '% THR: ' + subprocess.check_output( ['vcgencmd', 'get_throttled']).decode('utf-8').replace( 'throttled=','').strip() + '\n\n' + self.__model__ + ' ') if self.__model__ == 'ov9281': tm = time.localtime() if self.__record__ and self.__stats__ == 0x00000000: self.__overlay__.set_property( 'text', str(tm.tm_hour) + ':' + str(tm.tm_min).zfill(2) + ':' + str(tm.tm_sec).zfill(2) + ' ' + str(tm.tm_mon) + '/' + str(tm.tm_mday) + '/' + str(tm.tm_year)) else: shutter_speed = self.__source__.get_property('shutter-speed') self.__overlay__.set_property( 'text', 'CPU: ' + str(psutil.cpu_percent()) + '% MEM: ' + str(psutil.virtual_memory().percent) + '% TMP: ' + str(round(CPUTemperature().temperature, 1)) + 'C DSK: ' + str(round(DiskUsage().usage, 1)) + '% THR: ' + subprocess.check_output( ['vcgencmd', 'get_throttled']).decode('utf-8').replace( 'throttled=','').strip() + '\n' + self.__model__ + ' ' + str(tm.tm_hour) + ':' + str(tm.tm_min).zfill(2) + ':' + str(tm.tm_sec).zfill(2) + ' ' + str(tm.tm_mon) + '/' + str(tm.tm_mday) + '/' + str(tm.tm_year) + '\n' + 'Shutter (current: ' + str(shutter_speed) + ', range: 30000)' ) logging.debug(function_name + ": true") return True def set_stats(self, stats): """ Set stats overlay on the video stream Args: stats (int): stats to overlay on the video stream """ if self.__model__ == 'imx219': if self.__stats_id__ != 0: GLib.source_remove(self.__stats_id__) self.__stats_id__ = 0 self.__source__.set_property( 'annotation-text', 'Copyright (c) 2021 Marcin Sielski\n\n' + self.__model__ + ' ') if self.__record__ and stats == 0x00000000: self.__stats__ = 0x0000040C else: self.__stats__ = stats self.__stats_id__ = GLib.timeout_add_seconds( 1, self.__on_stats__) self.__source__.set_property('annotation-mode', self.__stats__) if self.__model__ == 'ov9281': if self.__stats_id__ != 0: GLib.source_remove(self.__stats_id__) self.__stats_id__ = 0 if self.__record__ and stats == 0x00000000: self.__stats__ = stats tm = time.localtime() self.__overlay__.set_property( 'text', str(tm.tm_hour) + ':' + str(tm.tm_min).zfill(2) + ':' + str(tm.tm_sec).zfill(2) + ' ' + str(tm.tm_mon) + '/' + str(tm.tm_mday) + '/' + str(tm.tm_year)) self.__overlay__.set_property('silent', False) self.__stats_id__ = GLib.timeout_add_seconds( 1, self.__on_stats__) else: self.__stats__ = stats tm = time.localtime() if stats == 0x00000000 or (stats == 0x00000000 and not self.__record__): self.__overlay__.set_property('silent', True) else: self.__overlay__.set_property( 'text', 'Copyright (c) 2021 Marcin Sielski\n' + self.__model__ + ' ' + str(tm.tm_hour).zfill(2) + ':' + str(tm.tm_min).zfill(2) + ':' + str(tm.tm_sec).zfill(2) + ' ' + str(tm.tm_mon) + '/' + str(tm.tm_mday) + '/' + str(tm.tm_year)) self.__overlay__.set_property('silent', False) self.__stats_id__ = GLib.timeout_add_seconds( 1, self.__on_stats__) def __enable_disable_rtsp__(self, pad, info): """ Enable or disable RTSP streaming Args: pad (Pad): probe pad info (PadProbeInfo): pad probe info Returns: PadProbeReturn: DROP data in data probes """ function_name = "'" + 
threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") pad.remove_probe(info.id) # if this is start streaming request if self.__rtsp__: # create pipeline self.__rtsp_queue__ = Gst.ElementFactory.make('queue', 'rtsp-queue') self.__rtsp_sink__ = Gst.ElementFactory.make('udpsink', 'rtsp-sink') self.__rtsp_sink__.set_property('host', '127.0.0.1') self.__rtsp_sink__.set_property('port', 3141) self.__rtsp_sink__.set_property('sync', False) self.__pipeline__.add(self.__rtsp_queue__) self.__pipeline__.add(self.__rtsp_sink__) self.__rtsp_tee__.link(self.__rtsp_queue__) self.__rtsp_queue__.link(self.__rtsp_sink__) self.__rtsp_queue__.set_state(Gst.State.PLAYING) self.__rtsp_sink__.set_state(Gst.State.PLAYING) # if this is stop streaming request else: # destroy pipeline self.__rtsp_queue__.set_state(Gst.State.NULL) self.__rtsp_sink__.set_state(Gst.State.NULL) self.__pipeline__.remove(self.__rtsp_queue__) self.__pipeline__.remove(self.__rtsp_sink__) logging.info("Streaming stopped") # if function is execute in the unsafe context if not self.__safe__: # if not unblocked by an error if self.__main_lock__.locked(): self.__main_lock__.release() logging.debug( function_name + ": self.__main_lock__.release()") # if function is executed in safe context else: # if not unblocked by an error if self.__restart_lock__.locked(): self.__restart_lock__.release() logging.debug( function_name + ": self.__restart_lock__.release()") logging.debug(function_name + ": return Gst.PadProbeReturn.DROP") return Gst.PadProbeReturn.DROP def set_rtsp(self, rtsp, safe=False): """ Set RTSP streaming Args: rtsp (bool): True if streaming shall be enabled, False otherwise safe (bool): indicate if function is executed in safe context """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug( function_name + ": rtsp=" + str(rtsp) + ", safe=" + str(safe)) # discard invalid requests if self.__rtsp__ == rtsp: logging.warning("Discarding invalid RTSP request") logging.debug(function_name + ": exit") return # if function is executed in unsafe context if not safe: # if error is pending if self.__error_lock__.locked(): # discard the request logging.warning("Discarding RTSP request due to pending error") logging.debug(function_name + ": exit") return # block concurrent requests logging.debug( function_name + ": self.__main_lock__.acquire(blocking=True)") self.__main_lock__.acquire(blocking=True) # if error is pending if self.__error_lock__.locked(): # discard the request logging.warning("Discarding RTSP request due to pending error") logging.debug(function_name + ": exit") return self.__safe__ = safe self.__rtsp__ = rtsp #srcpad = self.__source__.get_static_pad( "src") srcpad = self.__payloader__.get_static_pad( "src") srcpad.add_probe( Gst.PadProbeType.BLOCK_DOWNSTREAM, self.__enable_disable_rtsp__) logging.debug(function_name + ": exit") def __push_eos__(self): """ Push EOS to the sinkpad """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' 
+ inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") sinkpad = self.__file_queue__.get_static_pad("src").get_peer() logging.info("Pushing EOS event on pad " + sinkpad.name) self.__pipeline__.set_property("message-forward", True) sinkpad.send_event(Gst.Event.new_eos()) logging.debug(function_name + ": exit") def __on_store__(self): """ Store data on disk callback. """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") if self.__persistent__: logging.info("Writing parameters to 'camera.json' file") with open('camera.json', 'w') as config: config.write(self.get_parameters()) os.system('sudo fake-hwclock') os.sync() logging.debug(function_name + ": exit") def __on_format_location__(self, splitmux, fragment_id): """ format-location callback executed when new file is about to be created Args: splitmux (GstSplitMuxSink): splitmux sink element fragment_id (int): fragment id Returns: str: file name """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": fragment_id=" + str(fragment_id)) if self.__format__: result = 'v_' + str(self.__width__) + 'x' + str(self.__height__) + \ '_HYUV_{0:0{1}}.mkv'.format(fragment_id, 2) else: result = 'v_' + str(self.__width__) + 'x' + str(self.__height__) + \ '_H264_{0:0{1}}.mp4'.format(fragment_id, 2) self.__fragment_id__ = fragment_id + 1 GLib.timeout_add_seconds(0, self.__on_store__) logging.debug(function_name + ": return " + result) return result # def __on_overrun__(self, queue): # """ # Queue overrun callback executed when queue is full # Args: # queue (GstQueue): queue # """ # function_name = "'" + threading.currentThread().name + "'." + \ # type(self).__name__ + '.' + inspect.currentframe().f_code.co_name # logging.debug(function_name + ": entry") # self.__raw_framerate__ = self.__raw_framerate__ - 1 # if self.__raw_framerate__ <= 0: # self.__raw_framerate__ = 1 # self.__file_rate__.set_property('max-rate', self.__raw_framerate__) # self.__file_sink__.set_property( # 'location', 'v_' + str(self.__width__) + 'x' + # str(self.__height__) + '@' + str(self.__raw_framerate__) + # '_I420_%02d.raw') # logging.debug(function_name + ": exit") def __enable_disable_record__(self, pad, info): """ Enable or disable video recording Args: pad (Pad): probe pad info (PadProbeInfo): pad probe info Returns: PadProbeReturn: DROP data in data probes """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' 
+ inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") # if this is record request if self.__record__: # create pipeline pad.remove_probe(info.id) self.__file_queue__ = Gst.ElementFactory.make('queue', 'file-queue') self.__file_queue__.set_property( 'max-size-bytes', 0) self.__file_queue__.set_property( 'max-size-buffers', 0) self.__file_queue__.set_property('max-size-time', 0) if self.__format__: if self.__throughput__ > 0: #self.__file_queue__.set_property('leaky', 1) # self.__file_queue__.connect( # 'overrun', self.__on_overrun__) self.__file_rate__ = Gst.ElementFactory.make( 'videorate', 'rate') # estimate required throughput throughput = round(self.__width__ * self.__height__ * 12 *\ self.__framerate__ / (8 * 1024 * 1024)) if throughput > self.__throughput__: self.__raw_framerate__ = \ round(self.__throughput__ * 8 * 1024 * 1024 / (self.__width__ * self.__height__ * 12)) if self.__raw_framerate__ <= 1: self.__raw_framerate__ = 1 else: self.__raw_framerate__ = self.__framerate__ logging.debug( function_name + ': self.__raw__framerate__=' + str(self.__raw_framerate__)) self.__file_rate__.set_property( 'max-rate', self.__raw_framerate__) self.__file_rate__.set_property('drop-only', True) self.__file_converter__ =\ Gst.ElementFactory.make('videoconvert','converter') self.__file_encoder__ = \ Gst.ElementFactory.make('avenc_huffyuv','file-encoder') self.__file_muxer__ = \ Gst.ElementFactory.make('matroskamux','file-muxer') self.__file_sink__ = Gst.ElementFactory.make( 'splitmuxsink', 'file-sink') self.__file_sink__.set_property( 'muxer', self.__file_muxer__) self.__file_sink__.set_property( 'max-size-time', self.__max_size_time__) self.__file_sink__.set_property( 'max-size-bytes', self.__max_size_bytes__) self.__file_sink__.set_property('max-files', self.__max_files__) logging.debug( function_name + ": self.__fragment_id__=" + str(self.__fragment_id__)) self.__file_sink__.set_property( 'start-index', self.__fragment_id__) self.__file_sink__.connect( 'format-location', self.__on_format_location__) self.__file_sink__.set_property( 'location', 'v_' + str(self.__width__) + 'x' + str(self.__height__) + '_HYUV_{0:0{1}}.mkv'.format(self.__fragment_id__, 2)) # self.__file_sink__ = Gst.ElementFactory.make( # 'multifilesink', 'file-sink') # self.__file_sink__.set_property( # 'location', 'v_' + str(self.__width__) + 'x' + # str(self.__height__) + '@' + str(self.__raw_framerate__) + # '_I420_%02d.raw') # self.__index_changed__ = False # self.__file_sink__.set_property( # 'max-file-duration', self.__max_size_time__) # self.__file_sink__.set_property( # 'max-file-size', self.__max_size_bytes__) # self.__file_sink__.set_property('max-files', self.__max_files__) # if self.__max_size_time__ == 0 and self.__max_size_bytes__ == 0: # self.__next_file__ = 3 # elif self.__max_size_time__ > 0 and self.__max_size_bytes__ > 0: # # NOTE(marcin.sielski): This is new option not available in # # regular GStreamer builds. 
# self.__next_file__ = 6 # elif self.__max_size_time__ > 0: # self.__next_file__ = 5 # elif self.__max_size_bytes__ > 0: # self.__next_file__ = 4 # logging.debug( # function_name + ": self.__next_file__=" + # str(self.__next_file__)) # self.__file_sink__.set_property('next-file', self.__next_file__) # self.__file_sink__.set_property('post-messages', True) # self.__file_sink__.set_property('async', True) # self.__file_sink__.set_property('sync', False) # if self.__index__ >= 10: # self.__index__ = 0 # logging.debug( # function_name + ": self.__index__=" + str(self.__index__)) # self.__file_sink__.set_property('index', self.__index__) else: self.__file_sink__ = Gst.ElementFactory.make( 'splitmuxsink', 'file-sink') self.__file_sink__.set_property( 'max-size-time', self.__max_size_time__) self.__file_sink__.set_property( 'max-size-bytes', self.__max_size_bytes__) self.__file_sink__.set_property('max-files', self.__max_files__) logging.debug( function_name + ": self.__fragment_id__=" + str(self.__fragment_id__)) self.__file_sink__.set_property( 'start-index', self.__fragment_id__) self.__file_sink__.connect( 'format-location', self.__on_format_location__) self.__file_sink__.set_property( 'location', 'v_' + str(self.__width__) + 'x' + str(self.__height__) + '_H264_{0:0{1}}.mp4'.format(self.__fragment_id__, 2)) self.__file_sink__.set_property('send-keyframe-requests', True) self.__pipeline__.add(self.__file_queue__) self.__pipeline__.add(self.__file_sink__) if self.__format__: if self.__throughput__ > 0: self.__pipeline__.add(self.__file_rate__) self.__pipeline__.add(self.__file_converter__) self.__pipeline__.add(self.__file_encoder__) self.__raw_tee__.link(self.__file_queue__) if self.__throughput__ > 0: self.__file_queue__.link(self.__file_rate__) self.__file_rate__.link(self.__file_converter__) else: self.__file_queue__.link(self.__file_converter__) self.__file_converter__.link(self.__file_encoder__) self.__file_encoder__.link(self.__file_sink__) if self.__throughput__ > 0: self.__file_rate__.set_state(Gst.State.PLAYING) self.__file_converter__.set_state(Gst.State.PLAYING) self.__file_encoder__.set_state(Gst.State.PLAYING) else: self.__h264_tee__.link(self.__file_queue__) self.__file_queue__.link(self.__file_sink__) self.__file_queue__.set_state(Gst.State.PLAYING) self.__file_sink__.set_state(Gst.State.PLAYING) logging.debug(function_name + ": return Gst.PadProbeReturn.DROP") return Gst.PadProbeReturn.DROP def set_record(self, record, safe=False): """ Set video recording Args: record (bool): True if recording shall started, False otherwise safe (bool): indicate if function is executed from safe context """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' 
+ inspect.currentframe().f_code.co_name logging.debug( function_name + ": record=" + str(record) + ", safe=" + str(safe)) # discard invalid requests if self.__record__ == record: logging.warning("Discarding invalid record request") logging.debug(function_name + ": exit") return # if function is executed in unsafe context if not safe: # if error is pending if self.__error_lock__.locked(): # discard the request logging.warning( "Discarding record request due to pending error") logging.debug(function_name + ": exit") return # block concurrent requests logging.debug( function_name + ": self.__main_lock__.acquire(blocking=True)") self.__main_lock__.acquire(blocking=True) # if error is pending if self.__error_lock__.locked(): # discard the request logging.warning( "Discarding record request due to pending error") logging.debug(function_name + ": exit") return self.__safe__ = safe self.__record__ = record if self.__record__: srcpad = None if self.__format__: srcpad = self.__source__.get_static_pad( "src") else: srcpad = self.__encoder__.get_static_pad( "src") srcpad.add_probe( Gst.PadProbeType.BLOCK_DOWNSTREAM, self.__enable_disable_record__) else: # if self.__format__: # self.__index__ = self.__index__ + 1 # if self.__index__ >= 10: # self.__index__ = 0 srcpad = self.__file_queue__.get_static_pad("src") self.probe_id = srcpad.add_probe( Gst.PadProbeType.BLOCK | Gst.PadProbeType.BUFFER, self.__enable_disable_record__) threading.Thread(target=self.__push_eos__, args=()).start() logging.debug(function_name + ": exit") def remove(self, filename): """ Remove file from media folder Args: filename (str): name of the file to remove """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": filename=" + str(filename)) if filename == '': _, _, filenames = next(os.walk('.')) media = [] for filename in filenames: if filename.endswith('.mkv') or filename.endswith('.mp4'): if os.path.exists(filename): os.remove(filename) else: logging.warning( function_name + ": filename=" + str(filename) + " does not exist") else: if os.path.exists(filename): os.remove(filename) else: logging.warning( function_name + ": filename=" + str(filename) + " does not exist") logging.debug(function_name + ": exit") def restart(self, safe=False): """ Restart Camera Server Args: safe (bool): indicate if function is executed in the safe context """ #snapshot1 = tracemalloc.take_snapshot() function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' 
+ inspect.currentframe().f_code.co_name logging.debug(function_name + ": safe=" + str(safe)) # if function is executed in unsafe context if not safe: # if error is pending if self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return # block concurrent requests logging.debug( function_name + ": self.__main_lock__.acquire(blocking=True)") self.__main_lock__.acquire(blocking=True) # if error is pending if self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return rtsp = self.__rtsp__ record = self.__record__ # if server is streaming if self.__rtsp__: # stop streaming logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) # if function is executed in unsafe context and if error is pending if not safe and self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return self.set_rtsp(False, True) logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) # if function is executed in unsafe context and if error is pending if not safe and self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return self.__restart_lock__.release() logging.debug(function_name + ": self.__restart_lock__.release()") # if server is recording if self.__record__: # stop recording logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) # if function is executed in unsafe context and if error is pending if not safe and self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return self.set_record(False, True) logging.debug( function_name + ": __restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) # if function is executed in unsafe context and if error is pending if not safe and self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return self.__restart_lock__.release() logging.debug(function_name + ": self.__restart_lock__.release()") super().restart() # if server was streaming before the restart if rtsp: # start streaming logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) # if function is executed in unsafe context and if error is pending if not safe and self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return self.set_rtsp(True, True) logging.debug( function_name + ": __restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) # if function is executed in unsafe context and if error is pending if not safe and self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return self.__restart_lock__.release() logging.debug(function_name + ": 
self.__restart_lock__.release()") # if server was recording before the restart if record: # start recording logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) # if function is executed in unsafe context and if error is pending if not safe and self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return self.set_record(True, True) logging.debug( function_name + ": self.__restart_lock__.acquire(blocking=True)") self.__restart_lock__.acquire(blocking=True) # if function is executed in unsafe context and if error is pending if not safe and self.__error_lock__.locked(): # discard the request logging.warning( "Discarding restart request due to pending error") logging.debug(function_name + ": exit") return self.__restart_lock__.release() logging.debug(function_name + ": self.__restart_lock__.release()") # if function is executed in unsafe context and was not unblocked by # an error if not safe and self.__main_lock__.locked(): # unblock concurrent requests self.__main_lock__.release() logging.debug(function_name + ": self.__main_lock__.release()") logging.debug(function_name + ": exit") #snapshot2 = tracemalloc.take_snapshot() #top_stats = snapshot2.compare_to(snapshot1, 'lineno') #for stat in top_stats[:10]: # print(stat) def set_format(self, format): """ Set format of recorded video file Args: format (bool): format of recorded video file """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": format="+str(format)) self.__format__ = format if self.__record__: self.set_record(False) self.set_record(True) logging.debug(function_name + ": exit") def set_max_files(self, max_files): """ Set maximum number of recorded video files to keep on storage device Once the maximum is reached old files start to be deleted to make room for new ones Args: max_files (int): maximum number of recorded video files """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": max_files=" + str(max_files)) self.__max_files__ = max_files if self.__file_sink__ is not None: self.__file_sink__.set_property('max-files', self.__max_files__) logging.debug(function_name + ": exit") def set_max_size_bytes(self, max_size_bytes): """ Set maximum size of the recorded video file in bytes Args: max_size_bytes (int): maximum size of the recorded video file in bytes """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": max_size_bytes=" + str(max_size_bytes)) self.__max_size_bytes__ = max_size_bytes if self.__record__: self.set_record(False) self.set_record(True) logging.debug(function_name + ": exit") def set_max_size_time(self, max_size_time): """ Set maximum size of the recorded video file in nanoseconds Args: max_size_time (int): maximum size of the recorded video file in nanoseconds """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' 
+ inspect.currentframe().f_code.co_name logging.debug(function_name + ": max_size_time=" + str(max_size_time)) self.__max_size_time__ = max_size_time if self.__record__: self.set_record(False) self.set_record(True) logging.debug(function_name + ": exit") def set_persistent(self, persistent): """ Set configuration persistence Args: persistent (bool): True if configuration shall be persistent, False otherwise """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": persistent=" + str(persistent)) self.__persistent__ = persistent logging.debug(function_name + ": exit") def set_continuation(self, continuation): """ Set configuration continuation Args: continuation (bool): True if recording shall continuation from last fragment id, False otherwise """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": continuation=" + str(continuation)) self.__continuation__ = continuation logging.debug(function_name + ": exit") def set_logging_level(self, logging_level): """ Set logging level Args: logging_level (int): logging level """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": logging_level=" + str(logging_level)) self.__logging_level__ = logging_level root = logging.getLogger() for h in root.handlers[:]: root.removeHandler(h) h.close() if self.__default_logging_level__ != 0: if self.__logging_level__ == 0: Gst.debug_set_active(False) logging.basicConfig( format="%(asctime)s %(levelname)s: %(message)s", level=self.__default_logging_level__) else: Gst.debug_set_colored(False) Gst.debug_set_default_threshold((50-self.__logging_level__+10)/10) Gst.debug_set_active(True) logging.basicConfig( format="%(asctime)s %(levelname)s: %(message)s", level=self.__logging_level__) logging.debug(function_name + ": exit") class Servers(object): """ Servers """ def __init__(self, servers): """ Initialize servers Args: servers (list): list of servers """ self.__servers__ = servers self.__threads__ = [] def start(self): """ Starts servers """ for server in self.__servers__: thread = threading.Thread( name=name(server) + ' Thread', target=server.start, args=()) thread.deamon = True thread.start() self.__threads__.append(thread) def stop(self): """ Stop servers """ for server in self.__servers__: server.stop() for thread in self.__threads__: thread.join() class CameraService: """ Camera Handler """ def __init__(self): """ Initialize Camera Service """ signal.signal(signal.SIGTERM, self.stop) #signal.signal(signal.SIGKILL, self.stop) self.__running__ = False def get_parser(self): """ Parse input arguments Returns: parser (ArgumentParser): argument parser """ # NOTE(marcin.sielski): Do not put here logging statements. parser = ArgumentParser() parser.add_argument( '-d', '--debug', type=str, nargs='?', const='DEBUG', default='INFO', help="enable debug level (DEBUG by default): NOTSET, DEBUG, INFO, " "WARNING, ERROR, CRITICAL") parser.add_argument( '-c', '--camera_timeout', type=int, nargs='?', const=7500, default=0, help="set camera timeout (Infinite by default)") # NOTE(marcin.sielski): Magic number 1 MiB/s depends on underlying # hardware capabilities and was estimated experimentally for # SanDisk Extreme 64 GB and overclocked SD Host Controller. 
parser.add_argument( '-t', '--throughput', type=int, nargs='?', const=1, default=1, help="set camera timeout (1 MiB by default)") return parser def start(self, args): """ Start servers """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") logging.info(name(self) + " started") Gst.init(None) camera_server = CameraServer(args) self.__servers__ = Servers( [HTTPSServer(camera_server), camera_server, RTSPServer(camera_server)]) self.__servers__.start() self.__running__ = True logging.debug(function_name + ": exit") def stop(self, signum=None, frame=None): """ Stop servers """ function_name = "'" + threading.currentThread().name + "'." + \ type(self).__name__ + '.' + inspect.currentframe().f_code.co_name logging.debug(function_name + ": entry") if self.__running__: self.__running__ = False self.__servers__.stop() logging.info(name(self) + " stopped") logging.debug(function_name + ": exit") if __name__ == '__main__': """ Camera Service entry method """ camera_service = CameraService() args = camera_service.get_parser().parse_args() if getattr(logging, args.debug.upper()): logging.basicConfig( format="%(asctime)s %(levelname)s: %(message)s", level=getattr(logging, args.debug.upper())) try: with picamera.PiCamera() as camera: logging.info("'" + camera.revision + "' camera detected") except: if camera_revision() != "ov9281": logging.critical("Unable to acquire camera") exit(-1) camera_service.start(args) try: pause() except KeyboardInterrupt: pass camera_service.stop()
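
# The Servers class above fans each server's start() out into its own thread and
# joins them all on stop(). Note that it assigns `thread.deamon`, a misspelling
# of `daemon`, so the daemon flag is never actually set on those threads.
# Below is a minimal sketch of the intended pattern, assuming hypothetical
# stand-in server objects (DummyServer is not part of the original code).
import threading

class DummyServer:
    def __init__(self, label):
        self.label = label
        self._stopped = threading.Event()

    def start(self):
        # Stand-in for a real serve loop: block until stop() is called.
        self._stopped.wait()

    def stop(self):
        self._stopped.set()

class SimpleServers:
    def __init__(self, servers):
        self.servers = servers
        self.threads = []

    def start(self):
        for server in self.servers:
            thread = threading.Thread(target=server.start, name=server.label + ' Thread')
            thread.daemon = True  # correct attribute name
            thread.start()
            self.threads.append(thread)

    def stop(self):
        for server in self.servers:
            server.stop()
        for thread in self.threads:
            thread.join()

if __name__ == '__main__':
    servers = SimpleServers([DummyServer('rtsp'), DummyServer('https')])
    servers.start()
    servers.stop()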
solariot.py
#!/usr/bin/env python # Copyright (c) 2017 Dennis Mellican # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from pymodbus.payload import BinaryPayloadDecoder from pymodbus.constants import Endian from pymodbus.client.sync import ModbusTcpClient from SungrowModbusTcpClient import SungrowModbusTcpClient from influxdb import InfluxDBClient import paho.mqtt.client as mqtt import config import dweepy import json import time import datetime import requests from threading import Thread MIN_SIGNED = -2147483648 MAX_UNSIGNED = 4294967295 requests.packages.urllib3.disable_warnings() print ("Load config %s" % config.model) # SMA datatypes and their register lengths # S = Signed Number, U = Unsigned Number, STR = String sma_moddatatype = { 'S16':1, 'U16':1, 'S32':2, 'U32':2, 'U64':4, 'STR16':8, 'STR32':16 } # Load the modbus register map for the inverter modmap_file = "modbus-" + config.model modmap = __import__(modmap_file) # This will try the Sungrow client otherwise will default to the standard library. 
if 'sungrow-' in config.model: print ("Load SungrowModbusTcpClient") client = SungrowModbusTcpClient.SungrowModbusTcpClient(host=config.inverter_ip, timeout=config.timeout, RetryOnEmpty=True, retries=3, port=config.inverter_port) else: print ("Load ModbusTcpClient") client = ModbusTcpClient(host=config.inverter_ip, timeout=config.timeout, RetryOnEmpty=True, retries=3, port=config.inverter_port) print("Connect") client.connect() client.close() if not hasattr(config, 'dweepy_uuid'): config.dweepy_uuid = None try: mqtt_client = mqtt.Client('pv_data') if hasattr(config, 'mqtt_username'): mqtt_client.username_pw_set(config.mqtt_username,config.mqtt_password) mqtt_client.connect(config.mqtt_server, port=config.mqtt_port) except: mqtt_client = None try: flux_client = InfluxDBClient(config.influxdb_ip, config.influxdb_port, config.influxdb_user, config.influxdb_password, config.influxdb_database, ssl=config.influxdb_ssl, verify_ssl=config.influxdb_verify_ssl) except: flux_client = None inverter = {} bus = json.loads(modmap.scan) def load_registers(type,start,COUNT=100): try: if type == "read": rr = client.read_input_registers(int(start), count=int(COUNT), unit=config.slave) elif type == "holding": rr = client.read_holding_registers(int(start), count=int(COUNT), unit=config.slave) if len(rr.registers) != int(COUNT): print("[WARN] Mismatched number ({}) of registers read".format(len(rr.registers))) return for num in range(0, int(COUNT)): run = int(start) + num + 1 if type == "read" and modmap.read_register.get(str(run)): if '_10' in modmap.read_register.get(str(run)): inverter[modmap.read_register.get(str(run))[:-3]] = float(rr.registers[num])/10 else: inverter[modmap.read_register.get(str(run))] = rr.registers[num] elif type == "holding" and modmap.holding_register.get(str(run)): inverter[modmap.holding_register.get(str(run))] = rr.registers[num] except Exception as err: print("[WARN] No data. Try increasing the timeout or scan interval.") ## function for polling data from the target and triggering writing to log file if set # def load_sma_register(registers): from pymodbus.payload import BinaryPayloadDecoder from pymodbus.constants import Endian import datetime ## request each register from datasets, omit first row which contains only column headers for thisrow in registers: name = thisrow[0] startPos = thisrow[1] type = thisrow[2] format = thisrow[3] ## if the connection is somehow not possible (e.g. target not responding) # show a error message instead of excepting and stopping try: received = client.read_input_registers(address=startPos, count=sma_moddatatype[type], unit=config.slave) except: thisdate = str(datetime.datetime.now()).partition('.')[0] thiserrormessage = thisdate + ': Connection not possible. Check settings or connection.' 
print( thiserrormessage) return ## prevent further execution of this function message = BinaryPayloadDecoder.fromRegisters(received.registers, byteorder=Endian.Big, wordorder=Endian.Big) ## provide the correct result depending on the defined datatype if type == 'S32': interpreted = message.decode_32bit_int() elif type == 'U32': interpreted = message.decode_32bit_uint() elif type == 'U64': interpreted = message.decode_64bit_uint() elif type == 'STR16': interpreted = message.decode_string(16) elif type == 'STR32': interpreted = message.decode_string(32) elif type == 'S16': interpreted = message.decode_16bit_int() elif type == 'U16': interpreted = message.decode_16bit_uint() else: ## if no data type is defined do raw interpretation of the delivered data interpreted = message.decode_16bit_uint() ## check for "None" data before doing anything else if ((interpreted == MIN_SIGNED) or (interpreted == MAX_UNSIGNED)): displaydata = None else: ## put the data with correct formatting into the data table if format == 'FIX3': displaydata = float(interpreted) / 1000 elif format == 'FIX2': displaydata = float(interpreted) / 100 elif format == 'FIX1': displaydata = float(interpreted) / 10 elif format == 'UTF8' or format == 'IP4': interpreted = interpreted.split(b'\x00', 1)[0] #remove everything after \0 displaydata = interpreted.decode('utf-8') else: displaydata = interpreted #print('************** %s = %s' % (name, str(displaydata))) inverter[name] = displaydata # Add timestamp inverter["00000 - Timestamp"] = str(datetime.datetime.now()).partition('.')[0] def publish_influx(metrics): target=flux_client.write_points([metrics]) print ("[INFO] Sent to InfluxDB") def publish_dweepy(inverter): try: result = dweepy.dweet_for(config.dweepy_uuid,inverter) print("[INFO] Sent to dweet.io") except Exception as err: print ("[ERROR] publish dweepy: %s" % err) result = None def publish_mqtt(inverter): try: result = mqtt_client.publish(config.mqtt_topic, json.dumps(inverter).replace('"', '\"')) print("[INFO] Published to MQTT") except Exception as err: print ("[ERROR] publish mqtt: %s" % err) result = None while True: try: client.connect() inverter = {} if 'sungrow-' in config.model: for i in bus['read']: load_registers("read",i['start'],i['range']) for i in bus['holding']: load_registers("holding",i['start'],i['range']) # Sungrow inverter specifics: # Work out if the grid power is being imported or exported if config.model == "sungrow-sh5k" and \ inverter['grid_import_or_export'] == 65535: export_power = (65535 - inverter['export_power']) * -1 inverter['export_power'] = export_power inverter['timestamp'] = "%s/%s/%s %s:%02d:%02d" % ( inverter['day'], inverter['month'], inverter['year'], inverter['hour'], inverter['minute'], inverter['second']) if 'sma-' in config.model: load_sma_register(modmap.sma_registers) #print(inverter) if inverter: # Inverter data read (dictionary is not empty) if mqtt_client is not None: t = Thread(target=publish_mqtt, args=(inverter,)) t.start() if config.dweepy_uuid is not None: t = Thread(target=publish_dweepy, args=(inverter,)) t.start() if flux_client is not None: serial = None if hasattr(config, 'inverter_serial'): serial = config.inverter_serial if '30057 - Serial number' in inverter: if serial is not None and serial != inverter["30057 - Serial number"]: print("[WARN] configuration serial and Modbus serial are not equal (% != %)" % (serial, inverter["30057 - Serial number"])) serial = inverter["30057 - Serial number"] if serial is not None: metrics = {} metrics['measurement'] = serial # 
Measurements are identified by the device serial number metrics['fields'] = inverter t = Thread(target=publish_influx, args=(metrics,)) t.start() else: print("Serial required for influxdb (or in config, or in Modbus data") else: print("[WARN] No data from inverter") client.close() except Exception as err: #Enable for debugging, otherwise it can be noisy and display false positives: print ("[ERROR] %s" % err) client.close() client.connect() time.sleep(config.scan_interval)
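
# load_sma_register() above reads a block of input registers and decodes them
# with pymodbus's BinaryPayloadDecoder, then applies the FIXn scaling factors.
# A minimal sketch of just that decode/scale step, assuming the pymodbus 2.x API
# already used above; the register values here are made-up sample data.
from pymodbus.payload import BinaryPayloadDecoder
from pymodbus.constants import Endian

MAX_UNSIGNED = 4294967295  # SMA "no value" marker, as in the script above

def decode_u32_fix2(registers):
    # Two 16-bit registers, big-endian words, as read for an SMA 'U32' datatype.
    decoder = BinaryPayloadDecoder.fromRegisters(
        registers, byteorder=Endian.Big, wordorder=Endian.Big)
    raw = decoder.decode_32bit_uint()
    if raw == MAX_UNSIGNED:
        return None
    return float(raw) / 100  # 'FIX2' scaling, i.e. two decimal places

print(decode_u32_fix2([0x0000, 0x0929]))  # 0x0929 == 2345 -> 23.45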
main.py
import cv2 from enum import Enum import logging import threading import time import copy import numpy from pattern_type import PatternType from pattern_finder import PatternFinder from camera_calibrator import CameraCalibrator from drawing_helpers import draw_vertical_line, draw_corners, draw_grid from coordinate_mapper import find_checkerboard_corners, CoordinateMapper class Mode(Enum): Initial = 0, Calibration = 1, Calibrated = 2 pattern_dims = (5, 8) pattern_type = PatternType.Checkerboard calibrator = CameraCalibrator(pattern_type, pattern_dims, None) mode = Mode.Initial crop_scale = 0.0 def crop_frame(frame, crop_corners): target_frame = frame[ crop_corners[0][1]:crop_corners[1][1], crop_corners[0][0]:crop_corners[1][0] ] return target_frame def configure_windows(): cv2.namedWindow('distorted') cv2.namedWindow('undistorted') cv2.createTrackbar('alpha', 'undistorted', 0, 100, change_alpha) cv2.setTrackbarPos('alpha', 'undistorted', int(calibrator.alpha * 100)) cv2.createTrackbar('crop', 'undistorted', 0, 100, change_crop) def start_camera(): logging.debug("Starting camera") cap = cv2.VideoCapture(cv2.CAP_XIAPI) # cap = cv2.VideoCapture(0) if not cap.isOpened(): raise RuntimeError("Could not open camera") logging.debug("Camera started") return cap def main(): global mode logging.basicConfig(format='[%(asctime)s] [%(threadName)13s] %(levelname)7s: %(message)s', level=logging.DEBUG) #Start camera cap = start_camera() #Configure windows configure_windows() #Setup pattern finder pattern_finder = PatternFinder(pattern_type, pattern_dims) pattern_finder.start() #Setup calibration variables pattern_found = False last_calibration_sample_time = None #Setup coordinate mapper coordinate_mapper = CoordinateMapper( checkerboard_distance=0.20, checkerboard_width=0.279, checkerboard_height=0.159 ) #Setup display flags show_coordinate_grid = False while True: #Try to get a frame from camera success, distorted_frame_clean = cap.read() if not success: logging.warning("Could not retrieve frame from camera") continue # Let's just try again #Make original frame read-only distorted_frame_clean.flags.writeable = False image_size = tuple(distorted_frame_clean.shape[0:2][::-1]) calibrator.image_size = image_size #Create copy that we can draw on distorted_frame = copy.deepcopy(distorted_frame_clean) """ #Draw a grid on distorted frame draw_grid( frame=distorted_frame, x_range=(0, image_size[0], image_size[0] // 10), y_range=(0, image_size[1], image_size[1] // 10), color=(0, 0, 0), thickness=1, line_type=cv2.LINE_AA ) """ undistorted_frame_clean = None undistorted_frame = None if mode == Mode.Calibrated: #If calibration has been loaded, show the undistorted_frame_clean = cv2.remap(distorted_frame_clean, calibrator.map_x, calibrator.map_y, cv2.INTER_LINEAR) undistorted_frame_clean.flags.writeable = False undistorted_frame = cv2.remap(distorted_frame, calibrator.map_x, calibrator.map_y, cv2.INTER_LINEAR) if not pattern_finder.recognition_in_progress: #Get the results of last recognition pattern_found = pattern_finder.pattern_found pattern_points = pattern_finder.pattern_points #Start a new recognition #Calculate the corners of cropped image according to crop_scale crop_corners = ( (int(image_size[0]/2*crop_scale), int(image_size[1]/2*crop_scale)), (int(image_size[0] - image_size[0]/2*crop_scale), int(image_size[1] - image_size[1]/2*crop_scale)) ) if pattern_points is not None: pattern_points += crop_corners[0] #If calibration has been loaded, let's find pattern from calibrated image if mode == Mode.Calibrated: 
target_frame = undistorted_frame target_frame_clean = undistorted_frame_clean else: target_frame = distorted_frame target_frame_clean = distorted_frame_clean #Show crop on target frame with a rectangle, and crop the clean frame cv2.rectangle(target_frame, crop_corners[0], crop_corners[1], (255, 0, 0), 1, cv2.LINE_AA) cropped_frame = crop_frame(target_frame_clean, crop_corners) #Start a new pattern Recognition pattern_finder.start_pattern_recognition(cropped_frame) if pattern_found: if mode == Mode.Calibration: if last_calibration_sample_time is None or time.time() - last_calibration_sample_time > 0.2: last_calibration_sample_time = time.time() calibrator.add_sample(pattern_points) show_ui_taking_sample(distorted_frame) if calibrator.number_of_samples > 100: show_ui_calibrating(distorted_frame) calibrator.calibrate() calibrator.calculate_new_camera_matrix() calibrator.generate_maps() logging.info("Calibration Finished") logging.info("Calibration error-rate: {}".format(calibrator.accuracy)) mode = Mode.Calibrated calibrator.save_results("AutoSave") continue #Needed for initialising "undistorted_frame" # If view is calibrated, then draw chessboard on undistorted frame, otherwise use the original/distorted one. if mode == Mode.Calibrated: target_frame = undistorted_frame else: target_frame = distorted_frame cv2.drawChessboardCorners(target_frame, pattern_dims, pattern_points, pattern_found) # Find four corners of the board and use then to calculate mapping constants corners = find_checkerboard_corners(pattern_points, pattern_dims) # TODO: This should be done somehow differently? coordinate_mapper.image_dims = (distorted_frame_clean.shape[1], distorted_frame_clean.shape[0]) coordinate_mapper.calculate_constants(corners) if show_coordinate_grid: coordinate_mapper.draw_intersection_lines(target_frame, corners) coordinate_mapper.draw_grid(target_frame) draw_corners(target_frame, corners) # Vertical line in the center draw_vertical_line(target_frame, target_frame.shape[1] // 2, (100, 0, 0), 1, cv2.LINE_AA) logging.info("Constants {}".format(coordinate_mapper.constants)) # Display the distorted frame (original with additional lines) distorted_frame = cv2.resize(distorted_frame, (0, 0), fx=0.5, fy=0.5) cv2.imshow('distorted', distorted_frame) if mode == Mode.Calibrated: undistorted_frame = cv2.resize(undistorted_frame, (0, 0), fx=0.5, fy=0.5) cv2.imshow('undistorted', undistorted_frame) # TODO: Why does it work strange? 
#if calibrator.roi is not None: # x, y, w, h = [elem // 2 for elem in calibrator.roi] # if w != 0 and h != 0: # cropped = undistorted_frame[y:y + h, x:x + w] # cv2.imshow('undistorted_cropped', cropped) key_no = cv2.waitKey(30) & 0xFF if key_no == 255: # no key was pressed: pass elif key_no == ord('q'): logging.info("Quitting...") break elif key_no == ord('g'): show_coordinate_grid = not show_coordinate_grid elif key_no == ord('c'): logging.info("Calibration started") calibrator.clear() mode = Mode.Calibration elif key_no == ord('s'): calibrator.save_results("ManualSave") logging.info("Calibration results saved") elif key_no == ord('l'): timestamp = input("Type timestamp to load: ") try: calibrator.load_results(timestamp) except IOError: logging.exception("Could not load all calibration files.") else: mode = Mode.Calibrated calibrator.calculate_new_camera_matrix() calibrator.generate_maps() cv2.setTrackbarPos('alpha', 'undistorted', int(calibrator.alpha * 100)) logging.info("Calibration results loaded") elif key_no == ord('p'): calibrator.plot() else: print("Press:\n" "\t'q' to quit\n" "\t'c' to start calibration\n" "\t'g' to toggle grid\n" "\t's' to save calibration results\n" "\t'l' to load calibration results\n" ) ## End of the main loop ## cap.release() cv2.destroyAllWindows() last_change = None target_alpha = None def timing_function(): global last_change while True: time.sleep(0.1) if last_change is not None and time.time()-last_change > 1.0: calibrator.calculate_new_camera_matrix(target_alpha) calibrator.generate_maps() last_change = None timing_thread = threading.Thread(target=timing_function) timing_thread.daemon = True timing_thread.start() def change_alpha(value): global target_alpha, last_change if mode != Mode.Calibrated: return target_alpha = value / 100 last_change = time.time() def change_crop(value): global crop_scale crop_scale = value/100 def show_ui_calibrating(frame): cv2.rectangle( frame, (0, 0), (frame.shape[1], frame.shape[0]), (255, 255, 255), -1 ) cv2.putText(frame, "CALIBRATING...", (frame.shape[1]//3, frame.shape[0]//2), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2, cv2.LINE_AA) frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5) cv2.imshow('distorted', frame) cv2.waitKey(20) def show_ui_taking_sample(frame): color = (255, 255, 255) cv2.circle(frame, (frame.shape[1]//10, frame.shape[0]//10), frame.shape[0]//25, color, -1, cv2.LINE_AA) cv2.circle(frame, (frame.shape[1]//10*9, frame.shape[0]//10), frame.shape[0]//25, color, -1, cv2.LINE_AA) cv2.circle(frame, (frame.shape[1]//10, frame.shape[0]//10*9), frame.shape[0]//25, color, -1, cv2.LINE_AA) cv2.circle(frame, (frame.shape[1]//10*9, frame.shape[0]//10*9), frame.shape[0]//20, color, -1, cv2.LINE_AA) if __name__ == "__main__": try: main() except Exception: logging.exception("Uncaught exception from main():")
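
# The alpha trackbar above is "debounced": change_alpha() only records the new
# value, and timing_function() regenerates the camera maps once the slider has
# been idle for about a second. A standalone sketch of that idea, with a generic
# expensive callback standing in for the calibrator (names here are illustrative).
import threading
import time

class Debouncer:
    def __init__(self, action, idle_seconds=1.0):
        self.action = action
        self.idle_seconds = idle_seconds
        self.last_change = None
        self.value = None
        thread = threading.Thread(target=self._loop, daemon=True)
        thread.start()

    def update(self, value):
        # Called from the UI callback: cheap, just records the request.
        self.value = value
        self.last_change = time.time()

    def _loop(self):
        while True:
            time.sleep(0.1)
            if self.last_change is not None and time.time() - self.last_change > self.idle_seconds:
                self.last_change = None
                self.action(self.value)  # the expensive recomputation

debouncer = Debouncer(lambda v: print("recomputing with", v))
for v in range(5):
    debouncer.update(v / 100)
time.sleep(1.5)  # only the last value triggers the expensive step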
SharedMemoryRunner.py
# Copyright 2017 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Created on September 12, 2016 """ #for future compatibility with Python 3-------------------------------------------------------------- from __future__ import division, print_function, unicode_literals, absolute_import import warnings warnings.simplefilter('default',DeprecationWarning) #End compatibility block for Python 3---------------------------------------------------------------- #External Modules------------------------------------------------------------------------------------ import collections import subprocess # try : import Queue as queue # except ImportError: import queue import os import signal import copy import abc import time import ctypes import inspect #import logging, logging.handlers import threading #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ from utils import utils from BaseClasses import BaseType import MessageHandler from .InternalRunner import InternalRunner #Internal Modules End-------------------------------------------------------------------------------- class SharedMemoryRunner(InternalRunner): """ Class for running internal objects in a threaded fashion using the built-in threading library """ def __init__(self, messageHandler, args, functionToRun, identifier=None, metadata=None, uniqueHandler = "any", profile = False): """ Init method @ In, messageHandler, MessageHandler object, the global RAVEN message handler object @ In, args, dict, this is a list of arguments that will be passed as function parameters into whatever method is stored in functionToRun. e.g., functionToRun(*args) @ In, functionToRun, method or function, function that needs to be run @ In, identifier, string, optional, id of this job @ In, metadata, dict, optional, dictionary of metadata associated with this run @ In, uniqueHandler, string, optional, it is a special keyword attached to this runner. For example, if present, to retrieve this runner using the method jobHandler.getFinished, the uniqueHandler needs to be provided. If uniqueHandler == 'any', every "client" can get this runner @ In, clientRunner, bool, optional, Is this runner needed to be executed in client mode? Default = False @ In, profile, bool, optional, if True then at deconstruction timing statements will be printed @ Out, None """ ## First, allow the base class handle the commonalities # we keep the command here, in order to have the hook for running exec code into internal models super(SharedMemoryRunner, self).__init__(messageHandler, args, functionToRun, identifier, metadata, uniqueHandler, profile) ## Other parameters manipulated internally self.subque = collections.deque() #self.subque = queue.Queue() self.skipOnCopy.append('subque') def isDone(self): """ Method to check if the calculation associated with this Runner is finished @ In, None @ Out, finished, bool, is it finished? 
""" ## If the process has not been started yet, then return False if not self.started: return False if self.thread is None: return True else: return not self.thread.is_alive() def getReturnCode(self): """ Returns the return code from running the code. If return code not yet set, then set it. @ In, None @ Out, returnCode, int, the return code of this evaluation """ if not self.hasBeenAdded: self._collectRunnerResponse() ## Is this necessary and sufficient for all failed runs? if len(self.subque) == 0 and self.runReturn is None: self.runReturn = None self.returnCode = -1 return self.returnCode def _collectRunnerResponse(self): """ Method to add the process response in the internal variable (pointer) self.runReturn @ In, None @ Out, None """ if not self.hasBeenAdded: if len(self.subque) == 0: ## Queue is empty! self.runReturn = None else: self.runReturn = self.subque.popleft() self.hasBeenAdded = True def start(self): """ Method to start the job associated to this Runner @ In, None @ Out, None """ try: self.thread = InterruptibleThread(target = lambda q, *arg : q.append(self.functionToRun(*arg)), name = self.identifier, args=(self.subque,) + tuple(self.args)) self.thread.daemon = True self.thread.start() self.trackTime('runner_started') self.started = True except Exception as ae: self.raiseAWarning(self.__class__.__name__ + " job "+self.identifier+" failed with error:"+ str(ae) +" !",'ExceptedError') self.returnCode = -1 def kill(self): """ Method to kill the job associated to this Runner @ In, None @ Out, None """ if self.thread is not None: self.raiseADebug('Terminating job thread "{}" and RAVEN identifier "{}"'.format(self.thread.ident, self.identifier)) while self.thread is not None and self.thread.isAlive(): time.sleep(0.1) try: self.thread.raiseException(RuntimeError) except ValueError: print('DEBUGG was already terminated....') self.thread = None self.trackTime('runner_killed') ## The following code is extracted from stack overflow with some minor cosmetic ## changes in order to adhere to RAVEN code standards: ## https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python def _asyncRaise(tid, exceptionType): """ Raises an exception in the threads with id tid @ In, tid, integer, this variable represents the id of the thread to raise an exception @ In, exceptionType, Exception, the type of exception to throw @ Out, None """ if not inspect.isclass(exceptionType): raise TypeError("Only types can be raised (not instances)") res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exceptionType)) if res == 0: raise ValueError("invalid thread id") elif res != 1: # "if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0) raise SystemError("PyThreadState_SetAsyncExc failed") class InterruptibleThread(threading.Thread): """ A thread class that supports raising exception in the thread from another thread. """ def raiseException(self, exceptionType): """ Raises the given exception type in the context of this thread. If the thread is busy in a system call (time.sleep(), socket.accept(), ...), the exception is simply ignored. If you are sure that your exception should terminate the thread, one way to ensure that it works is: t = InterruptibleThread( ... ) ... 
t.raiseException( SomeException ) while t.isAlive(): time.sleep( 0.1 ) t.raiseException( SomeException ) If the exception is to be caught by the thread, you need a way to check that your thread has caught it. CAREFUL : this function is executed in the context of the caller thread, to raise an excpetion in the context of the thread represented by this instance. @ In, exceptionType, Exception, the type of exception to raise in this thread @ Out, None """ if self.isAlive(): ## Assuming Python 2.6+, we can remove the need for the _get_my_tid as ## specifed in the Stack Overflow answer _asyncRaise( self.ident, exceptionType )
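
# _asyncRaise() and InterruptibleThread above rely on the CPython C-API call
# PyThreadState_SetAsyncExc to schedule an exception in another thread. A minimal
# standalone sketch of the same technique follows; wrapping the thread id in
# ctypes.c_ulong is a common precaution on 64-bit builds and is an assumption
# here, not taken from the code above.
import ctypes
import threading
import time

class StopThread(Exception):
    pass

def async_raise(tid, exc_type):
    # Ask CPython to raise exc_type the next time the target thread runs bytecode.
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_ulong(tid), ctypes.py_object(exc_type))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res > 1:
        # More than one thread state was affected: revert with a NULL exception.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_ulong(tid), None)
        raise SystemError("PyThreadState_SetAsyncExc failed")

def worker():
    try:
        while True:
            time.sleep(0.1)  # the exception is only delivered between bytecodes
    except StopThread:
        print("worker interrupted")

t = threading.Thread(target=worker)
t.start()
time.sleep(0.3)
async_raise(t.ident, StopThread)
t.join()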
activethreadcount.py
from threading import *
import time

def display():
    print(current_thread().name,'...started')
    time.sleep(3)
    print(current_thread().name,'...ended')

print('The number of active Threads:',active_count())

t1=Thread(target=display,name='ChildThread-1')
t2=Thread(target=display,name='ChildThread-2')
t3=Thread(target=display,name='ChildThread-3')
t1.start()
t2.start()
t3.start()

l=enumerate()
for t in l:
    print('Thread name:',t.name)
    print('Thread Identification Number:',t.ident)
    print()

time.sleep(10)

l=enumerate()
for t in l:
    print('Thread name:',t.name)
    print('Thread Identification Number:',t.ident)
    print()

'''The number of active Threads: 1
ChildThread-1 ...started
ChildThread-2 ...started
ChildThread-3 ...started
Thread name: MainThread
Thread Identification Number: 1592
Thread name: ChildThread-1
Thread Identification Number: 6092
Thread name: ChildThread-2
Thread Identification Number: 18712
Thread name: ChildThread-3
Thread Identification Number: 15220
ChildThread-1 ...ended
ChildThread-2 ...ended
ChildThread-3 ...ended
Thread name: MainThread
Thread Identification Number: 1592'''
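
# The script above sleeps for 10 seconds so the child threads can finish before
# enumerating again; joining the threads waits exactly as long as needed. A small
# self-contained sketch of that variant (the worker function is illustrative):
from threading import Thread, active_count, current_thread
import time

def worker():
    print(current_thread().name, 'working')
    time.sleep(1)

threads = [Thread(target=worker, name='JoinedChild-%d' % i) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # blocks only until each child is actually done
print('Active threads after join:', active_count())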
profiler_test.py
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import glob import os import shutil import tempfile import threading import unittest from absl.testing import absltest import jax import jax.numpy as jnp import jax.profiler from jax.config import config import jax._src.test_util as jtu try: import portpicker except ImportError: portpicker = None try: from tensorflow.python.profiler import profiler_client from tensorflow.python.profiler import profiler_v2 as tf_profiler except ImportError: profiler_client = None tf_profiler = None config.parse_flags_with_absl() class ProfilerTest(unittest.TestCase): # These tests simply test that the profiler API does not crash; they do not # check functional correctness. def setUp(self): super().setUp() self.worker_start = threading.Event() self.profile_done = False @unittest.skipIf(not portpicker, "Test requires portpicker") def testStartStopServer(self): port = portpicker.pick_unused_port() jax.profiler.start_server(port=port) del port jax.profiler.stop_server() @unittest.skipIf(not portpicker, "Test requires portpicker") def testCantStartMultipleServers(self): port = portpicker.pick_unused_port() jax.profiler.start_server(port=port) port = portpicker.pick_unused_port() with self.assertRaisesRegex( ValueError, "Only one profiler server can be active at a time."): jax.profiler.start_server(port=port) jax.profiler.stop_server() def testCantStopServerBeforeStartingServer(self): with self.assertRaisesRegex(ValueError, "No active profiler server."): jax.profiler.stop_server() def testProgrammaticProfiling(self): with tempfile.TemporaryDirectory() as tmpdir: try: jax.profiler.start_trace(tmpdir) jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')( jnp.ones(jax.local_device_count())) finally: jax.profiler.stop_trace() proto_path = glob.glob(os.path.join(tmpdir, "**/*.xplane.pb"), recursive=True) self.assertEqual(len(proto_path), 1) with open(proto_path[0], "rb") as f: proto = f.read() # Sanity check that serialized proto contains host, device, and # Python traces without deserializing. self.assertIn(b"/host:CPU", proto) if jtu.device_under_test() == "tpu": self.assertIn(b"/device:TPU", proto) self.assertIn(b"pxla.py", proto) def testProgrammaticProfilingErrors(self): with self.assertRaisesRegex(RuntimeError, "No profile started"): jax.profiler.stop_trace() try: with tempfile.TemporaryDirectory() as tmpdir: jax.profiler.start_trace(tmpdir) with self.assertRaisesRegex( RuntimeError, "Profile has already been started. 
Only one profile may be run at a " "time."): jax.profiler.start_trace(tmpdir) finally: jax.profiler.stop_trace() def testProgrammaticProfilingContextManager(self): with tempfile.TemporaryDirectory() as tmpdir: with jax.profiler.trace(tmpdir): jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')( jnp.ones(jax.local_device_count())) proto_path = glob.glob(os.path.join(tmpdir, "**/*.xplane.pb"), recursive=True) self.assertEqual(len(proto_path), 1) with open(proto_path[0], "rb") as f: proto = f.read() # Sanity check that serialized proto contains host and device traces # without deserializing. self.assertIn(b"/host:CPU", proto) if jtu.device_under_test() == "tpu": self.assertIn(b"/device:TPU", proto) def testTraceAnnotation(self): x = 3 with jax.profiler.TraceAnnotation("mycontext"): x = x + 2 def testTraceFunction(self): @jax.profiler.annotate_function def f(x, *, y): return x + 2 * y self.assertEqual(f(7, y=3), 13) @jax.profiler.annotate_function def f(x, *, name): return x + 2 * len(name) self.assertEqual(f(7, name="abc"), 13) @partial(jax.profiler.annotate_function, name="aname") def g(x): return x + 2 self.assertEqual(g(7), 9) @partial(jax.profiler.annotate_function, name="aname", akwarg="hello") def h(x): return x + 2 self.assertEqual(h(7), 9) def testDeviceMemoryProfile(self): x = jnp.ones((20,)) + 7. self.assertIsInstance(jax.profiler.device_memory_profile(), bytes) del x def _check_xspace_pb_exist(self, logdir): path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb') self.assertEqual(1, len(glob.glob(path)), 'Expected one path match: ' + path) @unittest.skip("Test causes OOMs") @unittest.skipIf(not (portpicker and profiler_client and tf_profiler), "Test requires tensorflow.profiler and portpicker") def testSingleWorkerSamplingMode(self, delay_ms=None): def on_worker(port, worker_start): jax.profiler.start_server(port) worker_start.set() x = jnp.ones((1000, 1000)) while True: with jax.profiler.TraceAnnotation("atraceannotation"): jnp.dot(x, x.T).block_until_ready() if self.profile_done: jax.profiler.stop_server() break def on_profile(port, logdir, worker_start): worker_start.wait() options = tf_profiler.ProfilerOptions( host_tracer_level=2, python_tracer_level=2, device_tracer_level=1, delay_ms=delay_ms, ) # Request for 1000 milliseconds of profile. duration_ms = 1000 profiler_client.trace(f'localhost:{port}', logdir, duration_ms, '', 1000, options) self.profile_done = True logdir = absltest.get_default_test_tmpdir() # Remove any existing log files. shutil.rmtree(logdir, ignore_errors=True) port = portpicker.pick_unused_port() thread_profiler = threading.Thread( target=on_profile, args=(port, logdir, self.worker_start)) thread_worker = threading.Thread( target=on_worker, args=(port, self.worker_start)) thread_worker.start() thread_profiler.start() thread_profiler.join() thread_worker.join(120) self._check_xspace_pb_exist(logdir) if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
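
# The tests above drive the JAX profiler both with explicit start_trace()/
# stop_trace() calls and with the trace() context manager. A minimal standalone
# sketch of the context-manager form; the output directory is arbitrary.
import tempfile

import jax
import jax.numpy as jnp
import jax.profiler

logdir = tempfile.mkdtemp()
with jax.profiler.trace(logdir):
    with jax.profiler.TraceAnnotation("matmul_step"):
        x = jnp.ones((512, 512))
        jnp.dot(x, x).block_until_ready()
# The resulting .xplane.pb files under `logdir` can be inspected with
# TensorBoard's profile plugin.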
wmdlogicmt.py
# -*- coding: utf-8 -*- from __future__ import print_function import sys reload(sys) sys.setdefaultencoding("utf-8") import os import logging import requests import json from stop_words import get_stop_words import nltk.data from nltk.tokenize import RegexpTokenizer import gensim import shutil import codecs from gensim import models, corpora, similarities, matutils from gensim.similarities import WmdSimilarity from core import utils import threading from modules.machinelogic.imachinelogic.mlimachinelogic import MLInternalMachineLogic, MLInternalMachineLogicTrainer from modules.brain.mlbrain import MLBrain, TrainingCorpus, convert_train_data from core.progressbar import ProgressBar, Percentage, Bar, ETA, FormatLabel, AnimatedMarker from time import sleep from multiprocessing import Lock, Process, Queue, current_process wordTokenizer = RegexpTokenizer(u'\w+') """ Class MLInternalMachineLogic as described at: Copyright (c) 2019 Imdat Solak All Rights Reserved """ WMD_FILE = 'wmd.emb' wmd_mutex = threading.Lock() query_results_mutex = threading.Lock() # Extract tokens from text sentenceTokenizer = nltk.data.load('tokenizers/punkt/english.pickle') # Get a list of common German words that can be removed from tokens en_stop = get_stop_words('en') # Add additional words if neccessary to remove noisy information in SSD Config def read_all_sentences(top_directory, remove_stop_words=True, stop_words=None): """ Iterate over all documents, yielding a document (=list of utf8 tokens) at a time. Updated: 2016-12-29 13:28 CET, ISO Fixed so many bugs, unbelievable :-() """ global sentenceTokenizer global wordTokenizer sentences = [] if remove_stop_words and not stop_words: stop_words = get_stop_words('en') for root, dirs, files in os.walk(top_directory): counter = 0 widgets=[FormatLabel(' | File: %(message)s [%(value)s/'+str(len(files))+']'), ' ', Percentage(), ' ', Bar(marker='@', left='[', right=']'), ' ', ETA()] pBar = ProgressBar(widgets=widgets, maxval=len(files)).start() files.sort() for filename in filter(lambda filename: filename.endswith('.txt'), files): counter += 1 pBar.update(counter, filename) filename = os.path.join(root, filename) fContents = codecs.open(filename, 'r', 'utf-8').read() # read the entire file as a large string fContents = ' '.join(fContents.split('\n')) contents = sentenceTokenizer.tokenize(fContents) for content in contents: content = content.lower() try: tokens = wordTokenizer.tokenize(content) if remove_stop_words is True: stopped_tokens = [i for i in tokens if not i in stop_words] else: stopped_tokens = tokens sentences.append(stopped_tokens) except: pass pBar.finish() return sentences def perform_query_for_job_on_instance_process(wmd_logic, wmd_instances, query, jobID, instance, in_q, out_q): sims = [] logger = logging.getLogger(os.path.basename(sys.argv[0])) if len(query): logger.info('WMD: Searching at instance {}'.format(instance)) wmd_instance = wmd_instances[instance] sims = wmd_instance[query] logger.info('WMD: Sending result from instance {} to parent'.format(instance)) out_q.put(sims) out_q.close() logger.info('WMD: Waiting for RECEIPT from parent at instance {}'.format(instance)) done = in_q.get() os._exit(0) def create_wmd_instances_process(all_knowledge, wmd_model, num_results, wmd_instance_count, in_q, out_q): """ This function updates the WMD-Instance used in the WMDLogicModule NOTE: This runs in a its own processes in order not to block return answers faster... 
""" logger = logging.getLogger(os.path.basename(sys.argv[0])) logger.info(' | +----> CHILD: RE-Creating in separate process...') sys.stdout.flush() wmd_instances = [] wmd_corpus = [] for tokens in all_knowledge: wmd_corpus.append(tokens) if wmd_instance_count > len(wmd_corpus): wmd_instance_count = len(wmd_corpus) chunk_size = int(len(wmd_corpus) / wmd_instance_count) for i in range(0, wmd_instance_count): logger.info(' Instance %d..' %i) sys.stdout.flush() if i == wmd_instance_count -1: wmd_instance = WmdSimilarity(wmd_corpus[i*chunk_size:], wmd_model, num_results) else: wmd_instance = WmdSimilarity(wmd_corpus[i*chunk_size:(i+1)*chunk_size], wmd_model, num_results) wmd_instances.append(wmd_instance) logger.info(' | +----> CHILD: Adding chunksize to out_q') out_q.put(chunk_size) logger.info(' | +----> CHILD: Adding instances to out_q') out_q.put(wmd_instances) out_q.close() logger.info(' | +----> CHILD: Waiting for data to be flushed to my PARENT...') done = in_q.get() logger.info(' | +----> CHILD: I have done my job (Parent is happy), I am going away now...') os._exit(0) return True def recreate_wmd_instances(wmd_logic_module, wmd_model, brain, num_results, wmd_instance_count): """ This function updates the WMD-Instance used in the WMDLogicModule NOTE: This runs in a its own thread in order not to block return answers faster... """ logger = logging.getLogger(os.path.basename(sys.argv[0])) if wmd_logic_module.getRequiresRetraining() == False: logger.info('WMD : NO RETRAINING REQUIRED') wmd_logic_module.retrainingDone() return wmd_logic_module.setRequiresRetraining(False) try: wmd_mutex.acquire() logger.info('WMD : RECREATING INDEX') wmd_instances = [] wmd_corpus = [] corpus_data = brain.getAllQuestions() for q in corpus_data: t = ' '.join(q.strip().lower().split('\n')) t = brain.getStopWordCleanedText(t) tokens = wordTokenizer.tokenize(t) wmd_corpus.append(tokens) out_q = Queue() in_q = Queue() process = Process(target=create_wmd_instances_process, args=(wmd_corpus, wmd_model, num_results, wmd_instance_count, in_q, out_q)) process.start() logger.info(' | +--> Waiting for response from my child...') chunk_size = out_q.get() wmd_instances = out_q.get() logger.info(' | +--> Retrieved data from CHILD, great kid. 
I will let it know everything was fine...') if isinstance(chunk_size, int) and isinstance(wmd_instances, list): wmd_logic_module.setWMDInstances(wmd_instances, chunk_size) wmd_mutex.release() else: logger.info(' | *** ERROR: Queue was corrupted...') wmd_mutex.release() in_q.put('DONE') in_q.close() process.join() wmd_logic_module.retrainingDone() except: wmd_logic_module.setRequiresRetraining(True) wmd_logic_module.retrainingDone() class WMDLogicMTTrainer(MLInternalMachineLogicTrainer): def __init__(self, moduleConfigSection, configDict, brain): super(WMDLogicMTTrainer, self).__init__(moduleConfigSection, configDict, brain) self.brain = brain if moduleConfigSection != None: self.module_config = moduleConfigSection else: print('**** ERROR: moduleConfigSection cannot be None!') sys.exit(1) self.config_dict = configDict def _generate_word2vec(self): global WMD_FILE print(' | Generating Word2Vec model...') wmd_data_path = self.module_config.get('wmd_data_path', None) if wmd_data_path == None: logger.info('**** Error: no output_root_path specified in section my i-machinelogic-section in the config-file.') sys.exit(1) else: utils.safe_create_directory(wmd_data_path) w2v_dim = 100 w2v_mc = 5 w2v_win = 5 w2v_iter = 10 w2v_workers = 4 create_nn_embed = 0 nn_embed_path = None if self.config_dict is not None: w2v_dim = int(self.config_dict['bender-training'].get('word2vec_dims', w2v_dim)) w2v_mc = int(self.config_dict['bender-training'].get('word2vec_min_count', w2v_mc)) w2v_win = int(self.config_dict['bender-training'].get('word2vec_window', w2v_win)) w2v_iter = int(self.config_dict['bender-training'].get('word2vec_iter', w2v_iter)) w2v_workers = int(self.config_dict['bender-training'].get('word2vec_workers', w2v_workers)) create_nn_embed = int(self.config_dict['bender-training'].get('create_nn_embed', 0)) nn_embed_path = self.config_dict['bender-training'].get('nn_embed_path', None) train_data_source_dir = self.config_dict['bender-training'].get('train_data_source_file', None) remove_stop_words = int(self.config_dict['bender-training'].get('remove_stop_words', 0)) language = self.config_dict['bender-training'].get('data_language_short', 'en') stop_words = get_stop_words(language) if train_data_source_dir is not None: train_data_source_dir = os.path.dirname(train_data_source_dir) dict_source_dir = self.config_dict['bender-training'].get('dictionary_data_source_path') else: print('Error: SOMETHING IS REALLY BAD HERE. No CONFIG-FILE FOUND') sys.exit(1) if train_data_source_dir is not None: sentences = read_all_sentences(train_data_source_dir, remove_stop_words, stop_words) else: sentences = [] sentences += read_all_sentences(dict_source_dir, remove_stop_words, stop_words) print(' | ... starting word2vec generation ...') model = models.Word2Vec(sentences, size=w2v_dim, min_count=w2v_mc, iter=w2v_iter, window=w2v_win, workers=w2v_workers, sg=1) wmd_w2v_filename = os.path.join(wmd_data_path, WMD_FILE) model.save(wmd_w2v_filename) if create_nn_embed == 1 and nn_embed_path is not None: weights = model.wv.syn0 d = dict([(k, v.index) for k, v in model.wv.vocab.items()]) emb = numpy.zeros(shape=(len(self.doc2id)+1, w2v_dim), dtype='float32') for i, w in self.doc2id.items(): if w not in d: continue emb[i, :] = weights[d[w], :] numpy.save(open(os.path.join(nn_embed_path, 'word2vec_nn.embed'), 'wb'), emb) print(' | ... 
done') def train(self): self._generate_word2vec() def reTrain(self): # self._generate_word2vec() return None class WMDLogicMT(MLInternalMachineLogic): def __init__(self, moduleConfigSection, configDictionary): super(WMDLogicMT, self).__init__(moduleConfigSection, configDictionary) global WMD_FILE self.profile = { "name" : "wmd-logic", "class" : "internalmachine-logic", 'accepted-languages': ['de','en','fr','tr','it','nl','se','no','fi','pl','cz','hu'], 'accepted-media-types' : ['text/utf8'], 'returned-media-types' : ['text/utf8'], 'requires-original-query' : True, 'returns-response-id' : True, 'always-ask' : True } self.module_config = moduleConfigSection self.config_dict = configDictionary language = utils.getKeyFromSectionInConfiguration('bender-training', 'data_language_short', 'en', self.config_dict) self.remove_stop_words = int(utils.getKeyFromSectionInConfiguration('bender-training', 'remove_stop_words', 1, self.config_dict)) retraining_interval_mins = int(self.module_config.get('retraining_interval_in_minutes', 23)) if retraining_interval_mins < 5: retraining_interval_mins = 5 self.retraining_interval_in_seconds = retraining_interval_mins * 60 self.stop_words = get_stop_words(language) self.higher_threshold = float(self.module_config.get('wmd_higher_threshold', 0.7)) self.lower_threshold = float(self.module_config.get('wmd_lower_threshold', 0.5)) self.num_results = int(self.module_config.get('max_wmd_results', '10')) self.num_instances = int(self.module_config.get('wmd_num_instances', '10')) self.wmd_timeout = int(self.module_config.get('wmd_timeout', '30')) self.logger = logging.getLogger(os.path.basename(sys.argv[0])) self.is_master = int(self.module_config.get('is-master', 0)) self.contribution_factor = int(self.module_config.get('contribution-factor', 500)) self.wmd_model = None self.wmd_instances = [] self.query_results = {} self.process_queues = {} self.chunk_size = 0 self.learning_update_timer = None self.learning_lock = threading.Lock() self.requires_learning = False self.wmd_instances_lock = threading.Lock() def capabilities(self): return self.profile def isMaster(self): return self.is_master def contributionFactor(self): return self.contributionFactor def _updateWMD(self): if self.wmd_model == None: self.logger.info(' | +--> Loading WMD W2V data... ') sys.stdout.flush() wmd_data_path = self.module_config.get('wmd_data_path', None) wmd_filename = os.path.join(wmd_data_path, WMD_FILE) self.wmd_model = models.Word2Vec.load(wmd_filename) self.logger.info('done') self.logger.info(' | +--> Initializing sims... ') sys.stdout.flush() self.wmd_model.init_sims(replace=True) self.logger.info('done') self.logger.info(' | +--> Generating %d WMD-Instances... ' % self.num_instances) sys.stdout.flush() self.setRequiresRetraining(True) self.learning_update_timer = None recreate_wmd_instances(self, self.wmd_model, self.brain, self.num_results, self.num_instances) self.logger.info(' | WMD is ready to be used... ') def setWMDInstances(self, new_wmd_instances, chunk_size): self.wmd_instances_lock.acquire() self.wmd_instances = new_wmd_instances self.chunk_size = chunk_size self.wmd_instances_lock.release() def initForBender(self, benderInstance, jobID=None): self.logger.info(' | Loading WMD i-machine model... ') sys.stdout.flush() self.benderCore = benderInstance self.brain = self.benderCore.getBrain() self._updateWMD() def _performTextQuerySimpleMT(self, query_doc, jobID): # This is tricky......... 
# Let's first initialize all potential instance-results with an empty array in order to # 'allocate' the space # NOTE: As we are using the same code and memory-space for all Job-Processes, # we need to know which job the result belongs to when a Processes sends us # a result back... This is really tricky... self.logger.info('WMD: Will perform MT query') query_results_mutex.acquire() search_processes = [] # Lock the wmd_instances as we need them as they are to start # our processes. Once the processes are started, they should have a # copy of our wmd_instances, so we can release the lock then self.wmd_instances_lock.acquire() self.query_results[jobID] = [] self.process_queues[jobID] = [] for instance in range(0, self.num_instances): self.query_results[jobID].append([]) in_q = Queue() out_q = Queue() self.process_queues[jobID].append([in_q, out_q]) search_process = Process(target=perform_query_for_job_on_instance_process, args=(self, self.wmd_instances, query_doc, jobID, instance, in_q, out_q)) search_processes.append(search_process) for i, sp in enumerate(search_processes): self.logger.info('WMD: launching porocess %d' % i) sp.start() self.logger.info('WMD: LAUNCHEND PROCESS %d' % i) # The processes should now have copies of our wmd_instances, so we can release # this lock... self.wmd_instances_lock.release() # After starting the processes, let's wait for their return # This happens by waiting for the "OUT_Q" having the data we need (OUT from the processes perspective) # Once we receive our result, we send the process in its IN_Q a message that all is well. # Then we can join the process to finish it. # This is something we need to do for each process... for i, process in enumerate(search_processes): self.logger.info('WMD: waiting for child %d' % i) queues = self.process_queues[jobID][i] in_q = queues[0] out_q = queues[1] # Here, we might need a timeout in order not to wait forever... sims = out_q.get() # Send our child that we have received its information in_q.put('THANKS') # Close the comms-queue in_q.close() # Finish the process process.join() if sims != None and len(sims)>0: res = [] # We need to scale the found ids back to our complete corpus size # since the indices we received are relative to each corpus... 
for sim in sims: aSim = (int(sim[0] + (i * self.chunk_size)), sim[1]) res.append(aSim) if sims != None: self.query_results[jobID][i] = res qResults = [] if jobID in self.query_results.keys(): qResults.extend(self.query_results[jobID]) del self.query_results[jobID] if jobID in self.process_queues.keys(): del self.process_queues[jobID] query_results_mutex.release() sims = [] for r in qResults: sims.extend(r) if len(sims): return sims else: return None def _performTextQuerySimple(self, theQuery, num_results=10, jobID=None): global wordTokenizer higherThresholdExists = False query = ' '.join(theQuery.strip().lower().split('\n')) tokens = wordTokenizer.tokenize(query) if self.remove_stop_words: tokens = [w for w in tokens if not w in self.stop_words] query_doc = [w for w in tokens if w.isalpha()] sims = self._performTextQuerySimpleMT(query_doc, jobID) result = [] if sims != None: sims = sorted(sims, key=lambda item: -item[1]) sims = sims[:min(len(sims), num_results)] for sim in sims: if sim[1] > self.lower_threshold: result.append({'response': sim[0], 'confidenceLevel':sim[1], 'responseIsID': True}) if sim[1] > self.higher_threshold: higherThresholdExists = True if len(result) > 0: return result, higherThresholdExists else: return None, False else: return None, None def performTextQuery(self, originalQuery, num_results=10, jobID=None): retArr = [] result, higherThresholdFound = self._performTextQuerySimple(originalQuery, num_results, jobID) if result != None: jcHC = self.benderCore.higherConfidenceLevel() jcLC = self.benderCore.lowerConfidenceLevel() for sim in result: similarityValue = float(sim['confidenceLevel']) # We need to normalize them for BenderCore thresholds # Otherwise BenderCore will either reject or ask a Human self.logger.info('.......... index=%s, sim=%f' % (str(sim['response']), similarityValue)) if similarityValue >= self.lower_threshold: if similarityValue >= self.higher_threshold: multiplier = (1-jcHC) / (1-self.higher_threshold) diff = similarityValue - self.higher_threshold similarityValue = jcHC + (diff * multiplier) else: multiplier = (jcHC - jcLC) / (self.higher_threshold - self.lower_threshold) diff = similarityValue - self.lower_threshold similarityValue = jcLC + (diff * multiplier) sim['confidenceLevel'] = similarityValue self.logger.info('.......... -> index=%s, sim=%f' % (str(sim['response']), similarityValue)) sim['is-master'] = self.is_master sim['contribution-factor'] = self.contribution_factor retArr.append(sim) return retArr def performImageQuery(self, query): return None, None def performAudioQuery(self, query): return None, None def performVideoQuery(self, query): return None, None def reinforceCorrectResponse(self, query, response, inputMediaType, outputMediaType): self.setRequiresRetraining(True) self.resetLearningTimer() return None, None def reinforceWrongResponse(self, query, response, inputMediaType, outputMediaType): self.setRequiresRetraining(True) self.resetLearningTimer() return None, None def setRequiresRetraining(self, flag): self.learning_lock.acquire() self.requires_learning = flag self.learning_lock.release() def getRequiresRetraining(self): return self.requires_learning def resetLearningTimer(self): # Once we have created all WMD instance, we should fire of a timer that, every <interval> seconds # checks whether new learning is required. 
If so, it should update learning if self.learning_update_timer != None: return self.learning_update_timer = threading.Timer(self.retraining_interval_in_seconds, recreate_wmd_instances, args=(self, self.wmd_model, self.brain, self.num_results, self.num_instances)) self.learning_update_timer.start() def retrainingDone(self): # Re-schedules re-training self.learning_update_timer = threading.Timer(self.retraining_interval_in_seconds, recreate_wmd_instances, args=(self, self.wmd_model, self.brain, self.num_results, self.num_instances)) self.learning_update_timer.start()
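
# perform_query_for_job_on_instance_process() and create_wmd_instances_process()
# above hand their results to the parent on one Queue, then block until the
# parent acknowledges on a second Queue before exiting. A stripped-down sketch of
# that two-queue handshake; the worker payload here is illustrative only.
from multiprocessing import Process, Queue

def handshake_worker(in_q, out_q, numbers):
    out_q.put(sum(numbers))  # send the result to the parent
    in_q.get()               # wait for the parent's receipt before exiting

if __name__ == '__main__':
    in_q, out_q = Queue(), Queue()
    p = Process(target=handshake_worker, args=(in_q, out_q, [1, 2, 3]))
    p.start()
    result = out_q.get()     # parent retrieves the result first...
    in_q.put('THANKS')       # ...then tells the child it is safe to exit
    p.join()
    print(result)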
test_executors.py
import multiprocessing
import sys
import threading
import time
from datetime import timedelta
from unittest.mock import MagicMock

import pytest

import prefect
from prefect.utilities.executors import Heartbeat, timeout_handler


def test_heartbeat_calls_function_on_interval():
    class A:
        def __init__(self):
            self.called = 0

        def __call__(self):
            self.called += 1

    a = A()
    timer = Heartbeat(0.09, a)
    timer.start()
    time.sleep(0.2)
    timer.cancel()
    timer.join()
    assert a.called == 2


def test_timeout_handler_times_out():
    slow_fn = lambda: time.sleep(2)
    with pytest.raises(TimeoutError):
        timeout_handler(slow_fn, timeout=1)


def test_timeout_handler_passes_args_and_kwargs_and_returns():
    def do_nothing(x, y=None):
        return x, y

    assert timeout_handler(do_nothing, 5, timeout=1, y="yellow") == (5, "yellow")


def test_timeout_handler_doesnt_swallow_bad_args():
    def do_nothing(x, y=None):
        return x, y

    with pytest.raises(TypeError):
        timeout_handler(do_nothing, timeout=1)
    with pytest.raises(TypeError):
        timeout_handler(do_nothing, 5, timeout=1, z=10)
    with pytest.raises(TypeError):
        timeout_handler(do_nothing, 5, timeout=1, y="s", z=10)


def test_timeout_handler_reraises():
    def do_something():
        raise ValueError("test")

    with pytest.raises(ValueError, match="test"):
        timeout_handler(do_something, timeout=1)


@pytest.mark.skipif(sys.platform == "win32", reason="Test fails on Windows")
def test_timeout_handler_allows_function_to_spawn_new_process():
    def my_process():
        p = multiprocessing.Process(target=lambda: 5)
        p.start()
        p.join()
        p.terminate()

    assert timeout_handler(my_process, timeout=1) is None


@pytest.mark.skipif(sys.platform == "win32", reason="Test fails on Windows")
def test_timeout_handler_allows_function_to_spawn_new_thread():
    def my_thread():
        t = threading.Thread(target=lambda: 5)
        t.start()
        t.join()

    assert timeout_handler(my_thread, timeout=1) is None


def test_timeout_handler_doesnt_do_anything_if_no_timeout(monkeypatch):
    monkeypatch.delattr(prefect.utilities.executors, "ThreadPoolExecutor")
    with pytest.raises(NameError):  # to test the test's usefulness...
        timeout_handler(lambda: 4, timeout=1)
    assert timeout_handler(lambda: 4) == 4


def test_timeout_handler_preserves_context():
    def my_fun(x, **kwargs):
        return prefect.context.get("test_key")

    with prefect.context(test_key=42):
        res = timeout_handler(my_fun, 2, timeout=1)
        assert res == 42


def test_timeout_handler_preserves_logging(caplog):
    timeout_handler(prefect.Flow("logs").run, timeout=2)
    assert len(caplog.records) >= 2  # 1 INFO to start, 1 INFO to end
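
# The tests above exercise Prefect's timeout_handler. One common way to build
# such a helper, not necessarily Prefect's actual implementation, is to submit
# the call to a single-worker ThreadPoolExecutor and bound the wait on its
# future. Note this only stops waiting for the worker thread; it cannot kill it.
import concurrent.futures

def run_with_timeout(fn, *args, timeout=None, **kwargs):
    if timeout is None:
        return fn(*args, **kwargs)
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    try:
        future = executor.submit(fn, *args, **kwargs)
        return future.result(timeout=timeout)
    except concurrent.futures.TimeoutError:
        raise TimeoutError("Execution timed out.")
    finally:
        executor.shutdown(wait=False)  # do not block on a still-running call

print(run_with_timeout(lambda x: x + 1, 4, timeout=1))  # -> 5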
radiation_measurement.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue May 7 11:33:50 2019 @author: callie_macbookair NOTE: You MUST have a sleep time in between starting the digitizer and starting fifo. The digitizer needs time start up and fifo will time out if you do not wait. I suggest something like 5-7 seconds. """ import logging import os import time import subprocess from makeHistogram import makeHistogram_noTable import matplotlib.pyplot as plt import numpy as np import threading class radiation_measurement(): #On init start logging and set the buffer time (how often access shared memory) def __init__(self): # Turn on logging so can get some debugging action logging.basicConfig(format='%(asctime)s %(message)s') self.log = logging.getLogger() self.log.setLevel(logging.DEBUG) self.log.debug('Logging has started') #Time to wait before grabbing data from shared memory again, used in FIFO self.bufferTime = 0.1 # digitizer_location = '/home/kgoetz/Documents/digiTES_4.5.13/bin' # save_location = '/home/kgoetz/Documents' # run_name = 'test_run' #This method starts the digitizer running independantly in its own terminal #It requires a measurement time, the location the digitizer is writing to, the location where you want to save you data from the run and the run name def run_digitizer(self,measurement_time,digitizer_location,save_location,run_name): #Autogenerate a digitizer command file that will be used to pipe commands into digites f = open(digitizer_location+'/digitizer_command.txt', 'w') #put script in folder with digitizer f.write('\n') f.write('\n') f.write('s\n') f.write('\n') f.write('l\n') for n in range(0,measurement_time-2): f.write('\n') f.write('h\n') f.write('\n') f.write('s\n') f.write('\n') f.write('q\n') f.close() #Wait 1 second to let it finish writing file time.sleep(1) #Autogenerate script that runs digitizer and moves data safely to named folder when digitizer is finished d = open('run_digiTES.bash', 'w') #put script in current directory d.write('#!/bin/bash \n') d.write('cd '+digitizer_location+'/ \n') #change to digitizer location d.write('\n') d.write('while read input\n') d.write('do echo "$input"\n') d.write('\t sleep 1\n') d.write('done < digitizer_command.txt | ./digiTES\n') d.write('\n') d.write('cd '+save_location+'/ \n') #change to digitizer location d.write('mkdir '+run_name+'\n') #make a folder with the run name d.write('cp '+digitizer_location+'/DataFiles/* '+save_location+'/'+run_name+'/ \n') #move all data from folder digites writes to folder with run name d.write('\n') d.close() #Wait 1 second to let it finish writing file time.sleep(1) #Start digitizer process self.proc = subprocess.call(['gnome-terminal','-x','./run_digiTES.bash']) self.log.debug('Digitizer Started') def acquire_data(self,digitizer_location, channels_activated=[True,True,True,True]): ## CARL ################# # This method is untested and I'm not sure if its right but you said you are familiar with it and it should give you an idea of how I was thinking about starting up a new thread #I think we want to use a channel mask because enabling/disabling channels will be easy #Wait 7 seconds after start acquiring to let digitizer have some time to start up time.sleep(7) #If channel 0 is enabled, start reading in if channel_mask[0]: filename = digitizer_location+'/DataFiles/Run0_List_0_0.txt' #Start new thread with fifo self.ch0 = threading.Thread(target=self.fifo,args=(filename,0)) self.ch0.start() #If channel 1 is enabled, start reading in if channel_mask[1]: filename = 
digitizer_location+'/DataFiles/Run0_List_0_1.txt' #Start new thread with fifo self.ch1 = threading.Thread(target=self.fifo,args=(filename,1)) self.ch1.start() #If channel 2 is enabled, start reading in if channel_mask[2]: filename = digitizer_location+'/DataFiles/Run0_List_0_2.txt' #Start new thread with fifo self.ch2 = threading.Thread(target=self.fifo,args=(filename,2)) self.ch2.start() #If channel 3 is enabled, start reading in if channel_mask[3]: filename = digitizer_location+'/DataFiles/Run0_List_0_3.txt' #Start new thread with fifo self.ch3 = threading.Thread(target=self.fifo,args=(filename,3)) self.ch3.start() # THIS METHOD OPENS A PIPE TO SHARED MEMORY, GRABS DATA COMING IN EVERY X NUMBER OF SECONDS (DEFINED BY BUFFER_TIME) AND OUTPUTS NUMPY ARRAYS FOR TIMESTAMP (PICOSECONDS) # EVENT ENERGY(ADC COUNTS), PSD VALUE AND ENERGY HISTOGRAM # IT NEEDS A FILE NAME AND A CHANNEL NUMBER def fifo(self,filename,channel_number): #Check that digitizer is running if not os.path.exists(filename): self.log.debug('Start acquistion please') #If its not running wait 5 seconds time.sleep(5) else: self.log.debug('Acquistion running, pipe present: OK') self.log.debug('Pipe to data for channel '+str(channel_number)+' is open for reading') #set new pipe variable to true at beginning of run newPipe=True #Initialize wait count to 0 wait_count = 0 #Open the pipe to shared memory with open(filename, 'r') as fifo: # initialize empty arrays tr = [] l = [] psd = [] while True: data = fifo.read().splitlines() #split incoming data into lines based on carriage return #Set up a time out so the code stops when the digitizer does if not data and wait_count < 11: self.log.debug('Waiting for data from digitizer on channel '+str(channel_number)) time.sleep(0.5) wait_count = wait_count+1 continue elif not data and wait_count == 11: self.log.debug('Digitizer has stopped, time out criteria reached, quitting shared memory access on channel '+str(channel_number)) return #Reset wait count to 0 every time get new data wait_count = 0 ############################ NOTE ############################ # Sometimes fifo accessses shared memory and grabs data when the digitizer is still writing a line, most of the things below are for dealing with that #Grab the first line to fill first first = data[0] first_words = first.split(' ') while '' in first_words: #get rid of extra spaces first_words.remove('') #if this is a new file, no need to worry if things have been cut off, but do fill last if newPipe is True: for line in data[-1]: words = line.split(' ') # split line into space delimited words while '' in words: #get rid of extra spaces words.remove('') # print("Received Data: " + str(words)) #build 1D arrays with list mode data if len(words) == 3: tr.append(words[0]) # trigger time in ps l.append(words[1]) # total integrated charge of event in counts psd.append(words[2]) # psd value else: #Just a bit of error checking print("Read error on channel "+str(channel_number)+", skipping line") print("Data line on channel "+str(channel_number)+" is:") print(words) #uncomment for debugging # print("Long gate is: " + str(l)) time.sleep(self.bufferTime) # wait x number of seconds to check for data coming in on pipe last = data[-1] #new pipe is now false newPipe = False #if its not a new pipe, proceed as normal else: for line in data[1:-1]: words = line.split(' ') # split line into space delimited words while '' in words: words.remove('') #build 1D arrays with list mode data tr.append(float(words[0])*(10**-12)) # trigger time in ns 
l.append(float(words[1])) # total integrated charge of event in counts psd.append(float(words[2])) # psd value newline = [] last_words = last.split(' ') while '' in last_words: #get rid of extra spaces last_words.remove('') #if both the last line of the old data file and the first line are fine then append them as expected if len(last_words) == 3 and len(first_words) ==3: tr.append(last_words[0]) # trigger time in ns l.append(last_words[1]) # total integrated charge of event in counts psd.append(last_words[2]) # psd value tr.append(first_words[0]) # trigger time in ns l.append(first_words[1]) # total integrated charge of event in counts psd.append(first_words[2]) # psd value #if last words and first words have the length of 2 then they split in the long gate elif len(last_words) == 2 and len(first_words) ==2: print('Read split in long gate value in channel '+str(channel_number)+'. Fixing.') newline = last+first print("Whole line in channel "+str(channel_number)+" is: " + newline) new_words = newline.split(' ') while '' in new_words: #get rid of extra spaces new_words.remove('') tr.append(new_words[0]) # trigger time in ps l.append(new_words[1]) # total integrated charge of event in counts psd.append(new_words[2]) # psd value # self.log.debug('Read split in long gate value. Fixing.') #if the last word is 3 but the first word is 1 then split on the PSD value elif len(last_words) == 3 and len(first_words) ==1: print('Read split in PSD value in channel '+str(channel_number)+'. Fixing.') newline = last+first print("Whole line in channel "+str(channel_number)+" is: " + newline) new_words = newline.split(' ') while '' in new_words: #get rid of extra spaces new_words.remove('') tr.append(new_words[0]) # trigger time in ps l.append(new_words[1]) # total integrated charge of event in counts psd.append(new_words[2]) # psd value # self.log.debug('Read split in PSD value. Fixing.') #if the last word is 1 and the first word is 3 then split on the time stamp elif len(last_words) == 1 and len(first_words) ==3: print("Read split in time stamp in channel "+str(channel_number)+", fixing.") newline = last+first print("Whole line in channel "+str(channel_number)+" is: " + newline) new_words = newline.split(' ') while '' in new_words: #get rid of extra spaces new_words.remove('') tr.append(new_words[0]) # trigger time in ps l.append(new_words[1]) # total integrated charge of event in counts psd.append(new_words[2]) # psd value # self.log.debug('Read split in timestamp. Fixing.') # #Use this to catch if it splits in unexpected way else: #Write to log file here! # self.log.warning('Something very weird just happened. Read is split in an unknown way. 
This event is being discarded.')
                            print("Read split in unknown way in channel "+str(channel_number)+", fixing")
                            print("Last line in channel "+str(channel_number)+" is:")
                            print(last)
                            print("First line in channel "+str(channel_number)+" is:")
                            print(first)
                            print("Whole line in channel "+str(channel_number)+" is: " + last+first)
                            newline = last+first
                            new_words = newline.split(' ')
                            while '' in new_words: #get rid of extra spaces
                                new_words.remove('')
                            # self.log.debug(first)
                            tr.append(new_words[0])  # trigger time in ps
                            l.append(new_words[1])   # total integrated charge of event in counts
                            psd.append(new_words[2]) # psd value

                        time.sleep(self.bufferTime) # wait x number of seconds to check for data coming in on pipe
                        last = data[-1] #fill last line of data for comparison in next batch

                    #uncomment for debugging (check whether data coming in)
                    #print("Received Data: " + str(data))

                    #Make nice numpy arrays out of the long gate and time stamp;
                    #these arrays update with every grab from shared memory
                    self.timestamp = np.array(tr, dtype=np.float)*(10**-12) #put the time stamp in seconds
                    self.event = np.array(l, dtype=np.float)   #event is the total integrated value in bin# for a given event
                    self.psd = np.array(psd, dtype=np.float)   #PSD value

                    print("Number of Events in channel "+str(channel_number)+": " + str(len(self.event)))
                    if len(self.event) > 0:
                        [self.hist, self.bins] = makeHistogram_noTable(self.event)
                        # plt.plot(bins, hist)
                        # plt.xlim(0, 2**14)
                        # plt.show()
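
# ---------------------------------------------------------------------------
# Illustrative sketch only (not used above): the "line split across two reads"
# repair that fifo() performs, reduced to a standalone helper.  A complete
# event is three whitespace-separated fields (timestamp, charge, psd); when a
# read boundary lands inside a line, the tail of the previous read and the
# head of the next read are glued back together and re-split.
# ---------------------------------------------------------------------------
def repair_split_event(last_line, first_line):
    """Return a list of complete 3-field events recovered at a read boundary."""
    last_words = last_line.split()
    first_words = first_line.split()
    if len(last_words) == 3 and len(first_words) == 3:
        # Nothing was cut; both boundary lines are complete events.
        return [last_words, first_words]
    # The boundary fell mid-line: concatenate the fragments and re-split,
    # mirroring the `newline = last + first` handling above.
    return [(last_line + first_line).split()]

# Example with hypothetical values: a timestamp cut as '123' + '456 2048 0.31'
# is repaired to the single event ['123456', '2048', '0.31'].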
create_range_image_in_kitti.py
#keep full resolution range image import numpy as np import pickle as pkl from pdb import set_trace import os from kitti_utils import Calibration from pathlib import Path import matplotlib.pyplot as plt import tqdm import collections import argparse from queue import Queue from threading import Thread def parse_args(): parser = argparse.ArgumentParser(description='Create range images in KITTI') parser.add_argument('--source-dir', help='path to KITTI in MMDet3D format', type=str) parser.add_argument('--target-dir', help='path to save the extracted data') parser.add_argument('--num-threads', help='path to save the extracted data', type=int, default=10) args = parser.parse_args() return args def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib): """ Args: boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords calib: Returns: boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center """ xyz_camera = boxes3d_camera[:, 0:3] l, h, w, r = boxes3d_camera[:, 3:4], boxes3d_camera[:, 4:5], boxes3d_camera[:, 5:6], boxes3d_camera[:, 6:7] xyz_lidar = calib.rect_to_lidar(xyz_camera) xyz_lidar[:, 2] += h[:, 0] / 2 return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1) def to_xyz0z1(bbox_type7): #[n, r, 7] batch_size, num_bbox, _ = bbox_type7.shape dtype = bbox_type7.dtype xy_4pts = np.full((batch_size, num_bbox, 4, 2), 0, dtype = dtype) xy_4pts[:,:,0,:] = np.array([[[ 0.5, -0.5]]], dtype = dtype) * bbox_type7[:,:,3:5] xy_4pts[:,:,1,:] = np.array([[[-0.5, -0.5]]], dtype = dtype) * bbox_type7[:,:,3:5] xy_4pts[:,:,2,:] = np.array([[[-0.5, 0.5]]], dtype = dtype) * bbox_type7[:,:,3:5] xy_4pts[:,:,3,:] = np.array([[[ 0.5, 0.5]]], dtype = dtype) * bbox_type7[:,:,3:5] cosa = np.cos(bbox_type7[:,:,-1]) sina = np.sin(bbox_type7[:,:,-1]) rot_mat = np.stack([cosa, -sina, sina, cosa], axis = -1).reshape(batch_size, num_bbox, 2, 2) rot_4pts = np.einsum('nrij,nrmj->nrmi', rot_mat, xy_4pts) rot_4pts = rot_4pts + bbox_type7[:,:,None,:2] rot_4pts = rot_4pts.reshape(batch_size, num_bbox, 8) z0 = bbox_type7[:,:,2] - bbox_type7[:,:,5] / 2 z1 = bbox_type7[:,:,2] + bbox_type7[:,:,5] / 2 bbox_xyz0z1 = np.concatenate([rot_4pts, z0[:,:,None], z1[:,:,None]], axis = 2) return bbox_xyz0z1 def to_8pts(bbox_4pts): bbox_4pts = bbox_4pts.astype(np.float32) xy = bbox_4pts[:,:8].reshape(-1,4,2) z_bot = bbox_4pts[:,8] z_bot = np.tile(z_bot[:,None],(1,4)) z_top = bbox_4pts[:,9] z_top = np.tile(z_top[:,None],(1,4)) xyz_bot = np.concatenate([xy, z_bot[:,:,None]],axis = 2) xyz_top = np.concatenate([xy, z_top[:,:,None]],axis = 2) bbox_8pts = np.concatenate([xyz_bot,xyz_top], axis = 1) return bbox_8pts def name_to_cls(names): cls_mapping = {'Car':1, 'Pedestrian':2, 'Cyclist':4} gt_class = [] for name in names: if name in cls_mapping: gt_class.append(cls_mapping[name]) else: gt_class.append(-1) gt_class = np.array(gt_class) return gt_class def get_pc(target_dir, pc_idx, is_test=False): if is_test: path = '{}/testing/velodyne/{}.bin'.format(target_dir, pc_idx) else: path = '{}/training/velodyne/{}.bin'.format(target_dir, pc_idx) pc = np.fromfile(path, dtype = np.float32).reshape(-1, 4) return pc def get_gt_bbox(location, dimensions, rotation_y, calib): ''' location: object location x,y,z in camera coordinates (in meters) dimensions 3D object dimensions: height, width, length (in meters) rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi] ''' gt_bbox_camera = np.concatenate([location, dimensions, rotation_y[:,None]], axis = 1).astype(np.float32) gt_bbox_lidar = 
boxes3d_kitti_camera_to_lidar(gt_bbox_camera, calib) bbox_xyz0z1 = to_xyz0z1(gt_bbox_lidar[None, :, :]).squeeze(0) bbox_8pts = to_8pts(bbox_xyz0z1) return bbox_8pts def get_range_image(pc, incl, height): incl_deg = incl * 180 / 3.1415 # print(incl - np.roll(incl, 1)) xy_norm = np.linalg.norm(pc[:, :2], ord = 2, axis = 1) error_list = [] for i in range(len(incl)): h = height[i] theta = incl[i] error = np.abs(theta - np.arctan2(h - pc[:,2], xy_norm)) error_list.append(error) all_error = np.stack(error_list, axis=-1) row_inds = np.argmin(all_error, axis=-1) azi = np.arctan2(pc[:,1], pc[:,0]) width = 2048 col_inds = width - 1.0 + 0.5 - (azi + np.pi) / (2.0 * np.pi) * width col_inds = np.round(col_inds).astype(np.int32) col_inds[col_inds == width] = width - 1 col_inds[col_inds < 0] = 0 empty_range_image = np.full((64, width, 5), -1, dtype = np.float32) point_range = np.linalg.norm(pc[:,:3], axis = 1, ord = 2) order = np.argsort(-point_range) point_range = point_range[order] pc = pc[order] row_inds = row_inds[order] col_inds = col_inds[order] empty_range_image[row_inds, col_inds, :] = np.concatenate([point_range[:,None], pc], axis = 1) return empty_range_image def get_calib(source_dir, idx, is_test): # if is_test: # path = '/mnt/truenas/scratch/zhichao.li/Data/KITTI/testing/calib' # else: # path = '/mnt/truenas/scratch/zhichao.li/Data/KITTI/training/calib' if is_test: path = os.path.join(source_dir, 'testing/calib') else: path = os.path.join(source_dir, 'training/calib') calib_file = os.path.join(path, '{}.txt'.format(idx)) p = Path(calib_file) assert p.exists() return Calibration(p) def crop_range_image(range_image): # width = 2083 // 4 mid = 2083 // 2 beg = mid - 256 end = mid + 256 return range_image[:,beg:end,:] def process_single_frame(frame, source_dir, target_dir, split, roidb_list): pc_idx = frame['point_cloud']['lidar_idx'] if split != 'test': calib = get_calib(source_dir, pc_idx, split=='test') annos = frame['annos'] gt_class = name_to_cls(annos['name']) gt_bbox = get_gt_bbox(annos['location'], annos['dimensions'], annos['rotation_y'], calib) else: gt_class = np.ones(0,dtype=np.float32) gt_bbox = np.zeros(0,dtype=np.float32) pc = get_pc(source_dir, pc_idx, split=='test') pc_url = os.path.join(target_dir, '{}/{}.npz'.format(npz_dirname, pc_idx)) range_image = get_range_image(pc, incl, height) range_image_mask = range_image[..., 0] > -1 roidb = { 'gt_class':gt_class, 'gt_bbox_imu':gt_bbox, 'pc_url':pc_url } roidb_list.append(roidb) np.savez( pc_url, range_image=range_image, range_image_mask=range_image_mask, ) def process_task_worker(frame_queue, source_dir, target_dir, split, roidb_list): while True: qsize = frame_queue.qsize() if qsize > 0: if qsize % 10 == 0: print('{} {} frames left'.format(qsize, split)) frame = frame_queue.get() else: print("No task left, break down.") break try: process_single_frame(frame, source_dir, target_dir, split, roidb_list) except Exception as e: print('Error: ', e) continue if __name__ == '__main__': # KITTI scanning parameters, obtained from Hough transformation height = np.array( [0.20966667, 0.2092 , 0.2078 , 0.2078 , 0.2078 , 0.20733333, 0.20593333, 0.20546667, 0.20593333, 0.20546667, 0.20453333, 0.205 , 0.2036 , 0.20406667, 0.2036 , 0.20313333, 0.20266667, 0.20266667, 0.20173333, 0.2008 , 0.2008 , 0.2008 , 0.20033333, 0.1994 , 0.20033333, 0.19986667, 0.1994 , 0.1994 , 0.19893333, 0.19846667, 0.19846667, 0.19846667, 0.12566667, 0.1252 , 0.1252 , 0.12473333, 0.12473333, 0.1238 , 0.12333333, 0.1238 , 0.12286667, 0.1224 , 0.12286667, 0.12146667, 
0.12146667, 0.121, 0.12053333, 0.12053333, 0.12053333, 0.12006667,
         0.12006667, 0.1196, 0.11913333, 0.11866667, 0.1182, 0.1182,
         0.1182, 0.11773333, 0.11726667, 0.11726667, 0.1168, 0.11633333,
         0.11633333, 0.1154])

    zenith = np.array([
         0.03373091,  0.02740409,  0.02276443,  0.01517224,  0.01004049,  0.00308099,
        -0.00155868, -0.00788549, -0.01407172, -0.02103122, -0.02609267, -0.032068,
        -0.03853542, -0.04451074, -0.05020488, -0.0565317,  -0.06180405, -0.06876355,
        -0.07361411, -0.08008152, -0.08577566, -0.09168069, -0.09793721, -0.10398284,
        -0.11052055, -0.11656618, -0.12219002, -0.12725147, -0.13407038, -0.14067839,
        -0.14510716, -0.15213696, -0.1575499,  -0.16711043, -0.17568678, -0.18278688,
        -0.19129293, -0.20247031, -0.21146846, -0.21934183, -0.22763699, -0.23536977,
        -0.24528179, -0.25477201, -0.26510582, -0.27326038, -0.28232882, -0.28893683,
        -0.30004392, -0.30953414, -0.31993824, -0.32816311, -0.33723155, -0.34447224,
        -0.352908,   -0.36282001, -0.37216965, -0.38292524, -0.39164219, -0.39895318,
        -0.40703745, -0.41835542, -0.42777535, -0.43621111])

    incl = -zenith

    args = parse_args()
    data_splits = ['training', 'validation', 'test']
    source_dir = os.path.abspath(args.source_dir)
    target_dir = os.path.abspath(args.target_dir)
    os.makedirs(target_dir, exist_ok=True)
    num_threads = args.num_threads

    for split in data_splits:
        if split == 'training':
            npz_dirname = 'npz_trainval'
            info_path = os.path.join(source_dir, 'kitti_infos_train.pkl')
        elif split == 'validation':
            npz_dirname = 'npz_trainval'
            info_path = os.path.join(source_dir, 'kitti_infos_val.pkl')
        elif split == 'test':
            npz_dirname = 'npz_test'
            info_path = os.path.join(source_dir, 'kitti_infos_test.pkl')

        npz_dirpath = os.path.join(target_dir, npz_dirname)
        # 'npz_trainval' is shared by the training and validation splits, so the
        # directory may already exist on the second pass.
        os.makedirs(npz_dirpath, exist_ok=True)

        print(f'Begin processing {split} split, and all created data will be saved under: {target_dir}')

        data_set = pkl.load(open(info_path, 'rb'))
        roidb_list = []
        frame_queue = Queue()
        for i, frame in enumerate(data_set):
            frame_queue.put(frame)

        workers = [
            Thread(target=process_task_worker,
                   args=(frame_queue, source_dir, target_dir, split, roidb_list))
            for _ in range(num_threads)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()

        print(f'Got {len(roidb_list)} frames in {split} split.')

        if split == 'training':
            with open(os.path.join(target_dir, 'training.roidb'), 'wb') as fw:
                pkl.dump(roidb_list, fw)
        elif split == 'validation':
            with open(os.path.join(target_dir, 'validation.roidb'), 'wb') as fw:
                pkl.dump(roidb_list, fw)
        elif split == 'test':
            with open(os.path.join(target_dir, 'test.roidb'), 'wb') as fw:
                pkl.dump(roidb_list, fw)
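
# ---------------------------------------------------------------------------
# Illustrative sketch only (not called above): the row/column projection that
# get_range_image() applies to every point, reduced to a single-point helper.
# `incl` and `height` are the per-ring inclination angles and sensor heights
# (the Hough-fitted arrays defined in __main__); `width` matches the
# 2048-column image built above.
# ---------------------------------------------------------------------------
def project_point(point, incl, height, width=2048):
    """Return the (row, col) pixel of one lidar point (x, y, z)."""
    x, y, z = point[0], point[1], point[2]
    xy_norm = np.linalg.norm([x, y], ord=2)
    # Row: the laser ring whose inclination best explains this point.
    errors = np.abs(incl - np.arctan2(height - z, xy_norm))
    row = int(np.argmin(errors))
    # Column: azimuth mapped onto [0, width), matching the formula above.
    azi = np.arctan2(y, x)
    col = int(round(width - 1.0 + 0.5 - (azi + np.pi) / (2.0 * np.pi) * width))
    col = min(max(col, 0), width - 1)
    return row, col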
midi_hub.py
# Copyright 2020 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A module for interfacing with the MIDI environment.""" # TODO(adarob): Use flattened imports. import abc import collections import re import threading import time from magenta.common import concurrency from magenta.music.protobuf import music_pb2 import mido from six.moves import queue as Queue import tensorflow.compat.v1 as tf _DEFAULT_METRONOME_TICK_DURATION = 0.05 _DEFAULT_METRONOME_PROGRAM = 117 # Melodic Tom _DEFAULT_METRONOME_MESSAGES = [ mido.Message(type='note_on', note=44, velocity=64), mido.Message(type='note_on', note=35, velocity=64), mido.Message(type='note_on', note=35, velocity=64), mido.Message(type='note_on', note=35, velocity=64), ] _DEFAULT_METRONOME_CHANNEL = 1 # 0-indexed. _DRUM_CHANNEL = 9 try: # The RtMidi backend is easier to install and has support for virtual ports. import rtmidi # pylint: disable=unused-import,g-import-not-at-top mido.set_backend('mido.backends.rtmidi') except ImportError: # Tries to use PortMidi backend by default. tf.logging.warn('Could not import RtMidi. Virtual ports are disabled.') class MidiHubError(Exception): # pylint:disable=g-bad-exception-name """Base class for exceptions in this module.""" pass def get_available_input_ports(): """Returns a list of available input MIDI ports.""" return mido.get_input_names() def get_available_output_ports(): """Returns a list of available output MIDI ports.""" return mido.get_output_names() class MidiSignal(object): """A class for representing a MIDI-based event signal. Provides a `__str__` method to return a regular expression pattern for matching against the string representation of a mido.Message with wildcards for unspecified values. Supports matching for message types 'note_on', 'note_off', and 'control_change'. If a mido.Message is given as the `msg` argument, matches against the exact message, ignoring the time attribute. If a `msg` is not given, keyword arguments must be provided matching some non-empty subset of those listed as a value for at least one key in `_VALID_ARGS`. Examples: # A signal that matches any 'note_on' message. note_on_signal = MidiSignal(type='note_on') # A signal that matches any 'note_on' or 'note_off' message with a pitch # value of 4 and a velocity of 127. note_signal = MidiSignal(note=4, velocity=127) # A signal that matches a specific mido.Message exactly (ignoring time). msg = mido.Message(type='control_signal', control=1, value=127) control_1_127_signal = MidiSignal(msg=msg) Args: msg: A mido.Message that should be matched exactly (excluding the time attribute) or None if wildcards are to be used. **kwargs: Valid mido.Message arguments. Those that are not provided will be treated as wildcards. Raises: MidiHubError: If the message type is unsupported or the arguments are not in the valid set for the given or inferred type. 
""" _NOTE_ARGS = set(['type', 'note', 'program_number', 'velocity']) _CONTROL_ARGS = set(['type', 'control', 'value']) _VALID_ARGS = { 'note_on': _NOTE_ARGS, 'note_off': _NOTE_ARGS, 'control_change': _CONTROL_ARGS, } def __init__(self, msg=None, **kwargs): if msg is not None and kwargs: raise MidiHubError( 'Either a mido.Message should be provided or arguments. Not both.') type_ = msg.type if msg is not None else kwargs.get('type') if 'type' in kwargs: del kwargs['type'] if type_ is not None and type_ not in self._VALID_ARGS: raise MidiHubError( "The type of a MidiSignal must be either 'note_on', 'note_off', " "'control_change' or None for wildcard matching. Got '%s'." % type_) # The compatible mido.Message types. inferred_types = [type_] if type_ is not None else [] # If msg is not provided, check that the given arguments are valid for some # message type. if msg is None: if type_ is not None: for arg_name in kwargs: if arg_name not in self._VALID_ARGS[type_]: raise MidiHubError( "Invalid argument for type '%s': %s" % (type_, arg_name)) else: if kwargs: for name, args in self._VALID_ARGS.items(): if set(kwargs) <= args: inferred_types.append(name) if not inferred_types: raise MidiHubError( 'Could not infer a message type for set of given arguments: %s' % ', '.join(kwargs)) # If there is only a single valid inferred type, use it. if len(inferred_types) == 1: type_ = inferred_types[0] self._msg = msg self._kwargs = kwargs self._type = type_ self._inferred_types = inferred_types def to_message(self): """Returns a message using the signal's specifications, if possible.""" if self._msg: return self._msg if not self._type: raise MidiHubError('Cannot build message if type is not inferrable.') return mido.Message(self._type, **self._kwargs) def __str__(self): """Returns a regex pattern for matching against a mido.Message string.""" if self._msg is not None: regex_pattern = '^' + mido.messages.format_as_string( self._msg, include_time=False) + r' time=\d+.\d+$' else: # Generate regex pattern. parts = ['.*' if self._type is None else self._type] for name in mido.messages.SPEC_BY_TYPE[self._inferred_types[0]][ 'value_names']: if name in self._kwargs: parts.append('%s=%d' % (name, self._kwargs[name])) else: parts.append(r'%s=\d+' % name) regex_pattern = '^' + ' '.join(parts) + r' time=\d+.\d+$' return regex_pattern class Metronome(threading.Thread): """A thread implementing a MIDI metronome. Args: outport: The Mido port for sending messages. qpm: The integer quarters per minute to signal on. start_time: The float wall time in seconds to treat as the first beat for alignment. If in the future, the first tick will not start until after this time. stop_time: The float wall time in seconds after which the metronome should stop, or None if it should continue until `stop` is called. program: The MIDI program number to use for metronome ticks. signals: An ordered collection of MidiSignals whose underlying messages are to be output on the metronome's tick, cyclically. A None value can be used in place of a MidiSignal to output nothing on a given tick. duration: The duration of the metronome's tick. channel: The MIDI channel to output on. 
""" daemon = True def __init__(self, outport, qpm, start_time, stop_time=None, program=_DEFAULT_METRONOME_PROGRAM, signals=None, duration=_DEFAULT_METRONOME_TICK_DURATION, channel=None): self._outport = outport self.update( qpm, start_time, stop_time, program, signals, duration, channel) super(Metronome, self).__init__() def update(self, qpm, start_time, stop_time=None, program=_DEFAULT_METRONOME_PROGRAM, signals=None, duration=_DEFAULT_METRONOME_TICK_DURATION, channel=None): """Updates Metronome options.""" # Locking is not required since variables are independent and assignment is # atomic. self._channel = _DEFAULT_METRONOME_CHANNEL if channel is None else channel # Set the program number for the channels. self._outport.send( mido.Message( type='program_change', program=program, channel=self._channel)) self._period = 60. / qpm self._start_time = start_time self._stop_time = stop_time if signals is None: self._messages = _DEFAULT_METRONOME_MESSAGES else: self._messages = [s.to_message() if s else None for s in signals] self._duration = duration def run(self): """Sends message on the qpm interval until stop signal received.""" sleeper = concurrency.Sleeper() while True: now = time.time() tick_number = max(0, int((now - self._start_time) // self._period) + 1) tick_time = tick_number * self._period + self._start_time if self._stop_time is not None and self._stop_time < tick_time: break sleeper.sleep_until(tick_time) metric_position = tick_number % len(self._messages) tick_message = self._messages[metric_position] if tick_message is None: continue tick_message.channel = self._channel self._outport.send(tick_message) if tick_message.type == 'note_on': sleeper.sleep(self._duration) end_tick_message = mido.Message( 'note_off', note=tick_message.note, channel=self._channel) self._outport.send(end_tick_message) def stop(self, stop_time=0, block=True): """Signals for the metronome to stop. Args: stop_time: The float wall time in seconds after which the metronome should stop. By default, stops at next tick. block: If true, blocks until thread terminates. """ self._stop_time = stop_time if block: self.join() class MidiPlayer(threading.Thread): """A thread for playing back a NoteSequence proto via MIDI. The NoteSequence times must be based on the wall time. The playhead matches the wall clock. The playback sequence may be updated at any time if `allow_updates` is set to True. Args: outport: The Mido port for sending messages. sequence: The NoteSequence to play. start_time: The float time before which to strip events. Defaults to construction time. Events before this time will be sent immediately on start. allow_updates: If False, the thread will terminate after playback of `sequence` completes and calling `update_sequence` will result in an exception. Otherwise, the the thread will stay alive until `stop` is called, allowing for additional updates via `update_sequence`. channel: The MIDI channel to send playback events. offset: The float time in seconds to adjust the playback event times by. """ def __init__(self, outport, sequence, start_time=time.time(), allow_updates=False, channel=0, offset=0.0): self._outport = outport self._channel = channel self._offset = offset # Set of notes (pitches) that are currently on. self._open_notes = set() # Lock for serialization. self._lock = threading.RLock() # A control variable to signal when the sequence has been updated. self._update_cv = threading.Condition(self._lock) # The queue of mido.Message objects to send, sorted by ascending time. 
self._message_queue = collections.deque() # An event that is set when `stop` has been called. self._stop_signal = threading.Event() # Initialize message queue. # We first have to allow "updates" to set the initial sequence. self._allow_updates = True self.update_sequence(sequence, start_time=start_time) # We now make whether we allow updates dependent on the argument. self._allow_updates = allow_updates super(MidiPlayer, self).__init__() @concurrency.serialized def update_sequence(self, sequence, start_time=None): """Updates sequence being played by the MidiPlayer. Adds events to close any notes that are no longer being closed by the new sequence using the times when they would have been closed by the previous sequence. Args: sequence: The NoteSequence to play back. start_time: The float time before which to strip events. Defaults to call time. Raises: MidiHubError: If called when _allow_updates is False. """ if start_time is None: start_time = time.time() if not self._allow_updates: raise MidiHubError( 'Attempted to update a MidiPlayer sequence with updates disabled.') new_message_list = [] # The set of pitches that are already playing and will be closed without # first being reopened in in the new sequence. closed_notes = set() for note in sequence.notes: if note.start_time >= start_time: new_message_list.append( mido.Message(type='note_on', note=note.pitch, velocity=note.velocity, time=note.start_time)) new_message_list.append( mido.Message(type='note_off', note=note.pitch, time=note.end_time)) elif note.end_time >= start_time and note.pitch in self._open_notes: new_message_list.append( mido.Message(type='note_off', note=note.pitch, time=note.end_time)) closed_notes.add(note.pitch) # Close remaining open notes at the next event time to avoid abruptly ending # notes. notes_to_close = self._open_notes - closed_notes if notes_to_close: next_event_time = ( min(msg.time for msg in new_message_list) if new_message_list else 0) for note in notes_to_close: new_message_list.append( mido.Message(type='note_off', note=note, time=next_event_time)) for msg in new_message_list: msg.channel = self._channel msg.time += self._offset self._message_queue = collections.deque( sorted(new_message_list, key=lambda msg: (msg.time, msg.note))) self._update_cv.notify() @concurrency.serialized def run(self): """Plays messages in the queue until empty and _allow_updates is False.""" # Assumes model where NoteSequence is time-stamped with wall time. # TODO(hanzorama): Argument to allow initial start not at sequence start? while self._message_queue and self._message_queue[0].time < time.time(): self._message_queue.popleft() while True: while self._message_queue: delta = self._message_queue[0].time - time.time() if delta > 0: self._update_cv.wait(timeout=delta) else: msg = self._message_queue.popleft() if msg.type == 'note_on': self._open_notes.add(msg.note) elif msg.type == 'note_off': self._open_notes.discard(msg.note) self._outport.send(msg) # Either keep player alive and wait for sequence update, or return. if self._allow_updates: self._update_cv.wait() else: break def stop(self, block=True): """Signals for the playback to stop and ends all open notes. Args: block: If true, blocks until thread terminates. """ with self._lock: if not self._stop_signal.is_set(): self._stop_signal.set() self._allow_updates = False # Replace message queue with immediate end of open notes. 
self._message_queue.clear() for note in self._open_notes: self._message_queue.append( mido.Message(type='note_off', note=note, time=time.time())) self._update_cv.notify() if block: self.join() class MidiCaptor(threading.Thread): """Base class for thread that captures MIDI into a NoteSequence proto. If neither `stop_time` nor `stop_signal` are provided as arguments, the capture will continue until the `stop` method is called. Args: qpm: The quarters per minute to use for the captured sequence. start_time: The float wall time in seconds when the capture begins. Events occuring before this time are ignored. stop_time: The float wall time in seconds when the capture is to be stopped or None. stop_signal: A MidiSignal to use as a signal to stop capture. """ _metaclass__ = abc.ABCMeta # A message that is used to wake the consumer thread. _WAKE_MESSAGE = None def __init__(self, qpm, start_time=0, stop_time=None, stop_signal=None): # A lock for synchronization. self._lock = threading.RLock() self._receive_queue = Queue.Queue() self._captured_sequence = music_pb2.NoteSequence() self._captured_sequence.tempos.add(qpm=qpm) self._start_time = start_time self._stop_time = stop_time self._stop_regex = re.compile(str(stop_signal)) # A set of active MidiSignals being used by iterators. self._iter_signals = [] # An event that is set when `stop` has been called. self._stop_signal = threading.Event() # Active callback threads keyed by unique thread name. self._callbacks = {} super(MidiCaptor, self).__init__() @property @concurrency.serialized def start_time(self): return self._start_time @start_time.setter @concurrency.serialized def start_time(self, value): """Updates the start time, removing any notes that started before it.""" self._start_time = value i = 0 for note in self._captured_sequence.notes: if note.start_time >= self._start_time: break i += 1 del self._captured_sequence.notes[:i] @property @concurrency.serialized def _stop_time(self): return self._stop_time_unsafe @_stop_time.setter @concurrency.serialized def _stop_time(self, value): self._stop_time_unsafe = value def receive(self, msg): """Adds received mido.Message to the queue for capture. Args: msg: The incoming mido.Message object to add to the queue for capture. The time attribute is assumed to be pre-set with the wall time when the message was received. Raises: MidiHubError: When the received message has an empty time attribute. """ if not msg.time: raise MidiHubError( 'MidiCaptor received message with empty time attribute: %s' % msg) self._receive_queue.put(msg) @abc.abstractmethod def _capture_message(self, msg): """Handles a single incoming MIDI message during capture. Must be serialized in children. Args: msg: The incoming mido.Message object to capture. The time field is assumed to be pre-filled with the wall time when the message was received. 
""" pass def _add_note(self, msg): """Adds and returns a new open note based on the MIDI message.""" new_note = self._captured_sequence.notes.add() new_note.start_time = msg.time new_note.pitch = msg.note new_note.velocity = msg.velocity new_note.is_drum = (msg.channel == _DRUM_CHANNEL) return new_note def run(self): """Captures incoming messages until stop time or signal received.""" while True: timeout = None stop_time = self._stop_time if stop_time is not None: timeout = stop_time - time.time() if timeout <= 0: break try: msg = self._receive_queue.get(block=True, timeout=timeout) except Queue.Empty: continue if msg is MidiCaptor._WAKE_MESSAGE: continue if msg.time <= self._start_time: continue if self._stop_regex.match(str(msg)) is not None: break with self._lock: msg_str = str(msg) for regex, queue in self._iter_signals: if regex.match(msg_str) is not None: queue.put(msg.copy()) self._capture_message(msg) stop_time = self._stop_time end_time = stop_time if stop_time is not None else msg.time # Acquire lock to avoid race condition with `iterate`. with self._lock: # Set final captured sequence. self._captured_sequence = self.captured_sequence(end_time) # Wake up all generators. for regex, queue in self._iter_signals: queue.put(MidiCaptor._WAKE_MESSAGE) def stop(self, stop_time=None, block=True): """Ends capture and truncates the captured sequence at `stop_time`. Args: stop_time: The float time in seconds to stop the capture, or None if it should be stopped now. May be in the past, in which case the captured sequence will be truncated appropriately. block: If True, blocks until the thread terminates. Raises: MidiHubError: When called multiple times with a `stop_time`. """ with self._lock: if self._stop_signal.is_set(): if stop_time is not None: raise MidiHubError( '`stop` must not be called multiple times with a `stop_time` on ' 'MidiCaptor.') else: self._stop_signal.set() self._stop_time = time.time() if stop_time is None else stop_time # Force the thread to wake since we've updated the stop time. self._receive_queue.put(MidiCaptor._WAKE_MESSAGE) if block: self.join() def captured_sequence(self, end_time=None): """Returns a copy of the current captured sequence. If called before the thread terminates, `end_time` is required and any open notes will have their end time set to it, any notes starting after it will be removed, and any notes ending after it will be truncated. `total_time` will also be set to `end_time`. Args: end_time: The float time in seconds to close any open notes and after which to close or truncate notes, if the thread is still alive. Otherwise, must be None. Returns: A copy of the current captured NoteSequence proto with open notes closed at and later notes removed or truncated to `end_time`. Raises: MidiHubError: When the thread is alive and `end_time` is None or the thread is terminated and `end_time` is not None. """ # Make a copy of the sequence currently being captured. 
current_captured_sequence = music_pb2.NoteSequence() with self._lock: current_captured_sequence.CopyFrom(self._captured_sequence) if self.is_alive(): if end_time is None: raise MidiHubError( '`end_time` must be provided when capture thread is still running.') for i, note in enumerate(current_captured_sequence.notes): if note.start_time >= end_time: del current_captured_sequence.notes[i:] break if not note.end_time or note.end_time > end_time: note.end_time = end_time current_captured_sequence.total_time = end_time elif end_time is not None: raise MidiHubError( '`end_time` must not be provided when capture is complete.') return current_captured_sequence def iterate(self, signal=None, period=None): """Yields the captured sequence at every signal message or time period. Exactly one of `signal` or `period` must be specified. Continues until the captor terminates, at which point the final captured sequence is yielded before returning. If consecutive calls to iterate are longer than the period, immediately yields and logs a warning. Args: signal: A MidiSignal to use as a signal to yield, or None. period: A float period in seconds, or None. Yields: The captured NoteSequence at event time. Raises: MidiHubError: If neither `signal` nor `period` or both are specified. """ if (signal, period).count(None) != 1: raise MidiHubError( 'Exactly one of `signal` or `period` must be provided to `iterate` ' 'call.') if signal is None: sleeper = concurrency.Sleeper() next_yield_time = time.time() + period else: regex = re.compile(str(signal)) queue = Queue.Queue() with self._lock: self._iter_signals.append((regex, queue)) while self.is_alive(): if signal is None: skipped_periods = (time.time() - next_yield_time) // period if skipped_periods > 0: tf.logging.warn( 'Skipping %d %.3fs period(s) to catch up on iteration.', skipped_periods, period) next_yield_time += skipped_periods * period else: sleeper.sleep_until(next_yield_time) end_time = next_yield_time next_yield_time += period else: signal_msg = queue.get() if signal_msg is MidiCaptor._WAKE_MESSAGE: # This is only recieved when the thread is in the process of # terminating. Wait until it is done before yielding the final # sequence. self.join() break end_time = signal_msg.time # Acquire lock so that `captured_sequence` will be called before thread # terminates, if it has not already done so. with self._lock: if not self.is_alive(): break captured_sequence = self.captured_sequence(end_time) yield captured_sequence yield self.captured_sequence() def register_callback(self, fn, signal=None, period=None): """Calls `fn` at every signal message or time period. The callback function must take exactly one argument, which will be the current captured NoteSequence. Exactly one of `signal` or `period` must be specified. Continues until the captor thread terminates, at which point the callback is called with the final sequence, or `cancel_callback` is called. If callback execution is longer than a period, immediately calls upon completion and logs a warning. Args: fn: The callback function to call, passing in the captured sequence. signal: A MidiSignal to use as a signal to call `fn` on the current captured sequence, or None. period: A float period in seconds to specify how often to call `fn`, or None. Returns: The unqiue name of the callback thread to enable cancellation. Raises: MidiHubError: If neither `signal` nor `period` or both are specified. 
""" class IteratorCallback(threading.Thread): """A thread for executing a callback on each iteration.""" def __init__(self, iterator, fn): self._iterator = iterator self._fn = fn self._stop_signal = threading.Event() super(IteratorCallback, self).__init__() def run(self): """Calls the callback function for each iterator value.""" for captured_sequence in self._iterator: if self._stop_signal.is_set(): break self._fn(captured_sequence) def stop(self): """Stops the thread on next iteration, without blocking.""" self._stop_signal.set() t = IteratorCallback(self.iterate(signal, period), fn) t.start() with self._lock: assert t.name not in self._callbacks self._callbacks[t.name] = t return t.name @concurrency.serialized def cancel_callback(self, name): """Cancels the callback with the given name. While the thread may continue to run until the next iteration, the callback function will not be executed. Args: name: The unique name of the callback thread to cancel. """ self._callbacks[name].stop() del self._callbacks[name] class MonophonicMidiCaptor(MidiCaptor): """A MidiCaptor for monophonic melodies.""" def __init__(self, *args, **kwargs): self._open_note = None super(MonophonicMidiCaptor, self).__init__(*args, **kwargs) @concurrency.serialized def _capture_message(self, msg): """Handles a single incoming MIDI message during capture. If the message is a note_on event, ends the previous note (if applicable) and opens a new note in the capture sequence. Ignores repeated note_on events. If the message is a note_off event matching the current open note in the capture sequence Args: msg: The mido.Message MIDI message to handle. """ if msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0): if self._open_note is None or msg.note != self._open_note.pitch: # This is not the note we're looking for. Drop it. return self._open_note.end_time = msg.time self._open_note = None elif msg.type == 'note_on': if self._open_note: if self._open_note.pitch == msg.note: # This is just a repeat of the previous message. return # End the previous note. self._open_note.end_time = msg.time self._open_note = self._add_note(msg) class PolyphonicMidiCaptor(MidiCaptor): """A MidiCaptor for polyphonic melodies.""" def __init__(self, *args, **kwargs): # A dictionary of open NoteSequence.Note messages keyed by pitch. self._open_notes = dict() super(PolyphonicMidiCaptor, self).__init__(*args, **kwargs) @concurrency.serialized def _capture_message(self, msg): """Handles a single incoming MIDI message during capture. Args: msg: The mido.Message MIDI message to handle. """ if msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0): if msg.note not in self._open_notes: # This is not a note we're looking for. Drop it. return self._open_notes[msg.note].end_time = msg.time del self._open_notes[msg.note] elif msg.type == 'note_on': if msg.note in self._open_notes: # This is likely just a repeat of the previous message. return new_note = self._add_note(msg) self._open_notes[new_note.pitch] = new_note class TextureType(object): """An Enum specifying the type of musical texture.""" MONOPHONIC = 1 POLYPHONIC = 2 class MidiHub(object): """A MIDI interface for capturing and playing NoteSequences. Ignores/filters `program_change` messages. Assumes all messages are on the same channel. Args: input_midi_port: The string MIDI port name or mido.ports.BaseInput object to use for input. If a name is given that is not an available port, a virtual port will be opened with that name. 
output_midi_port: The string MIDI port name mido.ports.BaseOutput object to use for output. If a name is given that is not an available port, a virtual port will be opened with that name. texture_type: A TextureType Enum specifying the musical texture to assume during capture, passthrough, and playback. passthrough: A boolean specifying whether or not to pass incoming messages through to the output, applying the appropriate texture rules. playback_channel: The MIDI channel to send playback events. playback_offset: The float time in seconds to adjust the playback event times by. """ def __init__(self, input_midi_ports, output_midi_ports, texture_type, passthrough=True, playback_channel=0, playback_offset=0.0): self._texture_type = texture_type self._passthrough = passthrough self._playback_channel = playback_channel self._playback_offset = playback_offset # When `passthrough` is True, this is the set of open MIDI note pitches. self._open_notes = set() # This lock is used by the serialized decorator. self._lock = threading.RLock() # A dictionary mapping a compiled MidiSignal regex to a condition variable # that will be notified when a matching messsage is received. self._signals = {} # A dictionary mapping a compiled MidiSignal regex to a list of functions # that will be called with the triggering message in individual threads when # a matching message is received. self._callbacks = collections.defaultdict(list) # A dictionary mapping integer control numbers to most recently-received # integer value. self._control_values = {} # Threads actively being used to capture incoming messages. self._captors = [] # Potentially active player threads. self._players = [] self._metronome = None # Open MIDI ports. inports = [] if input_midi_ports: for port in input_midi_ports: if isinstance(port, mido.ports.BaseInput): inport = port else: virtual = port not in get_available_input_ports() if virtual: tf.logging.info( "Opening '%s' as a virtual MIDI port for input.", port) inport = mido.open_input(port, virtual=virtual) # Start processing incoming messages. inport.callback = self._timestamp_and_handle_message inports.append(inport) # Keep references to input ports to prevent deletion. self._inports = inports else: tf.logging.warn('No input port specified. Capture disabled.') self._inports = None outports = [] for port in output_midi_ports: if isinstance(port, mido.ports.BaseOutput): outports.append(port) else: virtual = port not in get_available_output_ports() if virtual: tf.logging.info( "Opening '%s' as a virtual MIDI port for output.", port) outports.append(mido.open_output(port, virtual=virtual)) self._outport = mido.ports.MultiPort(outports) def __del__(self): """Stops all running threads and waits for them to terminate.""" for captor in self._captors: captor.stop(block=False) for player in self._players: player.stop(block=False) self.stop_metronome() for captor in self._captors: captor.join() for player in self._players: player.join() @property @concurrency.serialized def passthrough(self): return self._passthrough @passthrough.setter @concurrency.serialized def passthrough(self, value): """Sets passthrough value, closing all open notes if being disabled.""" if self._passthrough == value: return # Close all open notes. 
while self._open_notes: self._outport.send(mido.Message('note_off', note=self._open_notes.pop())) self._passthrough = value def _timestamp_and_handle_message(self, msg): """Stamps message with current time and passes it to the handler.""" if msg.type == 'program_change': return if not msg.time: msg.time = time.time() self._handle_message(msg) @concurrency.serialized def _handle_message(self, msg): """Handles a single incoming MIDI message. -If the message is being used as a signal, notifies threads waiting on the appropriate condition variable. -Adds the message to any capture queues. -Passes the message through to the output port, if appropriate. Args: msg: The mido.Message MIDI message to handle. """ # Notify any threads waiting for this message. msg_str = str(msg) for regex in list(self._signals): if regex.match(msg_str) is not None: self._signals[regex].notify_all() del self._signals[regex] # Call any callbacks waiting for this message. for regex in list(self._callbacks): if regex.match(msg_str) is not None: for fn in self._callbacks[regex]: threading.Thread(target=fn, args=(msg,)).start() del self._callbacks[regex] # Remove any captors that are no longer alive. self._captors[:] = [t for t in self._captors if t.is_alive()] # Add a different copy of the message to the receive queue of each live # capture thread. for t in self._captors: t.receive(msg.copy()) # Update control values if this is a control change message. if msg.type == 'control_change': if self._control_values.get(msg.control, None) != msg.value: tf.logging.debug('Control change %d: %d', msg.control, msg.value) self._control_values[msg.control] = msg.value # Pass the message through to the output port, if appropriate. if not self._passthrough: pass elif self._texture_type == TextureType.POLYPHONIC: if msg.type == 'note_on' and msg.velocity > 0: self._open_notes.add(msg.note) elif (msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0)): self._open_notes.discard(msg.note) self._outport.send(msg) elif self._texture_type == TextureType.MONOPHONIC: assert len(self._open_notes) <= 1 if msg.type not in ['note_on', 'note_off']: self._outport.send(msg) elif ((msg.type == 'note_off' or msg.type == 'note_on' and msg.velocity == 0) and msg.note in self._open_notes): self._outport.send(msg) self._open_notes.remove(msg.note) elif msg.type == 'note_on' and msg.velocity > 0: if self._open_notes: self._outport.send( mido.Message('note_off', note=self._open_notes.pop())) self._outport.send(msg) self._open_notes.add(msg.note) def start_capture(self, qpm, start_time, stop_time=None, stop_signal=None): """Starts a MidiCaptor to compile incoming messages into a NoteSequence. If neither `stop_time` nor `stop_signal`, are provided, the caller must explicitly stop the returned capture thread. If both are specified, the one that occurs first will stop the capture. Args: qpm: The integer quarters per minute to use for the captured sequence. start_time: The float wall time in seconds to start the capture. May be in the past. Used for beat alignment. stop_time: The optional float wall time in seconds to stop the capture. stop_signal: The optional mido.Message to use as a signal to use to stop the capture. Returns: The MidiCaptor thread. 
""" if self._texture_type == TextureType.MONOPHONIC: captor_class = MonophonicMidiCaptor else: captor_class = PolyphonicMidiCaptor captor = captor_class(qpm, start_time, stop_time, stop_signal) with self._lock: self._captors.append(captor) captor.start() return captor def capture_sequence(self, qpm, start_time, stop_time=None, stop_signal=None): """Compiles and returns incoming messages into a NoteSequence. Blocks until capture stops. At least one of `stop_time` or `stop_signal` must be specified. If both are specified, the one that occurs first will stop the capture. Args: qpm: The integer quarters per minute to use for the captured sequence. start_time: The float wall time in seconds to start the capture. May be in the past. Used for beat alignment. stop_time: The optional float wall time in seconds to stop the capture. stop_signal: The optional mido.Message to use as a signal to use to stop the capture. Returns: The captured NoteSequence proto. Raises: MidiHubError: When neither `stop_time` nor `stop_signal` are provided. """ if stop_time is None and stop_signal is None: raise MidiHubError( 'At least one of `stop_time` and `stop_signal` must be provided to ' '`capture_sequence` call.') captor = self.start_capture(qpm, start_time, stop_time, stop_signal) captor.join() return captor.captured_sequence() @concurrency.serialized def wait_for_event(self, signal=None, timeout=None): """Blocks until a matching mido.Message arrives or the timeout occurs. Exactly one of `signal` or `timeout` must be specified. Using a timeout with a threading.Condition object causes additional delays when notified. Args: signal: A MidiSignal to use as a signal to stop waiting, or None. timeout: A float timeout in seconds, or None. Raises: MidiHubError: If neither `signal` nor `timeout` or both are specified. """ if (signal, timeout).count(None) != 1: raise MidiHubError( 'Exactly one of `signal` or `timeout` must be provided to ' '`wait_for_event` call.') if signal is None: concurrency.Sleeper().sleep(timeout) return signal_pattern = str(signal) cond_var = None for regex, cond_var in self._signals: if regex.pattern == signal_pattern: break if cond_var is None: cond_var = threading.Condition(self._lock) self._signals[re.compile(signal_pattern)] = cond_var cond_var.wait() @concurrency.serialized def wake_signal_waiters(self, signal=None): """Wakes all threads waiting on a signal event. Args: signal: The MidiSignal to wake threads waiting on, or None to wake all. """ for regex in list(self._signals): if signal is None or regex.pattern == str(signal): self._signals[regex].notify_all() del self._signals[regex] for captor in self._captors: captor.wake_signal_waiters(signal) @concurrency.serialized def start_metronome(self, qpm, start_time, signals=None, channel=None): """Starts or updates the metronome with the given arguments. Args: qpm: The quarter notes per minute to use. start_time: The wall time in seconds that the metronome is started on for synchronization and beat alignment. May be in the past. signals: An ordered collection of MidiSignals whose underlying messages are to be output on the metronome's tick, cyclically. A None value can be used in place of a MidiSignal to output nothing on a given tick. channel: The MIDI channel to output ticks on. 
""" if self._metronome is not None and self._metronome.is_alive(): self._metronome.update( qpm, start_time, signals=signals, channel=channel) else: self._metronome = Metronome( self._outport, qpm, start_time, signals=signals, channel=channel) self._metronome.start() @concurrency.serialized def stop_metronome(self, stop_time=0, block=True): """Stops the metronome at the given time if it is currently running. Args: stop_time: The float wall time in seconds after which the metronome should stop. By default, stops at next tick. block: If true, blocks until metronome is stopped. """ if self._metronome is None: return self._metronome.stop(stop_time, block) self._metronome = None def start_playback(self, sequence, start_time=time.time(), allow_updates=False): """Plays the notes in aNoteSequence via the MIDI output port. Args: sequence: The NoteSequence to play, with times based on the wall clock. start_time: The float time before which to strip events. Defaults to call time. Events before this time will be sent immediately on start. allow_updates: A boolean specifying whether or not the player should stay allow the sequence to be updated and stay alive until `stop` is called. Returns: The MidiPlayer thread handling playback to enable updating. """ player = MidiPlayer(self._outport, sequence, start_time, allow_updates, self._playback_channel, self._playback_offset) with self._lock: self._players.append(player) player.start() return player @concurrency.serialized def control_value(self, control_number): """Returns the most recently received value for the given control number. Args: control_number: The integer control number to return the value for, or None. Returns: The most recently recieved integer value for the given control number, or None if no values have been received for that control. """ if control_number is None: return None return self._control_values.get(control_number) def send_control_change(self, control_number, value): """Sends the specified control change message on the output port.""" self._outport.send( mido.Message( type='control_change', control=control_number, value=value)) @concurrency.serialized def register_callback(self, fn, signal): """Calls `fn` at the next signal message. The callback function must take exactly one argument, which will be the message triggering the signal. Survives until signal is called or the MidiHub is destroyed. Args: fn: The callback function to call, passing in the triggering message. signal: A MidiSignal to use as a signal to call `fn` on the triggering message. """ self._callbacks[re.compile(str(signal))].append(fn)
undertaker.py
# Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <vgaronne@gmail.com>, 2013-2018 # - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2015 # - Martin Barisits <martin.barisits@cern.ch>, 2016 ''' Undertaker is a daemon to manage expired did. ''' import logging import os import sys import socket import threading import time import traceback from rucio.common.config import config_get from rucio.common.exception import DatabaseException, RuleNotFound from rucio.common.utils import chunks from rucio.core.heartbeat import live, die, sanity_check from rucio.core.monitor import record_counter from rucio.core.did import list_expired_dids, delete_dids logging.getLogger("requests").setLevel(logging.CRITICAL) logging.basicConfig(stream=sys.stdout, level=getattr(logging, config_get('common', 'loglevel', raise_exception=False, default='DEBUG').upper()), format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s') GRACEFUL_STOP = threading.Event() def undertaker(worker_number=1, total_workers=1, chunk_size=5, once=False): """ Main loop to select and delete dids. """ logging.info('Undertaker(%s): starting', worker_number) logging.info('Undertaker(%s): started', worker_number) hostname = socket.gethostname() pid = os.getpid() thread = threading.current_thread() sanity_check(executable='rucio-undertaker', hostname=hostname) while not GRACEFUL_STOP.is_set(): try: heartbeat = live(executable='rucio-undertaker', hostname=hostname, pid=pid, thread=thread, older_than=6000) logging.info('Undertaker({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals())) dids = list_expired_dids(worker_number=heartbeat['assign_thread'] + 1, total_workers=heartbeat['nr_threads'], limit=10000) if not dids and not once: logging.info('Undertaker(%s): Nothing to do. sleep 60.', worker_number) time.sleep(60) continue for chunk in chunks(dids, chunk_size): try: logging.info('Undertaker(%s): Receive %s dids to delete', worker_number, len(chunk)) delete_dids(dids=chunk, account='root') logging.info('Undertaker(%s): Delete %s dids', worker_number, len(chunk)) record_counter(counters='undertaker.delete_dids', delta=len(chunk)) except RuleNotFound, error: logging.error(error) except DatabaseException, error: logging.error('Undertaker(%s): Got database error %s.', worker_number, str(error)) except: logging.critical(traceback.format_exc()) time.sleep(1) if once: break die(executable='rucio-undertaker', hostname=hostname, pid=pid, thread=thread) logging.info('Undertaker(%s): graceful stop requested', worker_number) logging.info('Undertaker(%s): graceful stop done', worker_number) def stop(signum=None, frame=None): """ Graceful exit. """ GRACEFUL_STOP.set() def run(once=False, total_workers=1, chunk_size=10): """ Starts up the undertaker threads. 
""" logging.info('main: starting threads') threads = [threading.Thread(target=undertaker, kwargs={'worker_number': i, 'total_workers': total_workers, 'once': once, 'chunk_size': chunk_size}) for i in xrange(1, total_workers + 1)] [t.start() for t in threads] logging.info('main: waiting for interrupts') # Interruptible joins require a timeout. while threads[0].is_alive(): [t.join(timeout=3.14) for t in threads]
main.py
#! /usr/bin/env python3 import argparse import threading import logging from datetime import datetime from scapy.all import sniff, Ether, IP from .database import create_tables, Entity, create_session, drop_tables from queue import Queue packet_queue = Queue() def on_packet(p): if Ether not in p or IP not in p: return packet_queue.put(p) def process_data(): packet_count = 0 while packet := packet_queue.get(): packet_count += 1 if packet_count % 100 == 0: logging.info(f'Queue size: {packet_queue.qsize()}') mac = packet[Ether].src ip = packet[IP].src session = create_session() query = session.query(Entity).filter_by(mac=mac, ip=ip) if query.count() > 0: entity = query.first() entity.last_seen = datetime.now() session.commit() logging.debug(f"Updated last_seen column for {ip} {mac}") session.close() continue entity = Entity(mac=mac, ip=ip, last_seen=datetime.now()) session.add(entity) session.commit() logging.info(f'Added entity {entity}') session.close() def main(): parser = argparse.ArgumentParser(description='Minidetector is an example tool for detecting network identities and inserting them into a Postgres database') parser.add_argument("--clean", const=True, default=False, nargs='?', help="prune the existing data before starting") parser.add_argument("--debug", const=True, default=False, nargs='?', help="enable debug logging") args = parser.parse_args() logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) if args.clean: logging.debug('Dropping all tables') drop_tables() logging.debug('Creating all tables') create_tables() logging.debug('Starting sniffing thread') sniffing_thread = threading.Thread(target=lambda: sniff(prn=on_packet), daemon=True) sniffing_thread.start() logging.debug('Starting to process packets') process_data() if __name__ == '__main__': main()
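# The `.database` module imported above is not shown here. A minimal
# SQLAlchemy-based sketch that satisfies the names this script uses
# (Entity, create_session, create_tables, drop_tables) is given below;
# the table layout and the connection URL are assumptions, not the real schema.
from sqlalchemy import create_engine, Column, Integer, String, DateTime
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()
engine = create_engine('postgresql:///minidetector')  # assumed connection URL
Session = sessionmaker(bind=engine)


class Entity(Base):
    __tablename__ = 'entities'
    id = Column(Integer, primary_key=True)
    mac = Column(String, nullable=False)
    ip = Column(String, nullable=False)
    last_seen = Column(DateTime)

    def __repr__(self):
        return f'<Entity mac={self.mac} ip={self.ip}>'


def create_session():
    return Session()


def create_tables():
    Base.metadata.create_all(engine)


def drop_tables():
    Base.metadata.drop_all(engine)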
wait_storage.py
# # Copyright 2018 PyWren Team # Copyright IBM Corp. 2020 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import json import time import pickle import random import logging from threading import Thread from multiprocessing.pool import ThreadPool from lithops.storage.utils import create_status_key from lithops.config import JOBS_PREFIX logger = logging.getLogger(__name__) ALL_COMPLETED = 1 ANY_COMPLETED = 2 ALWAYS = 3 def wait_storage(fs, internal_storage, download_results=False, throw_except=True, pbar=None, return_when=ALL_COMPLETED, THREADPOOL_SIZE=128, WAIT_DUR_SEC=1): """ Wait for the Future instances `fs` to complete. Returns a 2-tuple of lists. The first list contains the futures that completed (finished or cancelled) before the wait completed. The second contains uncompleted futures. :param fs: A list of futures. :param internal_storage: Storage handler to poll cloud storage. :param download_results: Download the results: True, False. :param throw_except: Re-raise the exception if a call raised one. Default True. :param pbar: Progress bar. :param return_when: One of `ALL_COMPLETED`, `ANY_COMPLETED`, `ALWAYS` :param THREADPOOL_SIZE: Number of threads to use. Default 128 :param WAIT_DUR_SEC: Time interval between each check. :return: `(fs_dones, fs_notdones)` where `fs_dones` is a list of futures that have completed and `fs_notdones` is a list of futures that have not completed.
:rtype: 2-tuple of lists """ N = len(fs) # These are performance-related settings that we may eventually # want to expose to end users: MAX_DIRECT_QUERY_N = 64 RETURN_EARLY_N = 32 RANDOM_QUERY = False running_futures = set() ftc = Thread(target=_future_timeout_checker_thread, args=(running_futures, internal_storage, throw_except)) ftc.daemon = True ftc.start() if return_when == ALL_COMPLETED: result_count = 0 while result_count < N: fs_dones, fs_notdones = _wait_storage(fs, running_futures, internal_storage, download_results, throw_except, RETURN_EARLY_N, MAX_DIRECT_QUERY_N, pbar=pbar, random_query=RANDOM_QUERY, THREADPOOL_SIZE=THREADPOOL_SIZE) N = len(fs) result_count = len(fs_dones) if result_count == N: return fs_dones, fs_notdones else: sleep = WAIT_DUR_SEC if fs_dones: sleep = max(float(round(WAIT_DUR_SEC-((len(fs_dones)/N)*WAIT_DUR_SEC), 3)), 0) #print("Sleep:", sleep) time.sleep(sleep) #print('---') elif return_when == ANY_COMPLETED: while True: fs_dones, fs_notdones = _wait_storage(fs, running_futures, internal_storage, download_results, throw_except, RETURN_EARLY_N, MAX_DIRECT_QUERY_N, random_query=RANDOM_QUERY, THREADPOOL_SIZE=THREADPOOL_SIZE) if len(fs_dones) != 0: return fs_dones, fs_notdones else: time.sleep(WAIT_DUR_SEC) elif return_when == ALWAYS: return _wait_storage(fs, running_futures, internal_storage, download_results, throw_except, RETURN_EARLY_N, MAX_DIRECT_QUERY_N, random_query=RANDOM_QUERY, THREADPOOL_SIZE=THREADPOOL_SIZE) else: raise ValueError() def _wait_storage(fs, running_futures, internal_storage, download_results, throw_except, return_early_n, max_direct_query_n, pbar=None, random_query=False, THREADPOOL_SIZE=128): """ internal function that performs the majority of the WAIT task work. For the list of futures fn, we will check at a minimum `max_direct_query_n` futures at least once. Internally we : 1. use list() to quickly get a list of which ones are done (but list can be behind due to eventual consistency issues) 2. then individually call get_status on at most `max_direct_query_n` returning early if we have found at least `return_early_n` This can mitigate the stragglers. random_query decides whether we get the fs in the order they are presented or in a random order. 
""" # get all the futures that are not yet done if download_results: not_done_futures = [f for f in fs if not f.done] else: not_done_futures = [f for f in fs if not (f.ready or f.done)] if len(not_done_futures) == 0: return fs, [] present_jobs = {(f.executor_id, f.job_id) for f in not_done_futures} still_not_done_futures = [] while present_jobs: executor_id, job_id = present_jobs.pop() # note this returns everything done, so we have to figure out # the intersection of those that are done current_time = time.time() callids_running_in_job, callids_done_in_job = internal_storage.get_job_status(executor_id, job_id) for f in not_done_futures: for call in callids_running_in_job: if (f.executor_id, f.job_id, f.call_id) == call[0]: if f.invoked and f not in running_futures: f.activation_id = call[1] f._call_status = {'type': '__init__', 'activation_id': call[1], 'start_time': current_time} f.status(throw_except=throw_except, internal_storage=internal_storage) running_futures.add(f) # print('Time getting job status: {} - Running: {} - Done: {}' # .format(round(time.time()-current_time, 3), len(callids_running_in_job), len(callids_done_in_job))) not_done_call_ids = set([(f.executor_id, f.job_id, f.call_id) for f in not_done_futures]) done_call_ids = not_done_call_ids.intersection(callids_done_in_job) not_done_call_ids = not_done_call_ids - done_call_ids still_not_done_futures += [f for f in not_done_futures if ((f.executor_id, f.job_id, f.call_id) in not_done_call_ids)] def fetch_future_status(f): return internal_storage.get_call_status(f.executor_id, f.job_id, f.call_id) pool = ThreadPool(THREADPOOL_SIZE) # now try up to max_direct_query_n direct status queries, quitting once # we have return_n done. query_count = 0 max_queries = min(max_direct_query_n, len(still_not_done_futures)) if random_query: random.shuffle(still_not_done_futures) while query_count < max_queries: if len(done_call_ids) >= return_early_n: break num_to_query_at_once = THREADPOOL_SIZE fs_to_query = still_not_done_futures[query_count:query_count + num_to_query_at_once] fs_statuses = pool.map(fetch_future_status, fs_to_query) callids_found = [(fs_to_query[i].executor_id, fs_to_query[i].job_id, fs_to_query[i].call_id) for i in range(len(fs_to_query)) if fs_statuses[i] is not None] # print('FOUND:', callids_found, len(callids_found)) done_call_ids = done_call_ids.union(set(callids_found)) query_count += len(fs_to_query) # now we walk through all the original queries and get # the ones that are actually done. 
fs_dones = [] fs_notdones = [] f_to_wait_on = [] for f in fs: if (download_results and f.done) or (not download_results and (f.ready or f.done)): # done, don't need to do anything fs_dones.append(f) else: if (f.executor_id, f.job_id, f.call_id) in done_call_ids: f_to_wait_on.append(f) fs_dones.append(f) else: fs_notdones.append(f) def get_result(f): if f.running: f._call_status = None f.result(throw_except=throw_except, internal_storage=internal_storage) def get_status(f): if f.running: f._call_status = None f.status(throw_except=throw_except, internal_storage=internal_storage) # with ThreadPoolExecutor(max_workers=THREADPOOL_SIZE) as executor: # if download_results: # executor.map(get_result, f_to_wait_on) # else: # executor.map(get_status, f_to_wait_on) if download_results: pool.map(get_result, f_to_wait_on) else: pool.map(get_status, f_to_wait_on) if pbar: for f in f_to_wait_on: if (download_results and f.done) or (not download_results and (f.ready or f.done)): pbar.update(1) pbar.refresh() pool.close() pool.join() # Check for new futures new_futures = [f.result() for f in f_to_wait_on if f.futures] for futures in new_futures: fs.extend(futures) if pbar: pbar.total = pbar.total + len(futures) pbar.refresh() return fs_dones, fs_notdones def _future_timeout_checker_thread(running_futures, internal_storage, throw_except): should_run = True while should_run: try: while True: current_time = time.time() for fut in running_futures: if fut.running and fut._call_status: fut_timeout = fut._call_status['start_time'] + fut.execution_timeout + 5 if current_time > fut_timeout: msg = 'The function did not run as expected.' raise TimeoutError('HANDLER', msg) time.sleep(5) except TimeoutError: # generate fake TimeoutError call status pickled_exception = str(pickle.dumps(sys.exc_info())) call_status = {'type': '__end__', 'exception': True, 'exc_info': pickled_exception, 'executor_id': fut.executor_id, 'job_id': fut.job_id, 'call_id': fut.call_id, 'activation_id': fut.activation_id} status_key = create_status_key(JOBS_PREFIX, fut.executor_id, fut.job_id, fut.call_id) dmpd_response_status = json.dumps(call_status) internal_storage.put_data(status_key, dmpd_response_status) if throw_except: should_run = False except Exception: pass
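# wait_storage() above is normally driven by the Lithops executor rather than
# called directly. The sketch below only illustrates the calling convention the
# code implies: poll with ANY_COMPLETED until every future has finished.
# `futures` and `storage` stand in for a list of Lithops futures and the
# executor's internal storage handler; obtaining them is outside this module.
def drain(futures, storage):
    """Yields futures as they complete, polling storage between rounds."""
    pending = list(futures)
    while pending:
        done, pending = wait_storage(pending, storage,
                                     download_results=True,
                                     return_when=ANY_COMPLETED)
        for f in done:
            yield f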
main.py
# -*- mode: python -*- import sys sys.setrecursionlimit(5000) block_cipher = None import pyautogui import keyboard import asyncio import tkinter as tk from tkinter import ttk from tkinter import * import threading import multiprocessing import time import os import psutil global iteration iteration = 0 def checkIfProcessRunning(processName): ''' Check if there is any running process that contains the given name processName. ''' #Iterate over the all the running process for proc in psutil.process_iter(): try: # Check if process name contains the given name string. if processName.lower() in proc.name().lower(): return True except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): pass return False; def main(iteration): while keyboard.is_pressed('end') == False: def okey(): ok = pyautogui.locateOnScreen('assets/ok.jpg', grayscale=True, confidence=0.92) if ok != None: pyautogui.leftClick(ok) time.sleep(0.25) def okey1(): ok1 = pyautogui.locateOnScreen('assets/ok1.jpg', grayscale=True, confidence=0.92) if ok1 != None: pyautogui.leftClick(ok1) time.sleep(0.25) def useit(): use = pyautogui.locateOnScreen('assets/use.jpg', grayscale=True, confidence=0.85) if use != None: pyautogui.leftClick(use) time.sleep(0.25) def takeit(): take = pyautogui.locateOnScreen('assets/take.jpg', grayscale=True, confidence=0.92) if take != None: pyautogui.leftClick(take) time.sleep(0.25) return True if checkIfProcessRunning('wolfteam.bin'): pass else: var2.set("Oyununuz Açık Değil!") useit() okey() var.set(f"Açılan Kutu Sayısı: {iteration}") if takeit() == True: iteration += 1 var.set(f"Açılan Kutu Sayısı: {iteration}") var1.set("Durum : Kutu Açılıyor!") else: var1.set(f"Durum: Kutu Bekleniyor...") okey1() def starter(iteration): while True: if keyboard.is_pressed('f1'): if checkIfProcessRunning('wolfteam.bin'): var2.set("TAMAM!") main(iteration) else: var2.set("Uyarı: Oyununuz Açık Değil!") root = Tk() root.resizable(0, 0) var = StringVar() var1 = StringVar() var2 = StringVar() root.geometry('400x150') root.iconbitmap('assets/favicon.ico') root.configure(background='#F0F8FF') root.title('WT BOX OPENER') Label(root, text='Başlatmak İçin \'F1\' Tuşuna Basın.', bg='#F0F8FF', font=('arial', 12, 'normal')).place(x=12, y=14) Label(root, text='Kapatmak İçin \'END\' Tuşuna Basın.', bg='#F0F8FF', font=('arial', 12, 'normal')).place(x=12, y=34) Label(root, textvariable=var, bg='#F0F8FF', foreground="green",font=('arial', 12, 'normal')).place(x=12, y=54) Label(root, textvariable=var1, bg='#F0F8FF', foreground="blue",font=('arial', 12, 'normal')).place(x=12, y=74) Label(root, textvariable=var2, bg='#F0F8FF', foreground="red", font=('arial', 12, 'normal')).place(x=12, y=94) x = threading.Thread(target=starter,args=[iteration]) x.start() root.mainloop()
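# checkIfProcessRunning() above only depends on psutil, so it can be exercised
# on its own outside the Tkinter loop, for example to confirm the game client
# is up before starting the bot. 'wolfteam.bin' is the process name the script
# already polls for inside main().
def assert_game_client_running():
    if not checkIfProcessRunning('wolfteam.bin'):
        raise RuntimeError('wolfteam.bin is not running; start the game first')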
test_enum.py
import enum import inspect import pydoc import sys import unittest import threading from collections import OrderedDict from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto from io import StringIO from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL from test.support import ALWAYS_EQ, check__all__, threading_helper from datetime import timedelta # for pickle tests try: class Stooges(Enum): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: Stooges = exc try: class IntStooges(int, Enum): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: IntStooges = exc try: class FloatStooges(float, Enum): LARRY = 1.39 CURLY = 2.72 MOE = 3.142596 except Exception as exc: FloatStooges = exc try: class FlagStooges(Flag): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: FlagStooges = exc # for pickle test and subclass tests try: class StrEnum(str, Enum): 'accepts only string values' class Name(StrEnum): BDFL = 'Guido van Rossum' FLUFL = 'Barry Warsaw' except Exception as exc: Name = exc try: Question = Enum('Question', 'who what when where why', module=__name__) except Exception as exc: Question = exc try: Answer = Enum('Answer', 'him this then there because') except Exception as exc: Answer = exc try: Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition') except Exception as exc: Theory = exc # for doctests try: class Fruit(Enum): TOMATO = 1 BANANA = 2 CHERRY = 3 except Exception: pass def test_pickle_dump_load(assertion, source, target=None): if target is None: target = source for protocol in range(HIGHEST_PROTOCOL + 1): assertion(loads(dumps(source, protocol=protocol)), target) def test_pickle_exception(assertion, exception, obj): for protocol in range(HIGHEST_PROTOCOL + 1): with assertion(exception): dumps(obj, protocol=protocol) class TestHelpers(unittest.TestCase): # _is_descriptor, _is_sunder, _is_dunder def test_is_descriptor(self): class foo: pass for attr in ('__get__','__set__','__delete__'): obj = foo() self.assertFalse(enum._is_descriptor(obj)) setattr(obj, attr, 1) self.assertTrue(enum._is_descriptor(obj)) def test_is_sunder(self): for s in ('_a_', '_aa_'): self.assertTrue(enum._is_sunder(s)) for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_', '__', '___', '____', '_____',): self.assertFalse(enum._is_sunder(s)) def test_is_dunder(self): for s in ('__a__', '__aa__'): self.assertTrue(enum._is_dunder(s)) for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_', '__', '___', '____', '_____',): self.assertFalse(enum._is_dunder(s)) # for subclassing tests class classproperty: def __init__(self, fget=None, fset=None, fdel=None, doc=None): self.fget = fget self.fset = fset self.fdel = fdel if doc is None and fget is not None: doc = fget.__doc__ self.__doc__ = doc def __get__(self, instance, ownerclass): return self.fget(ownerclass) # tests class TestEnum(unittest.TestCase): def setUp(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = 3 WINTER = 4 self.Season = Season class Konstants(float, Enum): E = 2.7182818 PI = 3.1415926 TAU = 2 * PI self.Konstants = Konstants class Grades(IntEnum): A = 5 B = 4 C = 3 D = 2 F = 0 self.Grades = Grades class Directional(str, Enum): EAST = 'east' WEST = 'west' NORTH = 'north' SOUTH = 'south' self.Directional = Directional from datetime import date class Holiday(date, Enum): NEW_YEAR = 2013, 1, 1 IDES_OF_MARCH = 2013, 3, 15 self.Holiday = Holiday def test_dir_on_class(self): Season = self.Season self.assertEqual( set(dir(Season)), set(['__class__', '__doc__', 
'__members__', '__module__', 'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']), ) def test_dir_on_item(self): Season = self.Season self.assertEqual( set(dir(Season.WINTER)), set(['__class__', '__doc__', '__module__', 'name', 'value']), ) def test_dir_with_added_behavior(self): class Test(Enum): this = 'that' these = 'those' def wowser(self): return ("Wowser! I'm %s!" % self.name) self.assertEqual( set(dir(Test)), set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']), ) self.assertEqual( set(dir(Test.this)), set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']), ) def test_dir_on_sub_with_behavior_on_super(self): # see issue22506 class SuperEnum(Enum): def invisible(self): return "did you see me?" class SubEnum(SuperEnum): sample = 5 self.assertEqual( set(dir(SubEnum.sample)), set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']), ) def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self): # see issue40084 class SuperEnum(IntEnum): def __new__(cls, value, description=""): obj = int.__new__(cls, value) obj._value_ = value obj.description = description return obj class SubEnum(SuperEnum): sample = 5 self.assertTrue({'description'} <= set(dir(SubEnum.sample))) def test_enum_in_enum_out(self): Season = self.Season self.assertIs(Season(Season.WINTER), Season.WINTER) def test_enum_value(self): Season = self.Season self.assertEqual(Season.SPRING.value, 1) def test_intenum_value(self): self.assertEqual(IntStooges.CURLY.value, 2) def test_enum(self): Season = self.Season lst = list(Season) self.assertEqual(len(lst), len(Season)) self.assertEqual(len(Season), 4, Season) self.assertEqual( [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst) for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1): e = Season(i) self.assertEqual(e, getattr(Season, season)) self.assertEqual(e.value, i) self.assertNotEqual(e, i) self.assertEqual(e.name, season) self.assertIn(e, Season) self.assertIs(type(e), Season) self.assertIsInstance(e, Season) self.assertEqual(str(e), 'Season.' 
+ season) self.assertEqual( repr(e), '<Season.{0}: {1}>'.format(season, i), ) def test_value_name(self): Season = self.Season self.assertEqual(Season.SPRING.name, 'SPRING') self.assertEqual(Season.SPRING.value, 1) with self.assertRaises(AttributeError): Season.SPRING.name = 'invierno' with self.assertRaises(AttributeError): Season.SPRING.value = 2 def test_changing_member(self): Season = self.Season with self.assertRaises(AttributeError): Season.WINTER = 'really cold' def test_attribute_deletion(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = 3 WINTER = 4 def spam(cls): pass self.assertTrue(hasattr(Season, 'spam')) del Season.spam self.assertFalse(hasattr(Season, 'spam')) with self.assertRaises(AttributeError): del Season.SPRING with self.assertRaises(AttributeError): del Season.DRY with self.assertRaises(AttributeError): del Season.SPRING.name def test_bool_of_class(self): class Empty(Enum): pass self.assertTrue(bool(Empty)) def test_bool_of_member(self): class Count(Enum): zero = 0 one = 1 two = 2 for member in Count: self.assertTrue(bool(member)) def test_invalid_names(self): with self.assertRaises(ValueError): class Wrong(Enum): mro = 9 with self.assertRaises(ValueError): class Wrong(Enum): _create_= 11 with self.assertRaises(ValueError): class Wrong(Enum): _get_mixins_ = 9 with self.assertRaises(ValueError): class Wrong(Enum): _find_new_ = 1 with self.assertRaises(ValueError): class Wrong(Enum): _any_name_ = 9 def test_bool(self): # plain Enum members are always True class Logic(Enum): true = True false = False self.assertTrue(Logic.true) self.assertTrue(Logic.false) # unless overridden class RealLogic(Enum): true = True false = False def __bool__(self): return bool(self._value_) self.assertTrue(RealLogic.true) self.assertFalse(RealLogic.false) # mixed Enums depend on mixed-in type class IntLogic(int, Enum): true = 1 false = 0 self.assertTrue(IntLogic.true) self.assertFalse(IntLogic.false) def test_contains(self): Season = self.Season self.assertIn(Season.AUTUMN, Season) with self.assertRaises(TypeError): 3 in Season with self.assertRaises(TypeError): 'AUTUMN' in Season val = Season(3) self.assertIn(val, Season) class OtherEnum(Enum): one = 1; two = 2 self.assertNotIn(OtherEnum.two, Season) def test_comparisons(self): Season = self.Season with self.assertRaises(TypeError): Season.SPRING < Season.WINTER with self.assertRaises(TypeError): Season.SPRING > 4 self.assertNotEqual(Season.SPRING, 1) class Part(Enum): SPRING = 1 CLIP = 2 BARREL = 3 self.assertNotEqual(Season.SPRING, Part.SPRING) with self.assertRaises(TypeError): Season.SPRING < Part.CLIP def test_enum_duplicates(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = FALL = 3 WINTER = 4 ANOTHER_SPRING = 1 lst = list(Season) self.assertEqual( lst, [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER, ]) self.assertIs(Season.FALL, Season.AUTUMN) self.assertEqual(Season.FALL.value, 3) self.assertEqual(Season.AUTUMN.value, 3) self.assertIs(Season(3), Season.AUTUMN) self.assertIs(Season(1), Season.SPRING) self.assertEqual(Season.FALL.name, 'AUTUMN') self.assertEqual( [k for k,v in Season.__members__.items() if v.name != k], ['FALL', 'ANOTHER_SPRING'], ) def test_duplicate_name(self): with self.assertRaises(TypeError): class Color(Enum): red = 1 green = 2 blue = 3 red = 4 with self.assertRaises(TypeError): class Color(Enum): red = 1 green = 2 blue = 3 def red(self): return 'red' with self.assertRaises(TypeError): class Color(Enum): @property def red(self): return 'redder' red = 1 green = 2 blue = 3 # TODO: 
RUSTPYTHON @unittest.expectedFailure def test_enum_with_value_name(self): class Huh(Enum): name = 1 value = 2 self.assertEqual( list(Huh), [Huh.name, Huh.value], ) self.assertIs(type(Huh.name), Huh) self.assertEqual(Huh.name.name, 'name') self.assertEqual(Huh.name.value, 1) def test_format_enum(self): Season = self.Season self.assertEqual('{}'.format(Season.SPRING), '{}'.format(str(Season.SPRING))) self.assertEqual( '{:}'.format(Season.SPRING), '{:}'.format(str(Season.SPRING))) self.assertEqual('{:20}'.format(Season.SPRING), '{:20}'.format(str(Season.SPRING))) self.assertEqual('{:^20}'.format(Season.SPRING), '{:^20}'.format(str(Season.SPRING))) self.assertEqual('{:>20}'.format(Season.SPRING), '{:>20}'.format(str(Season.SPRING))) self.assertEqual('{:<20}'.format(Season.SPRING), '{:<20}'.format(str(Season.SPRING))) def test_str_override_enum(self): class EnumWithStrOverrides(Enum): one = auto() two = auto() def __str__(self): return 'Str!' self.assertEqual(str(EnumWithStrOverrides.one), 'Str!') self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!') def test_format_override_enum(self): class EnumWithFormatOverride(Enum): one = 1.0 two = 2.0 def __format__(self, spec): return 'Format!!' self.assertEqual(str(EnumWithFormatOverride.one), 'EnumWithFormatOverride.one') self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!') def test_str_and_format_override_enum(self): class EnumWithStrFormatOverrides(Enum): one = auto() two = auto() def __str__(self): return 'Str!' def __format__(self, spec): return 'Format!' self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!') self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!') def test_str_override_mixin(self): class MixinEnumWithStrOverride(float, Enum): one = 1.0 two = 2.0 def __str__(self): return 'Overridden!' self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!') self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!') def test_str_and_format_override_mixin(self): class MixinWithStrFormatOverrides(float, Enum): one = 1.0 two = 2.0 def __str__(self): return 'Str!' def __format__(self, spec): return 'Format!' self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!') self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!') def test_format_override_mixin(self): class TestFloat(float, Enum): one = 1.0 two = 2.0 def __format__(self, spec): return 'TestFloat success!' 
self.assertEqual(str(TestFloat.one), 'TestFloat.one') self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!') def assertFormatIsValue(self, spec, member): self.assertEqual(spec.format(member), spec.format(member.value)) def test_format_enum_date(self): Holiday = self.Holiday self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH) # TODO: RUSTPYTHON @unittest.expectedFailure def test_format_enum_float(self): Konstants = self.Konstants self.assertFormatIsValue('{}', Konstants.TAU) self.assertFormatIsValue('{:}', Konstants.TAU) self.assertFormatIsValue('{:20}', Konstants.TAU) self.assertFormatIsValue('{:^20}', Konstants.TAU) self.assertFormatIsValue('{:>20}', Konstants.TAU) self.assertFormatIsValue('{:<20}', Konstants.TAU) self.assertFormatIsValue('{:n}', Konstants.TAU) self.assertFormatIsValue('{:5.2}', Konstants.TAU) self.assertFormatIsValue('{:f}', Konstants.TAU) def test_format_enum_int(self): Grades = self.Grades self.assertFormatIsValue('{}', Grades.C) self.assertFormatIsValue('{:}', Grades.C) self.assertFormatIsValue('{:20}', Grades.C) self.assertFormatIsValue('{:^20}', Grades.C) self.assertFormatIsValue('{:>20}', Grades.C) self.assertFormatIsValue('{:<20}', Grades.C) self.assertFormatIsValue('{:+}', Grades.C) self.assertFormatIsValue('{:08X}', Grades.C) self.assertFormatIsValue('{:b}', Grades.C) def test_format_enum_str(self): Directional = self.Directional self.assertFormatIsValue('{}', Directional.WEST) self.assertFormatIsValue('{:}', Directional.WEST) self.assertFormatIsValue('{:20}', Directional.WEST) self.assertFormatIsValue('{:^20}', Directional.WEST) self.assertFormatIsValue('{:>20}', Directional.WEST) self.assertFormatIsValue('{:<20}', Directional.WEST) def test_object_str_override(self): class Colors(Enum): RED, GREEN, BLUE = 1, 2, 3 def __repr__(self): return "test.%s" % (self._name_, ) __str__ = object.__str__ self.assertEqual(str(Colors.RED), 'test.RED') def test_enum_str_override(self): class MyStrEnum(Enum): def __str__(self): return 'MyStr' class MyMethodEnum(Enum): def hello(self): return 'Hello! 
My name is %s' % self.name class Test1Enum(MyMethodEnum, int, MyStrEnum): One = 1 Two = 2 self.assertTrue(Test1Enum._member_type_ is int) self.assertEqual(str(Test1Enum.One), 'MyStr') self.assertEqual(format(Test1Enum.One, ''), 'MyStr') # class Test2Enum(MyStrEnum, MyMethodEnum): One = 1 Two = 2 self.assertEqual(str(Test2Enum.One), 'MyStr') self.assertEqual(format(Test1Enum.One, ''), 'MyStr') def test_inherited_data_type(self): class HexInt(int): def __repr__(self): return hex(self) class MyEnum(HexInt, enum.Enum): A = 1 B = 2 C = 3 self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>') def test_too_many_data_types(self): with self.assertRaisesRegex(TypeError, 'too many data types'): class Huh(str, int, Enum): One = 1 class MyStr(str): def hello(self): return 'hello, %s' % self class MyInt(int): def repr(self): return hex(self) with self.assertRaisesRegex(TypeError, 'too many data types'): class Huh(MyStr, MyInt, Enum): One = 1 def test_hash(self): Season = self.Season dates = {} dates[Season.WINTER] = '1225' dates[Season.SPRING] = '0315' dates[Season.SUMMER] = '0704' dates[Season.AUTUMN] = '1031' self.assertEqual(dates[Season.AUTUMN], '1031') def test_intenum_from_scratch(self): class phy(int, Enum): pi = 3 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_intenum_inherited(self): class IntEnum(int, Enum): pass class phy(IntEnum): pi = 3 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_floatenum_from_scratch(self): class phy(float, Enum): pi = 3.1415926 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_floatenum_inherited(self): class FloatEnum(float, Enum): pass class phy(FloatEnum): pi = 3.1415926 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_strenum_from_scratch(self): class phy(str, Enum): pi = 'Pi' tau = 'Tau' self.assertTrue(phy.pi < phy.tau) def test_strenum_inherited(self): class StrEnum(str, Enum): pass class phy(StrEnum): pi = 'Pi' tau = 'Tau' self.assertTrue(phy.pi < phy.tau) def test_intenum(self): class WeekDay(IntEnum): SUNDAY = 1 MONDAY = 2 TUESDAY = 3 WEDNESDAY = 4 THURSDAY = 5 FRIDAY = 6 SATURDAY = 7 self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c') self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2]) lst = list(WeekDay) self.assertEqual(len(lst), len(WeekDay)) self.assertEqual(len(WeekDay), 7) target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY' target = target.split() for i, weekday in enumerate(target, 1): e = WeekDay(i) self.assertEqual(e, i) self.assertEqual(int(e), i) self.assertEqual(e.name, weekday) self.assertIn(e, WeekDay) self.assertEqual(lst.index(e)+1, i) self.assertTrue(0 < e < 8) self.assertIs(type(e), WeekDay) self.assertIsInstance(e, int) self.assertIsInstance(e, Enum) def test_intenum_duplicates(self): class WeekDay(IntEnum): SUNDAY = 1 MONDAY = 2 TUESDAY = TEUSDAY = 3 WEDNESDAY = 4 THURSDAY = 5 FRIDAY = 6 SATURDAY = 7 self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY) self.assertEqual(WeekDay(3).name, 'TUESDAY') self.assertEqual([k for k,v in WeekDay.__members__.items() if v.name != k], ['TEUSDAY', ]) # TODO: RUSTPYTHON @unittest.expectedFailure def test_intenum_from_bytes(self): self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE) with self.assertRaises(ValueError): IntStooges.from_bytes(b'\x00\x05', 'big') # TODO: RUSTPYTHON @unittest.expectedFailure def test_floatenum_fromhex(self): h = float.hex(FloatStooges.MOE.value) self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE) h = float.hex(FloatStooges.MOE.value + 0.01) with self.assertRaises(ValueError): 
FloatStooges.fromhex(h) def test_pickle_enum(self): if isinstance(Stooges, Exception): raise Stooges test_pickle_dump_load(self.assertIs, Stooges.CURLY) test_pickle_dump_load(self.assertIs, Stooges) def test_pickle_int(self): if isinstance(IntStooges, Exception): raise IntStooges test_pickle_dump_load(self.assertIs, IntStooges.CURLY) test_pickle_dump_load(self.assertIs, IntStooges) def test_pickle_float(self): if isinstance(FloatStooges, Exception): raise FloatStooges test_pickle_dump_load(self.assertIs, FloatStooges.CURLY) test_pickle_dump_load(self.assertIs, FloatStooges) # TODO: RUSTPYTHON @unittest.expectedFailure def test_pickle_enum_function(self): if isinstance(Answer, Exception): raise Answer test_pickle_dump_load(self.assertIs, Answer.him) test_pickle_dump_load(self.assertIs, Answer) # TODO: RUSTPYTHON @unittest.expectedFailure def test_pickle_enum_function_with_module(self): if isinstance(Question, Exception): raise Question test_pickle_dump_load(self.assertIs, Question.who) test_pickle_dump_load(self.assertIs, Question) def test_enum_function_with_qualname(self): if isinstance(Theory, Exception): raise Theory self.assertEqual(Theory.__qualname__, 'spanish_inquisition') def test_class_nested_enum_and_pickle_protocol_four(self): # would normally just have this directly in the class namespace class NestedEnum(Enum): twigs = 'common' shiny = 'rare' self.__class__.NestedEnum = NestedEnum self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__ test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs) def test_pickle_by_name(self): class ReplaceGlobalInt(IntEnum): ONE = 1 TWO = 2 ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name for proto in range(HIGHEST_PROTOCOL): self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO') def test_exploding_pickle(self): BadPickle = Enum( 'BadPickle', 'dill sweet bread-n-butter', module=__name__) globals()['BadPickle'] = BadPickle # now break BadPickle to test exception raising enum._make_class_unpicklable(BadPickle) test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill) test_pickle_exception(self.assertRaises, PicklingError, BadPickle) def test_string_enum(self): class SkillLevel(str, Enum): master = 'what is the sound of one hand clapping?' journeyman = 'why did the chicken cross the road?' apprentice = 'knock, knock!' 
self.assertEqual(SkillLevel.apprentice, 'knock, knock!') def test_getattr_getitem(self): class Period(Enum): morning = 1 noon = 2 evening = 3 night = 4 self.assertIs(Period(2), Period.noon) self.assertIs(getattr(Period, 'night'), Period.night) self.assertIs(Period['morning'], Period.morning) def test_getattr_dunder(self): Season = self.Season self.assertTrue(getattr(Season, '__eq__')) def test_iteration_order(self): class Season(Enum): SUMMER = 2 WINTER = 4 AUTUMN = 3 SPRING = 1 self.assertEqual( list(Season), [Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING], ) def test_reversed_iteration_order(self): self.assertEqual( list(reversed(self.Season)), [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER, self.Season.SPRING] ) def test_programmatic_function_string(self): SummerMonth = Enum('SummerMonth', 'june july august') lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_with_start(self): SummerMonth = Enum('SummerMonth', 'june july august', start=10) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 10): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_list(self): SummerMonth = Enum('SummerMonth', ['june', 'july', 'august']) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_list_with_start(self): SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 20): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_iterable(self): SummerMonth = Enum( 'SummerMonth', (('june', 1), ('july', 2), ('august', 3)) ) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def 
test_programmatic_function_from_dict(self): SummerMonth = Enum( 'SummerMonth', OrderedDict((('june', 1), ('july', 2), ('august', 3))) ) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type(self): SummerMonth = Enum('SummerMonth', 'june july august', type=int) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_with_start(self): SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 30): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_from_subclass(self): SummerMonth = IntEnum('SummerMonth', 'june july august') lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_from_subclass_with_start(self): SummerMonth = IntEnum('SummerMonth', 'june july august', start=40) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 40): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) # TODO: RUSTPYTHON @unittest.expectedFailure def test_subclassing(self): if isinstance(Name, Exception): raise Name self.assertEqual(Name.BDFL, 'Guido van Rossum') self.assertTrue(Name.BDFL, Name('Guido van Rossum')) self.assertIs(Name.BDFL, getattr(Name, 'BDFL')) test_pickle_dump_load(self.assertIs, Name.BDFL) def test_extending(self): class Color(Enum): red = 1 green = 2 blue = 3 with self.assertRaises(TypeError): class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"): class EvenMoreColor(Color, IntEnum): chartruese = 7 def test_exclude_methods(self): class whatever(Enum): this = 'that' these = 'those' def really(self): return 'no, not %s' % self.value self.assertIsNot(type(whatever.really), whatever) self.assertEqual(whatever.this.really(), 'no, not that') def 
test_wrong_inheritance_order(self): with self.assertRaises(TypeError): class Wrong(Enum, str): NotHere = 'error before this point' def test_intenum_transitivity(self): class number(IntEnum): one = 1 two = 2 three = 3 class numero(IntEnum): uno = 1 dos = 2 tres = 3 self.assertEqual(number.one, numero.uno) self.assertEqual(number.two, numero.dos) self.assertEqual(number.three, numero.tres) def test_wrong_enum_in_call(self): class Monochrome(Enum): black = 0 white = 1 class Gender(Enum): male = 0 female = 1 self.assertRaises(ValueError, Monochrome, Gender.male) def test_wrong_enum_in_mixed_call(self): class Monochrome(IntEnum): black = 0 white = 1 class Gender(Enum): male = 0 female = 1 self.assertRaises(ValueError, Monochrome, Gender.male) def test_mixed_enum_in_call_1(self): class Monochrome(IntEnum): black = 0 white = 1 class Gender(IntEnum): male = 0 female = 1 self.assertIs(Monochrome(Gender.female), Monochrome.white) def test_mixed_enum_in_call_2(self): class Monochrome(Enum): black = 0 white = 1 class Gender(IntEnum): male = 0 female = 1 self.assertIs(Monochrome(Gender.male), Monochrome.black) def test_flufl_enum(self): class Fluflnum(Enum): def __int__(self): return int(self.value) class MailManOptions(Fluflnum): option1 = 1 option2 = 2 option3 = 3 self.assertEqual(int(MailManOptions.option1), 1) def test_introspection(self): class Number(IntEnum): one = 100 two = 200 self.assertIs(Number.one._member_type_, int) self.assertIs(Number._member_type_, int) class String(str, Enum): yarn = 'soft' rope = 'rough' wire = 'hard' self.assertIs(String.yarn._member_type_, str) self.assertIs(String._member_type_, str) class Plain(Enum): vanilla = 'white' one = 1 self.assertIs(Plain.vanilla._member_type_, object) self.assertIs(Plain._member_type_, object) def test_no_such_enum_member(self): class Color(Enum): red = 1 green = 2 blue = 3 with self.assertRaises(ValueError): Color(4) with self.assertRaises(KeyError): Color['chartreuse'] def test_new_repr(self): class Color(Enum): red = 1 green = 2 blue = 3 def __repr__(self): return "don't you just love shades of %s?" % self.name self.assertEqual( repr(Color.blue), "don't you just love shades of blue?", ) def test_inherited_repr(self): class MyEnum(Enum): def __repr__(self): return "My name is %s." % self.name class MyIntEnum(int, MyEnum): this = 1 that = 2 theother = 3 self.assertEqual(repr(MyIntEnum.that), "My name is that.") def test_multiple_mixin_mro(self): class auto_enum(type(Enum)): def __new__(metacls, cls, bases, classdict): temp = type(classdict)() names = set(classdict._member_names) i = 0 for k in classdict._member_names: v = classdict[k] if v is Ellipsis: v = i else: i = v i += 1 temp[k] = v for k, v in classdict.items(): if k not in names: temp[k] = v return super(auto_enum, metacls).__new__( metacls, cls, bases, temp) class AutoNumberedEnum(Enum, metaclass=auto_enum): pass class AutoIntEnum(IntEnum, metaclass=auto_enum): pass class TestAutoNumber(AutoNumberedEnum): a = ... b = 3 c = ... class TestAutoInt(AutoIntEnum): a = ... b = 3 c = ... 
def test_subclasses_with_getnewargs(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __getnewargs__(self): return self._args @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format(type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) # TODO: RUSTPYTHON @unittest.expectedFailure def test_subclasses_with_getnewargs_ex(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __getnewargs_ex__(self): return self._args, {} @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format(type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_with_reduce(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self def 
__reduce__(self): return self.__class__, self._args @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format(type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_with_reduce_ex(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __reduce_ex__(self, proto): return self.__class__, self._args @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format(type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_without_direct_pickle_support(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format(type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return 
base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = ('the-x', 1) y = ('the-y', 2) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_exception(self.assertRaises, TypeError, NEI.x) test_pickle_exception(self.assertRaises, PicklingError, NEI) def test_subclasses_without_direct_pickle_support_using_name(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format(type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = ('the-x', 1) y = ('the-y', 2) def __reduce_ex__(self, proto): return getattr, (self.__class__, self._name_) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_tuple_subclass(self): class SomeTuple(tuple, Enum): __qualname__ = 'SomeTuple' # needed for pickle protocol 4 first = (1, 'for the money') second = (2, 'for the show') third = (3, 'for the music') self.assertIs(type(SomeTuple.first), SomeTuple) self.assertIsInstance(SomeTuple.second, tuple) self.assertEqual(SomeTuple.third, (3, 'for the music')) globals()['SomeTuple'] = SomeTuple test_pickle_dump_load(self.assertIs, SomeTuple.first) # TODO: RUSTPYTHON @unittest.expectedFailure def test_duplicate_values_give_unique_enum_items(self): class AutoNumber(Enum): first = () second = () third = () def __new__(cls): value = len(cls.__members__) + 1 obj = object.__new__(cls) obj._value_ = value return obj def __int__(self): return int(self._value_) self.assertEqual( list(AutoNumber), [AutoNumber.first, AutoNumber.second, AutoNumber.third], ) self.assertEqual(int(AutoNumber.second), 2) self.assertEqual(AutoNumber.third.value, 3) self.assertIs(AutoNumber(1), AutoNumber.first) # TODO: RUSTPYTHON @unittest.expectedFailure def test_inherited_new_from_enhanced_enum(self): class AutoNumber(Enum): def __new__(cls): value = len(cls.__members__) + 1 obj = object.__new__(cls) obj._value_ = value return obj def __int__(self): return 
int(self._value_) class Color(AutoNumber): red = () green = () blue = () self.assertEqual(list(Color), [Color.red, Color.green, Color.blue]) self.assertEqual(list(map(int, Color)), [1, 2, 3]) # TODO: RUSTPYTHON @unittest.expectedFailure def test_inherited_new_from_mixed_enum(self): class AutoNumber(IntEnum): def __new__(cls): value = len(cls.__members__) + 1 obj = int.__new__(cls, value) obj._value_ = value return obj class Color(AutoNumber): red = () green = () blue = () self.assertEqual(list(Color), [Color.red, Color.green, Color.blue]) self.assertEqual(list(map(int, Color)), [1, 2, 3]) def test_equality(self): class AlwaysEqual: def __eq__(self, other): return True class OrdinaryEnum(Enum): a = 1 self.assertEqual(AlwaysEqual(), OrdinaryEnum.a) self.assertEqual(OrdinaryEnum.a, AlwaysEqual()) def test_ordered_mixin(self): class OrderedEnum(Enum): def __ge__(self, other): if self.__class__ is other.__class__: return self._value_ >= other._value_ return NotImplemented def __gt__(self, other): if self.__class__ is other.__class__: return self._value_ > other._value_ return NotImplemented def __le__(self, other): if self.__class__ is other.__class__: return self._value_ <= other._value_ return NotImplemented def __lt__(self, other): if self.__class__ is other.__class__: return self._value_ < other._value_ return NotImplemented class Grade(OrderedEnum): A = 5 B = 4 C = 3 D = 2 F = 1 self.assertGreater(Grade.A, Grade.B) self.assertLessEqual(Grade.F, Grade.C) self.assertLess(Grade.D, Grade.A) self.assertGreaterEqual(Grade.B, Grade.B) self.assertEqual(Grade.B, Grade.B) self.assertNotEqual(Grade.C, Grade.D) def test_extending2(self): class Shade(Enum): def shade(self): print(self.name) class Color(Shade): red = 1 green = 2 blue = 3 with self.assertRaises(TypeError): class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 def test_extending3(self): class Shade(Enum): def shade(self): return self.name class Color(Shade): def hex(self): return '%s hexlified!' 
% self.value class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!') def test_subclass_duplicate_name(self): class Base(Enum): def test(self): pass class Test(Base): test = 1 self.assertIs(type(Test.test), Test) # TODO: RUSTPYTHON @unittest.expectedFailure def test_subclass_duplicate_name_dynamic(self): from types import DynamicClassAttribute class Base(Enum): @DynamicClassAttribute def test(self): return 'dynamic' class Test(Base): test = 1 self.assertEqual(Test.test.test, 'dynamic') def test_no_duplicates(self): class UniqueEnum(Enum): def __init__(self, *args): cls = self.__class__ if any(self.value == e.value for e in cls): a = self.name e = cls(self.value).name raise ValueError( "aliases not allowed in UniqueEnum: %r --> %r" % (a, e) ) class Color(UniqueEnum): red = 1 green = 2 blue = 3 with self.assertRaises(ValueError): class Color(UniqueEnum): red = 1 green = 2 blue = 3 grene = 2 def test_init(self): class Planet(Enum): MERCURY = (3.303e+23, 2.4397e6) VENUS = (4.869e+24, 6.0518e6) EARTH = (5.976e+24, 6.37814e6) MARS = (6.421e+23, 3.3972e6) JUPITER = (1.9e+27, 7.1492e7) SATURN = (5.688e+26, 6.0268e7) URANUS = (8.686e+25, 2.5559e7) NEPTUNE = (1.024e+26, 2.4746e7) def __init__(self, mass, radius): self.mass = mass # in kilograms self.radius = radius # in meters @property def surface_gravity(self): # universal gravitational constant (m3 kg-1 s-2) G = 6.67300E-11 return G * self.mass / (self.radius * self.radius) self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80) self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6)) def test_ignore(self): class Period(timedelta, Enum): ''' different lengths of time ''' def __new__(cls, value, period): obj = timedelta.__new__(cls, value) obj._value_ = value obj.period = period return obj _ignore_ = 'Period i' Period = vars() for i in range(13): Period['month_%d' % i] = i*30, 'month' for i in range(53): Period['week_%d' % i] = i*7, 'week' for i in range(32): Period['day_%d' % i] = i, 'day' OneDay = day_1 OneWeek = week_1 OneMonth = month_1 self.assertFalse(hasattr(Period, '_ignore_')) self.assertFalse(hasattr(Period, 'Period')) self.assertFalse(hasattr(Period, 'i')) self.assertTrue(isinstance(Period.day_1, timedelta)) self.assertTrue(Period.month_1 is Period.day_30) self.assertTrue(Period.week_4 is Period.day_28) # TODO: RUSTPYTHON @unittest.expectedFailure def test_nonhash_value(self): class AutoNumberInAList(Enum): def __new__(cls): value = [len(cls.__members__) + 1] obj = object.__new__(cls) obj._value_ = value return obj class ColorInAList(AutoNumberInAList): red = () green = () blue = () self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue]) for enum, value in zip(ColorInAList, range(3)): value += 1 self.assertEqual(enum.value, [value]) self.assertIs(ColorInAList([value]), enum) def test_conflicting_types_resolved_in_new(self): class LabelledIntEnum(int, Enum): def __new__(cls, *args): value, label = args obj = int.__new__(cls, value) obj.label = label obj._value_ = value return obj class LabelledList(LabelledIntEnum): unprocessed = (1, "Unprocessed") payment_complete = (2, "Payment Complete") self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete]) self.assertEqual(LabelledList.unprocessed, 1) self.assertEqual(LabelledList(1), LabelledList.unprocessed) def test_auto_number(self): class Color(Enum): red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, 
Color.green]) self.assertEqual(Color.red.value, 1) self.assertEqual(Color.blue.value, 2) self.assertEqual(Color.green.value, 3) def test_auto_name(self): class Color(Enum): def _generate_next_value_(name, start, count, last): return name red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 'red') self.assertEqual(Color.blue.value, 'blue') self.assertEqual(Color.green.value, 'green') def test_auto_name_inherit(self): class AutoNameEnum(Enum): def _generate_next_value_(name, start, count, last): return name class Color(AutoNameEnum): red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 'red') self.assertEqual(Color.blue.value, 'blue') self.assertEqual(Color.green.value, 'green') def test_auto_garbage(self): class Color(Enum): red = 'red' blue = auto() self.assertEqual(Color.blue.value, 1) def test_auto_garbage_corrected(self): class Color(Enum): red = 'red' blue = 2 green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 'red') self.assertEqual(Color.blue.value, 2) self.assertEqual(Color.green.value, 3) def test_auto_order(self): with self.assertRaises(TypeError): class Color(Enum): red = auto() green = auto() blue = auto() def _generate_next_value_(name, start, count, last): return name def test_auto_order_wierd(self): weird_auto = auto() weird_auto.value = 'pathological case' class Color(Enum): red = weird_auto def _generate_next_value_(name, start, count, last): return name blue = auto() self.assertEqual(list(Color), [Color.red, Color.blue]) self.assertEqual(Color.red.value, 'pathological case') self.assertEqual(Color.blue.value, 'blue') def test_duplicate_auto(self): class Dupes(Enum): first = primero = auto() second = auto() third = auto() self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes)) def test_default_missing(self): class Color(Enum): RED = 1 GREEN = 2 BLUE = 3 try: Color(7) except ValueError as exc: self.assertTrue(exc.__context__ is None) else: raise Exception('Exception not raised.') def test_missing(self): class Color(Enum): red = 1 green = 2 blue = 3 @classmethod def _missing_(cls, item): if item == 'three': return cls.blue elif item == 'bad return': # trigger internal error return 5 elif item == 'error out': raise ZeroDivisionError else: # trigger not found return None self.assertIs(Color('three'), Color.blue) try: Color(7) except ValueError as exc: self.assertTrue(exc.__context__ is None) else: raise Exception('Exception not raised.') try: Color('bad return') except TypeError as exc: self.assertTrue(isinstance(exc.__context__, ValueError)) else: raise Exception('Exception not raised.') try: Color('error out') except ZeroDivisionError as exc: self.assertTrue(isinstance(exc.__context__, ValueError)) else: raise Exception('Exception not raised.') def test_multiple_mixin(self): class MaxMixin: @classproperty def MAX(cls): max = len(cls) cls.MAX = max return max class StrMixin: def __str__(self): return self._name_.lower() class SomeEnum(Enum): def behavior(self): return 'booyah' class AnotherEnum(Enum): def behavior(self): return 'nuhuh!' def social(self): return "what's up?" 
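        # With only MaxMixin mixed in, Enum.__str__ is still used, so
        # str(Color.BLUE) stays 'Color.BLUE'; as soon as StrMixin appears
        # before Enum in the bases its __str__ wins and the lower-cased
        # member name ('blue') is returned instead, which is what the
        # assertions below exercise for the various base orderings.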
class Color(MaxMixin, Enum): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 3) self.assertEqual(Color.MAX, 3) self.assertEqual(str(Color.BLUE), 'Color.BLUE') class Color(MaxMixin, StrMixin, Enum): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 3) self.assertEqual(Color.MAX, 3) self.assertEqual(str(Color.BLUE), 'blue') class Color(StrMixin, MaxMixin, Enum): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 3) self.assertEqual(Color.MAX, 3) self.assertEqual(str(Color.BLUE), 'blue') class CoolColor(StrMixin, SomeEnum, Enum): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(CoolColor.RED.value, 1) self.assertEqual(CoolColor.GREEN.value, 2) self.assertEqual(CoolColor.BLUE.value, 3) self.assertEqual(str(CoolColor.BLUE), 'blue') self.assertEqual(CoolColor.RED.behavior(), 'booyah') class CoolerColor(StrMixin, AnotherEnum, Enum): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(CoolerColor.RED.value, 1) self.assertEqual(CoolerColor.GREEN.value, 2) self.assertEqual(CoolerColor.BLUE.value, 3) self.assertEqual(str(CoolerColor.BLUE), 'blue') self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!') self.assertEqual(CoolerColor.RED.social(), "what's up?") class CoolestColor(StrMixin, SomeEnum, AnotherEnum): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(CoolestColor.RED.value, 1) self.assertEqual(CoolestColor.GREEN.value, 2) self.assertEqual(CoolestColor.BLUE.value, 3) self.assertEqual(str(CoolestColor.BLUE), 'blue') self.assertEqual(CoolestColor.RED.behavior(), 'booyah') self.assertEqual(CoolestColor.RED.social(), "what's up?") class ConfusedColor(StrMixin, AnotherEnum, SomeEnum): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(ConfusedColor.RED.value, 1) self.assertEqual(ConfusedColor.GREEN.value, 2) self.assertEqual(ConfusedColor.BLUE.value, 3) self.assertEqual(str(ConfusedColor.BLUE), 'blue') self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!') self.assertEqual(ConfusedColor.RED.social(), "what's up?") class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(ReformedColor.RED.value, 1) self.assertEqual(ReformedColor.GREEN.value, 2) self.assertEqual(ReformedColor.BLUE.value, 3) self.assertEqual(str(ReformedColor.BLUE), 'blue') self.assertEqual(ReformedColor.RED.behavior(), 'booyah') self.assertEqual(ConfusedColor.RED.social(), "what's up?") self.assertTrue(issubclass(ReformedColor, int)) def test_multiple_inherited_mixin(self): class StrEnum(str, Enum): def __new__(cls, *args, **kwargs): for a in args: if not isinstance(a, str): raise TypeError("Enumeration '%s' (%s) is not" " a string" % (a, type(a).__name__)) return str.__new__(cls, *args, **kwargs) @unique class Decision1(StrEnum): REVERT = "REVERT" REVERT_ALL = "REVERT_ALL" RETRY = "RETRY" class MyEnum(StrEnum): pass @unique class Decision2(MyEnum): REVERT = "REVERT" REVERT_ALL = "REVERT_ALL" RETRY = "RETRY" def test_multiple_mixin_inherited(self): class MyInt(int): def __new__(cls, value): return super().__new__(cls, value) class HexMixin: def __repr__(self): return hex(self) class MyIntEnum(HexMixin, MyInt, enum.Enum): pass class Foo(MyIntEnum): TEST = 1 self.assertTrue(isinstance(Foo.TEST, MyInt)) 
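        # HexMixin precedes enum.Enum in MyIntEnum's bases, so its __repr__
        # (hex(self)) overrides the default Enum repr -- hence the "0x1"
        # expected below rather than "<Foo.TEST: 1>".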
self.assertEqual(repr(Foo.TEST), "0x1") class Fee(MyIntEnum): TEST = 1 def __new__(cls, value): value += 1 member = int.__new__(cls, value) member._value_ = value return member self.assertEqual(Fee.TEST, 2) def test_empty_globals(self): # bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError # when using compile and exec because f_globals is empty code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')" code = compile(code, "<string>", "exec") global_ns = {} local_ls = {} exec(code, global_ns, local_ls) class TestOrder(unittest.TestCase): def test_same_members(self): class Color(Enum): _order_ = 'red green blue' red = 1 green = 2 blue = 3 def test_same_members_with_aliases(self): class Color(Enum): _order_ = 'red green blue' red = 1 green = 2 blue = 3 verde = green def test_same_members_wrong_order(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue' red = 1 blue = 3 green = 2 def test_order_has_extra_members(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue purple' red = 1 green = 2 blue = 3 def test_order_has_extra_members_with_aliases(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue purple' red = 1 green = 2 blue = 3 verde = green def test_enum_has_extra_members(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue' red = 1 green = 2 blue = 3 purple = 4 def test_enum_has_extra_members_with_aliases(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue' red = 1 green = 2 blue = 3 purple = 4 verde = green class TestFlag(unittest.TestCase): """Tests of the Flags.""" class Perm(Flag): R, W, X = 4, 2, 1 class Open(Flag): RO = 0 WO = 1 RW = 2 AC = 3 CE = 1<<19 class Color(Flag): BLACK = 0 RED = 1 GREEN = 2 BLUE = 4 PURPLE = RED|BLUE def test_str(self): Perm = self.Perm self.assertEqual(str(Perm.R), 'Perm.R') self.assertEqual(str(Perm.W), 'Perm.W') self.assertEqual(str(Perm.X), 'Perm.X') self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W') self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X') self.assertEqual(str(Perm(0)), 'Perm.0') self.assertEqual(str(~Perm.R), 'Perm.W|X') self.assertEqual(str(~Perm.W), 'Perm.R|X') self.assertEqual(str(~Perm.X), 'Perm.R|W') self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X') self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0') self.assertEqual(str(Perm(~0)), 'Perm.R|W|X') Open = self.Open self.assertEqual(str(Open.RO), 'Open.RO') self.assertEqual(str(Open.WO), 'Open.WO') self.assertEqual(str(Open.AC), 'Open.AC') self.assertEqual(str(Open.RO | Open.CE), 'Open.CE') self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO') self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO') self.assertEqual(str(~Open.WO), 'Open.CE|RW') self.assertEqual(str(~Open.AC), 'Open.CE') self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC') self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW') def test_repr(self): Perm = self.Perm self.assertEqual(repr(Perm.R), '<Perm.R: 4>') self.assertEqual(repr(Perm.W), '<Perm.W: 2>') self.assertEqual(repr(Perm.X), '<Perm.X: 1>') self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>') self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>') self.assertEqual(repr(Perm(0)), '<Perm.0: 0>') 
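        # A plain Flag complements a member only within the bits that are
        # actually defined, so the inverted values below stay non-negative
        # (e.g. ~R == W|X with value 3); the IntFlag tests further down show
        # the contrasting behaviour where ~ produces negative int values.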
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>') self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>') self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>') self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>') self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>') self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>') Open = self.Open self.assertEqual(repr(Open.RO), '<Open.RO: 0>') self.assertEqual(repr(Open.WO), '<Open.WO: 1>') self.assertEqual(repr(Open.AC), '<Open.AC: 3>') self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>') self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>') self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>') self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>') self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>') self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>') self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>') def test_format(self): Perm = self.Perm self.assertEqual(format(Perm.R, ''), 'Perm.R') self.assertEqual(format(Perm.R | Perm.X, ''), 'Perm.R|X') def test_or(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual((i | j), Perm(i.value | j.value)) self.assertEqual((i | j).value, i.value | j.value) self.assertIs(type(i | j), Perm) for i in Perm: self.assertIs(i | i, i) Open = self.Open self.assertIs(Open.RO | Open.CE, Open.CE) def test_and(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: for j in values: self.assertEqual((i & j).value, i.value & j.value) self.assertIs(type(i & j), Perm) for i in Perm: self.assertIs(i & i, i) self.assertIs(i & RWX, i) self.assertIs(RWX & i, i) Open = self.Open self.assertIs(Open.RO & Open.CE, Open.RO) def test_xor(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual((i ^ j).value, i.value ^ j.value) self.assertIs(type(i ^ j), Perm) for i in Perm: self.assertIs(i ^ Perm(0), i) self.assertIs(Perm(0) ^ i, i) Open = self.Open self.assertIs(Open.RO ^ Open.CE, Open.CE) self.assertIs(Open.CE ^ Open.CE, Open.RO) def test_invert(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: self.assertIs(type(~i), Perm) self.assertEqual(~~i, i) for i in Perm: self.assertIs(~~i, i) Open = self.Open self.assertIs(Open.WO & ~Open.WO, Open.RO) self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE) def test_bool(self): Perm = self.Perm for f in Perm: self.assertTrue(f) Open = self.Open for f in Open: self.assertEqual(bool(f.value), bool(f)) def test_programatic_function_string(self): Perm = Flag('Perm', 'R W X') lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_with_start(self): Perm = Flag('Perm', 'R W X', start=8) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 8<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def 
test_programatic_function_string_list(self): Perm = Flag('Perm', ['R', 'W', 'X']) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_iterable(self): Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<(2*i+1) e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_from_dict(self): Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32)))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<(2*i+1) e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_pickle(self): if isinstance(FlagStooges, Exception): raise FlagStooges test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE) test_pickle_dump_load(self.assertIs, FlagStooges) def test_contains(self): Open = self.Open Color = self.Color self.assertFalse(Color.BLACK in Open) self.assertFalse(Open.RO in Color) with self.assertRaises(TypeError): 'BLACK' in Color with self.assertRaises(TypeError): 'RO' in Open with self.assertRaises(TypeError): 1 in Color with self.assertRaises(TypeError): 1 in Open def test_member_contains(self): Perm = self.Perm R, W, X = Perm RW = R | W RX = R | X WX = W | X RWX = R | W | X self.assertTrue(R in RW) self.assertTrue(R in RX) self.assertTrue(R in RWX) self.assertTrue(W in RW) self.assertTrue(W in WX) self.assertTrue(W in RWX) self.assertTrue(X in RX) self.assertTrue(X in WX) self.assertTrue(X in RWX) self.assertFalse(R in WX) self.assertFalse(W in RX) self.assertFalse(X in RW) def test_auto_number(self): class Color(Flag): red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 1) self.assertEqual(Color.blue.value, 2) self.assertEqual(Color.green.value, 4) def test_auto_number_garbage(self): with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'): class Color(Flag): red = 'not an int' blue = auto() def test_cascading_failure(self): class Bizarre(Flag): c = 3 d = 4 f = 6 # Bizarre.c | Bizarre.d self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5) self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5) self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2) self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2) self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1) self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1) def test_duplicate_auto(self): class Dupes(Enum): first = primero = auto() second = auto() third = auto() self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes)) def test_bizarre(self): class Bizarre(Flag): b = 3 c = 4 d = 6 self.assertEqual(repr(Bizarre(7)), 
'<Bizarre.d|c|b: 7>') def test_multiple_mixin(self): class AllMixin: @classproperty def ALL(cls): members = list(cls) all_value = None if members: all_value = members[0] for member in members[1:]: all_value |= member cls.ALL = all_value return all_value class StrMixin: def __str__(self): return self._name_.lower() class Color(AllMixin, Flag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'Color.BLUE') class Color(AllMixin, StrMixin, Flag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'blue') class Color(StrMixin, AllMixin, Flag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'blue') # TODO: RUSTPYTHON @unittest.expectedFailure @unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, inconsistent test result on Windows due to threading") @threading_helper.reap_threads def test_unique_composite(self): # override __eq__ to be identity only class TestFlag(Flag): one = auto() two = auto() three = auto() four = auto() five = auto() six = auto() seven = auto() eight = auto() def __eq__(self, other): return self is other def __hash__(self): return hash(self._value_) # have multiple threads competing to complete the composite members seen = set() failed = False def cycle_enum(): nonlocal failed try: for i in range(256): seen.add(TestFlag(i)) except Exception: failed = True threads = [ threading.Thread(target=cycle_enum) for _ in range(8) ] with threading_helper.start_threads(threads): pass # check that only 248 members were created self.assertFalse( failed, 'at least one thread failed while creating composite members') self.assertEqual(256, len(seen), 'too many composite members created') class TestIntFlag(unittest.TestCase): """Tests of the IntFlags.""" class Perm(IntFlag): X = 1 << 0 W = 1 << 1 R = 1 << 2 class Open(IntFlag): RO = 0 WO = 1 RW = 2 AC = 3 CE = 1<<19 class Color(IntFlag): BLACK = 0 RED = 1 GREEN = 2 BLUE = 4 PURPLE = RED|BLUE def test_type(self): Perm = self.Perm self.assertTrue(Perm._member_type_ is int) Open = self.Open for f in Perm: self.assertTrue(isinstance(f, Perm)) self.assertEqual(f, f.value) self.assertTrue(isinstance(Perm.W | Perm.X, Perm)) self.assertEqual(Perm.W | Perm.X, 3) for f in Open: self.assertTrue(isinstance(f, Open)) self.assertEqual(f, f.value) self.assertTrue(isinstance(Open.WO | Open.RW, Open)) self.assertEqual(Open.WO | Open.RW, 3) def test_str(self): Perm = self.Perm self.assertEqual(str(Perm.R), 'Perm.R') self.assertEqual(str(Perm.W), 'Perm.W') self.assertEqual(str(Perm.X), 'Perm.X') self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W') self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X') self.assertEqual(str(Perm.R | 8), 'Perm.8|R') self.assertEqual(str(Perm(0)), 'Perm.0') self.assertEqual(str(Perm(8)), 'Perm.8') self.assertEqual(str(~Perm.R), 'Perm.W|X') self.assertEqual(str(~Perm.W), 'Perm.R|X') self.assertEqual(str(~Perm.X), 'Perm.R|W') self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X') self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8') self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X') 
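        # IntFlag members behave like plain ints, so unnamed bits (such as 8)
        # and negative complements are kept in the value and rendered
        # numerically by str(), e.g. 'Perm.8|R' and 'Perm.-8'.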
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X') self.assertEqual(str(Perm(~8)), 'Perm.R|W|X') Open = self.Open self.assertEqual(str(Open.RO), 'Open.RO') self.assertEqual(str(Open.WO), 'Open.WO') self.assertEqual(str(Open.AC), 'Open.AC') self.assertEqual(str(Open.RO | Open.CE), 'Open.CE') self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO') self.assertEqual(str(Open(4)), 'Open.4') self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO') self.assertEqual(str(~Open.WO), 'Open.CE|RW') self.assertEqual(str(~Open.AC), 'Open.CE') self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO') self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW') self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO') def test_repr(self): Perm = self.Perm self.assertEqual(repr(Perm.R), '<Perm.R: 4>') self.assertEqual(repr(Perm.W), '<Perm.W: 2>') self.assertEqual(repr(Perm.X), '<Perm.X: 1>') self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>') self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>') self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>') self.assertEqual(repr(Perm(0)), '<Perm.0: 0>') self.assertEqual(repr(Perm(8)), '<Perm.8: 8>') self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>') self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>') self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>') self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>') self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>') self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>') self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>') self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>') Open = self.Open self.assertEqual(repr(Open.RO), '<Open.RO: 0>') self.assertEqual(repr(Open.WO), '<Open.WO: 1>') self.assertEqual(repr(Open.AC), '<Open.AC: 3>') self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>') self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>') self.assertEqual(repr(Open(4)), '<Open.4: 4>') self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>') self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>') self.assertEqual(repr(~Open.AC), '<Open.CE: -4>') self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>') self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>') self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>') def test_format(self): Perm = self.Perm self.assertEqual(format(Perm.R, ''), '4') self.assertEqual(format(Perm.R | Perm.X, ''), '5') # TODO: RUSTPYTHON @unittest.expectedFailure def test_or(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual(i | j, i.value | j.value) self.assertEqual((i | j).value, i.value | j.value) self.assertIs(type(i | j), Perm) for j in range(8): self.assertEqual(i | j, i.value | j) self.assertEqual((i | j).value, i.value | j) self.assertIs(type(i | j), Perm) self.assertEqual(j | i, j | i.value) self.assertEqual((j | i).value, j | i.value) self.assertIs(type(j | i), Perm) for i in Perm: self.assertIs(i | i, i) self.assertIs(i | 0, i) self.assertIs(0 | i, i) Open = self.Open self.assertIs(Open.RO | Open.CE, Open.CE) # TODO: RUSTPYTHON @unittest.expectedFailure def test_and(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: for j in values: self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j)) self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j)) self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j)) for j in range(8): 
self.assertEqual(i & j, i.value & j) self.assertEqual((i & j).value, i.value & j) self.assertIs(type(i & j), Perm) self.assertEqual(j & i, j & i.value) self.assertEqual((j & i).value, j & i.value) self.assertIs(type(j & i), Perm) for i in Perm: self.assertIs(i & i, i) self.assertIs(i & 7, i) self.assertIs(7 & i, i) Open = self.Open self.assertIs(Open.RO & Open.CE, Open.RO) # TODO: RUSTPYTHON @unittest.expectedFailure def test_xor(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual(i ^ j, i.value ^ j.value) self.assertEqual((i ^ j).value, i.value ^ j.value) self.assertIs(type(i ^ j), Perm) for j in range(8): self.assertEqual(i ^ j, i.value ^ j) self.assertEqual((i ^ j).value, i.value ^ j) self.assertIs(type(i ^ j), Perm) self.assertEqual(j ^ i, j ^ i.value) self.assertEqual((j ^ i).value, j ^ i.value) self.assertIs(type(j ^ i), Perm) for i in Perm: self.assertIs(i ^ 0, i) self.assertIs(0 ^ i, i) Open = self.Open self.assertIs(Open.RO ^ Open.CE, Open.CE) self.assertIs(Open.CE ^ Open.CE, Open.RO) def test_invert(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: self.assertEqual(~i, ~i.value) self.assertEqual((~i).value, ~i.value) self.assertIs(type(~i), Perm) self.assertEqual(~~i, i) for i in Perm: self.assertIs(~~i, i) Open = self.Open self.assertIs(Open.WO & ~Open.WO, Open.RO) self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE) def test_programatic_function_string(self): Perm = IntFlag('Perm', 'R W X') lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_with_start(self): Perm = IntFlag('Perm', 'R W X', start=8) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 8<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_list(self): Perm = IntFlag('Perm', ['R', 'W', 'X']) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_iterable(self): Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<(2*i+1) e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_from_dict(self): Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32)))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, 
Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<(2*i+1) e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_from_empty_list(self): Perm = enum.IntFlag('Perm', []) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 0, Perm) Thing = enum.Enum('Thing', []) lst = list(Thing) self.assertEqual(len(lst), len(Thing)) self.assertEqual(len(Thing), 0, Thing) def test_programatic_function_from_empty_tuple(self): Perm = enum.IntFlag('Perm', ()) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 0, Perm) Thing = enum.Enum('Thing', ()) self.assertEqual(len(lst), len(Thing)) self.assertEqual(len(Thing), 0, Thing) def test_contains(self): Open = self.Open Color = self.Color self.assertTrue(Color.GREEN in Color) self.assertTrue(Open.RW in Open) self.assertFalse(Color.GREEN in Open) self.assertFalse(Open.RW in Color) with self.assertRaises(TypeError): 'GREEN' in Color with self.assertRaises(TypeError): 'RW' in Open with self.assertRaises(TypeError): 2 in Color with self.assertRaises(TypeError): 2 in Open def test_member_contains(self): Perm = self.Perm R, W, X = Perm RW = R | W RX = R | X WX = W | X RWX = R | W | X self.assertTrue(R in RW) self.assertTrue(R in RX) self.assertTrue(R in RWX) self.assertTrue(W in RW) self.assertTrue(W in WX) self.assertTrue(W in RWX) self.assertTrue(X in RX) self.assertTrue(X in WX) self.assertTrue(X in RWX) self.assertFalse(R in WX) self.assertFalse(W in RX) self.assertFalse(X in RW) with self.assertRaises(TypeError): self.assertFalse('test' in RW) def test_bool(self): Perm = self.Perm for f in Perm: self.assertTrue(f) Open = self.Open for f in Open: self.assertEqual(bool(f.value), bool(f)) def test_multiple_mixin(self): class AllMixin: @classproperty def ALL(cls): members = list(cls) all_value = None if members: all_value = members[0] for member in members[1:]: all_value |= member cls.ALL = all_value return all_value class StrMixin: def __str__(self): return self._name_.lower() class Color(AllMixin, IntFlag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'Color.BLUE') class Color(AllMixin, StrMixin, IntFlag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'blue') class Color(StrMixin, AllMixin, IntFlag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'blue') # TODO: RUSTPYTHON @unittest.expectedFailure @unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, inconsistent test result on Windows due to threading") @threading_helper.reap_threads def test_unique_composite(self): # override __eq__ to be identity only class TestFlag(IntFlag): one = auto() two = auto() three = auto() four = auto() five = auto() six = auto() seven = auto() eight = auto() def __eq__(self, other): return self is other def __hash__(self): return hash(self._value_) # have multiple threads competing 
to complete the composite members seen = set() failed = False def cycle_enum(): nonlocal failed try: for i in range(256): seen.add(TestFlag(i)) except Exception: failed = True threads = [ threading.Thread(target=cycle_enum) for _ in range(8) ] with threading_helper.start_threads(threads): pass # check that only 248 members were created self.assertFalse( failed, 'at least one thread failed while creating composite members') self.assertEqual(256, len(seen), 'too many composite members created') class TestEmptyAndNonLatinStrings(unittest.TestCase): def test_empty_string(self): with self.assertRaises(ValueError): empty_abc = Enum('empty_abc', ('', 'B', 'C')) def test_non_latin_character_string(self): greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C')) item = getattr(greek_abc, '\u03B1') self.assertEqual(item.value, 1) def test_non_latin_number_string(self): hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3')) item = getattr(hebrew_123, '\u05D0') self.assertEqual(item.value, 1) class TestUnique(unittest.TestCase): def test_unique_clean(self): @unique class Clean(Enum): one = 1 two = 'dos' tres = 4.0 @unique class Cleaner(IntEnum): single = 1 double = 2 triple = 3 def test_unique_dirty(self): with self.assertRaisesRegex(ValueError, 'tres.*one'): @unique class Dirty(Enum): one = 1 two = 'dos' tres = 1 with self.assertRaisesRegex( ValueError, 'double.*single.*turkey.*triple', ): @unique class Dirtier(IntEnum): single = 1 double = 1 triple = 3 turkey = 3 def test_unique_with_name(self): @unique class Silly(Enum): one = 1 two = 'dos' name = 3 @unique class Sillier(IntEnum): single = 1 name = 2 triple = 3 value = 4 expected_help_output_with_docs = """\ Help on class Color in module %s: class Color(enum.Enum) | Color(value, names=None, *, module=None, qualname=None, type=None, start=1) |\x20\x20 | An enumeration. |\x20\x20 | Method resolution order: | Color | enum.Enum | builtins.object |\x20\x20 | Data and other attributes defined here: |\x20\x20 | blue = <Color.blue: 3> |\x20\x20 | green = <Color.green: 2> |\x20\x20 | red = <Color.red: 1> |\x20\x20 | ---------------------------------------------------------------------- | Data descriptors inherited from enum.Enum: |\x20\x20 | name | The name of the Enum member. |\x20\x20 | value | The value of the Enum member. |\x20\x20 | ---------------------------------------------------------------------- | Readonly properties inherited from enum.EnumMeta: |\x20\x20 | __members__ | Returns a mapping of member name->value. |\x20\x20\x20\x20\x20\x20 | This mapping lists all enum members, including aliases. 
Note that this | is a read-only view of the internal mapping.""" expected_help_output_without_docs = """\ Help on class Color in module %s: class Color(enum.Enum) | Color(value, names=None, *, module=None, qualname=None, type=None, start=1) |\x20\x20 | Method resolution order: | Color | enum.Enum | builtins.object |\x20\x20 | Data and other attributes defined here: |\x20\x20 | blue = <Color.blue: 3> |\x20\x20 | green = <Color.green: 2> |\x20\x20 | red = <Color.red: 1> |\x20\x20 | ---------------------------------------------------------------------- | Data descriptors inherited from enum.Enum: |\x20\x20 | name |\x20\x20 | value |\x20\x20 | ---------------------------------------------------------------------- | Data descriptors inherited from enum.EnumMeta: |\x20\x20 | __members__""" class TestStdLib(unittest.TestCase): maxDiff = None class Color(Enum): red = 1 green = 2 blue = 3 # TODO: RUSTPYTHON @unittest.expectedFailure def test_pydoc(self): # indirectly test __objclass__ if StrEnum.__doc__ is None: expected_text = expected_help_output_without_docs % __name__ else: expected_text = expected_help_output_with_docs % __name__ output = StringIO() helper = pydoc.Helper(output=output) helper(self.Color) result = output.getvalue().strip() self.assertEqual(result, expected_text) # TODO: RUSTPYTHON @unittest.expectedFailure def test_inspect_getmembers(self): values = dict(( ('__class__', EnumMeta), ('__doc__', 'An enumeration.'), ('__members__', self.Color.__members__), ('__module__', __name__), ('blue', self.Color.blue), ('green', self.Color.green), ('name', Enum.__dict__['name']), ('red', self.Color.red), ('value', Enum.__dict__['value']), )) result = dict(inspect.getmembers(self.Color)) self.assertEqual(values.keys(), result.keys()) failed = False for k in values.keys(): if result[k] != values[k]: print() print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' % ('=' * 75, k, result[k], values[k], '=' * 75), sep='') failed = True if failed: self.fail("result does not equal expected, see print above") # TODO: RUSTPYTHON @unittest.expectedFailure def test_inspect_classify_class_attrs(self): # indirectly test __objclass__ from inspect import Attribute values = [ Attribute(name='__class__', kind='data', defining_class=object, object=EnumMeta), Attribute(name='__doc__', kind='data', defining_class=self.Color, object='An enumeration.'), Attribute(name='__members__', kind='property', defining_class=EnumMeta, object=EnumMeta.__members__), Attribute(name='__module__', kind='data', defining_class=self.Color, object=__name__), Attribute(name='blue', kind='data', defining_class=self.Color, object=self.Color.blue), Attribute(name='green', kind='data', defining_class=self.Color, object=self.Color.green), Attribute(name='red', kind='data', defining_class=self.Color, object=self.Color.red), Attribute(name='name', kind='data', defining_class=Enum, object=Enum.__dict__['name']), Attribute(name='value', kind='data', defining_class=Enum, object=Enum.__dict__['value']), ] values.sort(key=lambda item: item.name) result = list(inspect.classify_class_attrs(self.Color)) result.sort(key=lambda item: item.name) failed = False for v, r in zip(values, result): if r != v: print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='') failed = True if failed: self.fail("result does not equal expected, see print above") class MiscTestCase(unittest.TestCase): def test__all__(self): check__all__(self, enum) # These are unordered here on purpose to ensure that declaration order # makes no difference. 
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5  # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5

class TestIntEnumConvert(unittest.TestCase):
    def test_convert_value_lookup_priority(self):
        test_type = enum.IntEnum._convert_(
                'UnittestConvert',
                ('test.test_enum', '__main__')[__name__=='__main__'],
                filter=lambda x: x.startswith('CONVERT_TEST_'))
        # We don't want the reverse lookup value to vary when there are
        # multiple possible names for a given value.  It should always
        # report the first lexicographical name in that case.
        self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')

    def test_convert(self):
        test_type = enum.IntEnum._convert_(
                'UnittestConvert',
                ('test.test_enum', '__main__')[__name__=='__main__'],
                filter=lambda x: x.startswith('CONVERT_TEST_'))
        # Ensure that test_type has all of the desired names and values.
        self.assertEqual(test_type.CONVERT_TEST_NAME_F,
                         test_type.CONVERT_TEST_NAME_A)
        self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
        # Ensure that test_type only picked up names matching the filter.
        self.assertEqual([name for name in dir(test_type)
                          if name[0:2] not in ('CO', '__')],
                         [], msg='Names other than CONVERT_TEST_* found.')

    @unittest.skipUnless(sys.version_info[:2] == (3, 8),
                         '_convert was deprecated in 3.8')
    def test_convert_warn(self):
        with self.assertWarns(DeprecationWarning):
            enum.IntEnum._convert(
                'UnittestConvert',
                ('test.test_enum', '__main__')[__name__=='__main__'],
                filter=lambda x: x.startswith('CONVERT_TEST_'))

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    @unittest.skipUnless(sys.version_info >= (3, 9),
                         '_convert was removed in 3.9')
    def test_convert_raise(self):
        with self.assertRaises(AttributeError):
            enum.IntEnum._convert(
                'UnittestConvert',
                ('test.test_enum', '__main__')[__name__=='__main__'],
                filter=lambda x: x.startswith('CONVERT_TEST_'))


if __name__ == '__main__':
    unittest.main()
util.py
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import binascii
import os, sys, re, json
import time  # used by DebugMem and the profiler decorator below
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading

from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue


def inv_dict(d):
    return {v: k for k, v in d.items()}


base_units = {'ZENY': 8, 'mZENY': 5, 'uZENY': 2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'),
              _('Within 5 blocks'), _('Within 2 blocks'),
              _('In the next block')]


def normalize_version(v):
    return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]


class NotEnoughFunds(Exception):
    pass


class InvalidPassword(Exception):
    def __str__(self):
        return _("Incorrect password")


# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user'''
    pass


class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        from .transaction import Transaction
        if isinstance(obj, Transaction):
            return obj.as_dict()
        return super(MyEncoder, self).default(obj)


class PrintError(object):
    '''A handy base class'''
    def diagnostic_name(self):
        return self.__class__.__name__

    def print_error(self, *msg):
        print_error("[%s]" % self.diagnostic_name(), *msg)

    def print_msg(self, *msg):
        print_msg("[%s]" % self.diagnostic_name(), *msg)


class ThreadJob(PrintError):
    """A job that is run periodically from a thread's main loop.  run() is
    called from that thread's context.
    """
    def run(self):
        """Called periodically from the thread"""
        pass


class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks'''
    def __init__(self, classes, interval=30):
        self.next_time = 0
        self.classes = classes
        self.interval = interval

    def mem_stats(self):
        import gc
        self.print_error("Start memscan")
        gc.collect()
        objmap = defaultdict(list)
        for obj in gc.get_objects():
            for class_ in self.classes:
                if isinstance(obj, class_):
                    objmap[class_].append(obj)
        for class_, objs in objmap.items():
            self.print_error("%s: %d" % (class_.__name__, len(objs)))
        self.print_error("Finish memscan")

    def run(self):
        if time.time() > self.next_time:
            self.mem_stats()
            self.next_time = time.time() + self.interval


class DaemonThread(threading.Thread, PrintError):
    """ daemon thread that terminates cleanly """

    def __init__(self):
        threading.Thread.__init__(self)
        self.parent_thread = threading.currentThread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []

    def add_jobs(self, jobs):
        with self.job_lock:
            self.jobs.extend(jobs)

    def run_jobs(self):
        # Don't let a throwing job disrupt the thread, future runs of
        # itself, or other jobs.  This is useful protection against
        # malformed or malicious server responses
        with self.job_lock:
            for job in self.jobs:
                try:
                    job.run()
                except Exception as e:
                    traceback.print_exc(file=sys.stderr)

    def remove_jobs(self, jobs):
        with self.job_lock:
            for job in jobs:
                self.jobs.remove(job)

    def start(self):
        with self.running_lock:
            self.running = True
        return threading.Thread.start(self)

    def is_running(self):
        with self.running_lock:
            return self.running and self.parent_thread.is_alive()

    def stop(self):
        with self.running_lock:
            self.running = False

    def on_stop(self):
        if 'ANDROID_DATA' in os.environ:
            import jnius
            jnius.detach()
            self.print_error("jnius detach")
        self.print_error("stopped")


# TODO: disable
is_verbose = True

def set_verbosity(b):
    global is_verbose
    is_verbose = b


def print_error(*args):
    if not is_verbose:
        return
    print_stderr(*args)

def print_stderr(*args):
    args = [str(item) for item in args]
    sys.stderr.write(" ".join(args) + "\n")
    sys.stderr.flush()

def print_msg(*args):
    # Stringify args
    args = [str(item) for item in args]
    sys.stdout.write(" ".join(args) + "\n")
    sys.stdout.flush()


def json_encode(obj):
    try:
        s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
    except TypeError:
        s = repr(obj)
    return s

def json_decode(x):
    try:
        return json.loads(x, parse_float=Decimal)
    except:
        return x


# decorator that prints execution time
def profiler(func):
    def do_profile(func, args, kw_args):
        n = func.__name__
        t0 = time.time()
        o = func(*args, **kw_args)
        t = time.time() - t0
        print_error("[profiler]", n, "%.4f"%t)
        return o
    return lambda *args, **kw_args: do_profile(func, args, kw_args)


def android_ext_dir():
    import jnius
    env = jnius.autoclass('android.os.Environment')
    return env.getExternalStorageDirectory().getPath()

def android_data_dir():
    import jnius
    PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
    return PythonActivity.mActivity.getFilesDir().getPath() + '/data'

def android_headers_dir():
    d = android_ext_dir() + '/org.electrum_zeny.electrum_zeny'
    if not os.path.exists(d):
        os.mkdir(d)
    return d

def android_check_data_dir():
    """ if needed, move old directory to sandbox """
    ext_dir = android_ext_dir()
    data_dir = android_data_dir()
    old_electrum_dir = ext_dir + '/electrum_zeny'
    if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
        import shutil
        new_headers_path = android_headers_dir() + '/blockchain_headers'
        old_headers_path = old_electrum_dir + '/blockchain_headers'
        if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
            print_error("Moving headers file to", new_headers_path)
            shutil.move(old_headers_path, new_headers_path)
        print_error("Moving data to", data_dir)
        shutil.move(old_electrum_dir, data_dir)
    return data_dir


def get_headers_dir(config):
    return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path


def assert_bytes(*args):
    """ porting helper, assert args type """
    try:
        for x in args:
            assert isinstance(x, (bytes, bytearray))
    except:
        print('assert bytes failed', list(map(type, args)))
        raise

def assert_str(*args):
    """ porting helper, assert args type """
    for x in args:
        assert isinstance(x, str)


def to_string(x, enc):
    if isinstance(x, (bytes, bytearray)):
        return x.decode(enc)
    if isinstance(x, str):
        return x
    else:
        raise TypeError("Not a string or bytes like object")

def to_bytes(something, encoding='utf8'):
    """ cast string to bytes() like object, but for python2 support
    it's bytearray copy """
    if isinstance(something, bytes):
        return something
    if isinstance(something, str):
        return something.encode(encoding)
    elif isinstance(something, bytearray):
        return bytes(something)
    else:
        raise TypeError("Not a string or bytes like object")


bfh_builder = lambda x: bytes.fromhex(x)


def hfu(x):
    """ py2-py3 aware wrapper for str.encode('hex')
    :param x: str
    :return: str
    """
    assert_bytes(x)
    return binascii.hexlify(x)

def bfh(x):
    """ py2-py3 aware wrapper to "bytes.fromhex()" func
    :param x: str
    :rtype: bytes
    """
    if isinstance(x, str):
        return bfh_builder(x)
    # TODO: check for iterator interface
    elif isinstance(x, (list, tuple, map)):
        return [bfh(sub) for sub in x]
    else:
        raise TypeError('Unexpected type: ' + str(type(x)))

def bh2u(x):
    """ unicode with hex representation of bytes(), e.g.
    x = bytes([1, 2, 10]); bh2u(x) -> '01020a'
    :param x: bytes
    :rtype: str
    """
    assert_bytes(x)
    return binascii.hexlify(x).decode('ascii')


def user_dir():
    if 'ANDROID_DATA' in os.environ:
        return android_check_data_dir()
    elif os.name == 'posix':
        return os.path.join(os.environ["HOME"], ".electrum-zeny")
    elif "APPDATA" in os.environ:
        return os.path.join(os.environ["APPDATA"], "Electrum-ZENY")
    elif "LOCALAPPDATA" in os.environ:
        return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-ZENY")
    else:
        #raise Exception("No home directory found in environment variables.")
        return
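# Illustrative values for the formatting helpers below (not from the original
# file; they assume the default 8 decimal places and a locale whose decimal
# point is '.'):
#     format_satoshis_plain(123450000)  -> '1.2345'
#     format_satoshis(1234500000)       -> '12.345'
#     format_satoshis(-50000)           -> '-0.0005'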
as a decimal point and has no thousands separator""" scale_factor = pow(10, decimal_point) return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.') def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False): from locale import localeconv if x is None: return 'unknown' x = int(x) # Some callers pass Decimal scale_factor = pow (10, decimal_point) integer_part = "{:n}".format(int(abs(x) / scale_factor)) if x < 0: integer_part = '-' + integer_part elif is_diff: integer_part = '+' + integer_part dp = localeconv()['decimal_point'] fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor) fract_part = fract_part.rstrip('0') if len(fract_part) < num_zeros: fract_part += "0" * (num_zeros - len(fract_part)) result = integer_part + dp + fract_part if whitespaces: result += " " * (decimal_point - len(fract_part)) result = " " * (15 - len(result)) + result return result def timestamp_to_datetime(timestamp): try: return datetime.fromtimestamp(timestamp) except: return None def format_time(timestamp): date = timestamp_to_datetime(timestamp) return date.isoformat(' ')[:-3] if date else _("Unknown") # Takes a timestamp and returns a string with the approximation of the age def age(from_date, since_date = None, target_tz=None, include_seconds=False): if from_date is None: return "Unknown" from_date = datetime.fromtimestamp(from_date) if since_date is None: since_date = datetime.now(target_tz) td = time_difference(from_date - since_date, include_seconds) return td + " ago" if from_date < since_date else "in " + td def time_difference(distance_in_time, include_seconds): #distance_in_time = since_date - from_date distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds))) distance_in_minutes = int(round(distance_in_seconds/60)) if distance_in_minutes <= 1: if include_seconds: for remainder in [5, 10, 20]: if distance_in_seconds < remainder: return "less than %s seconds" % remainder if distance_in_seconds < 40: return "half a minute" elif distance_in_seconds < 60: return "less than a minute" else: return "1 minute" else: if distance_in_minutes == 0: return "less than a minute" else: return "1 minute" elif distance_in_minutes < 45: return "%s minutes" % distance_in_minutes elif distance_in_minutes < 90: return "about 1 hour" elif distance_in_minutes < 1440: return "about %d hours" % (round(distance_in_minutes / 60.0)) elif distance_in_minutes < 2880: return "1 day" elif distance_in_minutes < 43220: return "%d days" % (round(distance_in_minutes / 1440)) elif distance_in_minutes < 86400: return "about 1 month" elif distance_in_minutes < 525600: return "%d months" % (round(distance_in_minutes / 43200)) elif distance_in_minutes < 1051200: return "about 1 year" else: return "over %d years" % (round(distance_in_minutes / 525600)) mainnet_block_explorers = { 'bchain.info': ('https://bchain.info/MONA', {'tx': 'tx', 'addr': 'addr'}), 'insight.zenyco-ex.org': ('https://zeny.insight.zenyco-ex.org/insight', {'tx': 'tx', 'addr': 'address'}), 'namuyan.dip.jp': ('http://namuyan.dip.jp/MultiLightBlockExplorer/zeny', {'tx': 'tx', 'addr': 'address'}), } testnet_block_explorers = { 'Blocktrail.com': ('https://www.blocktrail.com/tBTC', {'tx': 'tx', 'addr': 'address'}), 'system default': ('blockchain:', {'tx': 'tx', 'addr': 'address'}), } def block_explorer_info(): from . 
import bitcoin return testnet_block_explorers if bitcoin.TESTNET else mainnet_block_explorers def block_explorer(config): return config.get('block_explorer', 'insight.zenyco-ex.org') def block_explorer_tuple(config): return block_explorer_info().get(block_explorer(config)) def block_explorer_URL(config, kind, item): be_tuple = block_explorer_tuple(config) if not be_tuple: return kind_str = be_tuple[1].get(kind) if not kind_str: return url_parts = [be_tuple[0], kind_str, item] return "/".join(url_parts) # URL decode #_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE) #urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x) def parse_URI(uri, on_pr=None): from . import bitcoin from .bitcoin import COIN if ':' not in uri: if not bitcoin.is_address(uri): raise BaseException("Not a bitzeny address") return {'address': uri} u = urllib.parse.urlparse(uri) if u.scheme != 'bitzeny': raise BaseException("Not a bitzeny URI") address = u.path # python for android fails to parse query if address.find('?') > 0: address, query = u.path.split('?') pq = urllib.parse.parse_qs(query) else: pq = urllib.parse.parse_qs(u.query) for k, v in pq.items(): if len(v)!=1: raise Exception('Duplicate Key', k) out = {k: v[0] for k, v in pq.items()} if address: if not bitcoin.is_address(address): raise BaseException("Invalid bitzeny address:" + address) out['address'] = address if 'amount' in out: am = out['amount'] m = re.match('([0-9\.]+)X([0-9])', am) if m: k = int(m.group(2)) - 8 amount = Decimal(m.group(1)) * pow( Decimal(10) , k) else: amount = Decimal(am) * COIN out['amount'] = int(amount) if 'message' in out: out['message'] = out['message'] out['memo'] = out['message'] if 'time' in out: out['time'] = int(out['time']) if 'exp' in out: out['exp'] = int(out['exp']) if 'sig' in out: out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58)) r = out.get('r') sig = out.get('sig') name = out.get('name') if on_pr and (r or (name and sig)): def get_payment_request_thread(): from . import paymentrequest as pr if name and sig: s = pr.serialize_request(out).SerializeToString() request = pr.PaymentRequest(s) else: request = pr.get_payment_request(r) if on_pr: on_pr(request) t = threading.Thread(target=get_payment_request_thread) t.setDaemon(True) t.start() return out def create_URI(addr, amount, message): from . 
import bitcoin if not bitcoin.is_address(addr): return "" query = [] if amount: query.append('amount=%s'%format_satoshis_plain(amount)) if message: query.append('message=%s'%urllib.parse.quote(message)) p = urllib.parse.ParseResult(scheme='bitzeny', netloc='', path=addr, params='', query='&'.join(query), fragment='') return urllib.parse.urlunparse(p) # Python bug (http://bugs.python.org/issue1927) causes raw_input # to be redirected improperly between stdin/stderr on Unix systems #TODO: py3 def raw_input(prompt=None): if prompt: sys.stdout.write(prompt) return builtin_raw_input() import builtins builtin_raw_input = builtins.input builtins.input = raw_input def parse_json(message): # TODO: check \r\n pattern n = message.find(b'\n') if n==-1: return None, message try: j = json.loads(message[0:n].decode('utf8')) except: j = None return j, message[n+1:] class timeout(Exception): pass import socket import errno import json import ssl import time class SocketPipe: def __init__(self, socket): self.socket = socket self.message = b'' self.set_timeout(0.1) self.recv_time = time.time() def set_timeout(self, t): self.socket.settimeout(t) def idle_time(self): return time.time() - self.recv_time def get(self): while True: response, self.message = parse_json(self.message) if response is not None: return response try: data = self.socket.recv(1024) except socket.timeout: raise timeout except ssl.SSLError: raise timeout except socket.error as err: if err.errno == 60: raise timeout elif err.errno in [11, 35, 10035]: print_error("socket errno %d (resource temporarily unavailable)"% err.errno) time.sleep(0.2) raise timeout else: print_error("pipe: socket error", err) data = b'' except: traceback.print_exc(file=sys.stderr) data = b'' if not data: # Connection closed remotely return None self.message += data self.recv_time = time.time() def send(self, request): out = json.dumps(request) + '\n' out = out.encode('utf8') self._send(out) def send_all(self, requests): out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests)) self._send(out) def _send(self, out): while out: try: sent = self.socket.send(out) out = out[sent:] except ssl.SSLError as e: print_error("SSLError:", e) time.sleep(0.1) continue except socket.error as e: if e[0] in (errno.EWOULDBLOCK,errno.EAGAIN): print_error("EAGAIN: retrying") time.sleep(0.1) continue elif e[0] in ['timed out', 'The write operation timed out']: print_error("socket timeout, retry") time.sleep(0.1) continue else: traceback.print_exc(file=sys.stdout) raise e class QueuePipe: def __init__(self, send_queue=None, get_queue=None): self.send_queue = send_queue if send_queue else queue.Queue() self.get_queue = get_queue if get_queue else queue.Queue() self.set_timeout(0.1) def get(self): try: return self.get_queue.get(timeout=self.timeout) except queue.Empty: raise timeout def get_all(self): responses = [] while True: try: r = self.get_queue.get_nowait() responses.append(r) except queue.Empty: break return responses def set_timeout(self, t): self.timeout = t def send(self, request): self.send_queue.put(request) def send_all(self, requests): for request in requests: self.send(request) def check_www_dir(rdir): import urllib, shutil, os if not os.path.exists(rdir): os.mkdir(rdir) index = os.path.join(rdir, 'index.html') if not os.path.exists(index): print_error("copying index.html") src = os.path.join(os.path.dirname(__file__), 'www', 'index.html') shutil.copy(src, index) files = [ "https://code.jquery.com/jquery-1.9.1.min.js", 
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js", "https://code.jquery.com/ui/1.10.3/jquery-ui.js", "https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css" ] for URL in files: path = urllib.parse.urlsplit(URL).path filename = os.path.basename(path) path = os.path.join(rdir, filename) if not os.path.exists(path): print_error("downloading ", URL) urllib.request.urlretrieve(URL, path)
test_signal.py
import errno import os import random import signal import socket import statistics import subprocess import sys import time import unittest from test import support from test.support.script_helper import assert_python_ok, spawn_python try: import _testcapi except ImportError: _testcapi = None class GenericTests(unittest.TestCase): def test_enums(self): for name in dir(signal): sig = getattr(signal, name) if name in {'SIG_DFL', 'SIG_IGN'}: self.assertIsInstance(sig, signal.Handlers) elif name in {'SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'}: self.assertIsInstance(sig, signal.Sigmasks) elif name.startswith('SIG') and not name.startswith('SIG_'): self.assertIsInstance(sig, signal.Signals) elif name.startswith('CTRL_'): self.assertIsInstance(sig, signal.Signals) self.assertEqual(sys.platform, "win32") @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") class PosixTests(unittest.TestCase): def trivial_signal_handler(self, *args): pass def test_out_of_range_signal_number_raises_error(self): self.assertRaises(ValueError, signal.getsignal, 4242) self.assertRaises(ValueError, signal.signal, 4242, self.trivial_signal_handler) self.assertRaises(ValueError, signal.strsignal, 4242) def test_setting_signal_handler_to_none_raises_error(self): self.assertRaises(TypeError, signal.signal, signal.SIGUSR1, None) def test_getsignal(self): hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler) self.assertIsInstance(hup, signal.Handlers) self.assertEqual(signal.getsignal(signal.SIGHUP), self.trivial_signal_handler) signal.signal(signal.SIGHUP, hup) self.assertEqual(signal.getsignal(signal.SIGHUP), hup) def test_strsignal(self): self.assertIn("Interrupt", signal.strsignal(signal.SIGINT)) self.assertIn("Terminated", signal.strsignal(signal.SIGTERM)) self.assertIn("Hangup", signal.strsignal(signal.SIGHUP)) # Issue 3864, unknown if this affects earlier versions of freebsd also def test_interprocess_signal(self): dirname = os.path.dirname(__file__) script = os.path.join(dirname, 'signalinterproctester.py') assert_python_ok(script) def test_valid_signals(self): s = signal.valid_signals() self.assertIsInstance(s, set) self.assertIn(signal.Signals.SIGINT, s) self.assertIn(signal.Signals.SIGALRM, s) self.assertNotIn(0, s) self.assertNotIn(signal.NSIG, s) self.assertLess(len(s), signal.NSIG) @unittest.skipUnless(sys.executable, "sys.executable required.") def test_keyboard_interrupt_exit_code(self): """KeyboardInterrupt triggers exit via SIGINT.""" process = subprocess.run( [sys.executable, "-c", "import os, signal, time\n" "os.kill(os.getpid(), signal.SIGINT)\n" "for _ in range(999): time.sleep(0.01)"], stderr=subprocess.PIPE) self.assertIn(b"KeyboardInterrupt", process.stderr) self.assertEqual(process.returncode, -signal.SIGINT) # Caveat: The exit code is insufficient to guarantee we actually died # via a signal. POSIX shells do more than look at the 8 bit value. # Writing an automation friendly test of an interactive shell # to confirm that our process died via a SIGINT proved too complex. 
@unittest.skipUnless(sys.platform == "win32", "Windows specific") class WindowsSignalTests(unittest.TestCase): def test_valid_signals(self): s = signal.valid_signals() self.assertIsInstance(s, set) self.assertGreaterEqual(len(s), 6) self.assertIn(signal.Signals.SIGINT, s) self.assertNotIn(0, s) self.assertNotIn(signal.NSIG, s) self.assertLess(len(s), signal.NSIG) def test_issue9324(self): # Updated for issue #10003, adding SIGBREAK handler = lambda x, y: None checked = set() for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE, signal.SIGILL, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM): # Set and then reset a handler for signals that work on windows. # Issue #18396, only for signals without a C-level handler. if signal.getsignal(sig) is not None: signal.signal(sig, signal.signal(sig, handler)) checked.add(sig) # Issue #18396: Ensure the above loop at least tested *something* self.assertTrue(checked) with self.assertRaises(ValueError): signal.signal(-1, handler) with self.assertRaises(ValueError): signal.signal(7, handler) @unittest.skipUnless(sys.executable, "sys.executable required.") def test_keyboard_interrupt_exit_code(self): """KeyboardInterrupt triggers an exit using STATUS_CONTROL_C_EXIT.""" # We don't test via os.kill(os.getpid(), signal.CTRL_C_EVENT) here # as that requires setting up a console control handler in a child # in its own process group. Doable, but quite complicated. (see # @eryksun on https://github.com/python/cpython/pull/11862) process = subprocess.run( [sys.executable, "-c", "raise KeyboardInterrupt"], stderr=subprocess.PIPE) self.assertIn(b"KeyboardInterrupt", process.stderr) STATUS_CONTROL_C_EXIT = 0xC000013A self.assertEqual(process.returncode, STATUS_CONTROL_C_EXIT) class WakeupFDTests(unittest.TestCase): def test_invalid_call(self): # First parameter is positional-only with self.assertRaises(TypeError): signal.set_wakeup_fd(signum=signal.SIGINT) # warn_on_full_buffer is a keyword-only parameter with self.assertRaises(TypeError): signal.set_wakeup_fd(signal.SIGINT, False) def test_invalid_fd(self): fd = support.make_bad_fd() self.assertRaises((ValueError, OSError), signal.set_wakeup_fd, fd) def test_invalid_socket(self): sock = socket.socket() fd = sock.fileno() sock.close() self.assertRaises((ValueError, OSError), signal.set_wakeup_fd, fd) def test_set_wakeup_fd_result(self): r1, w1 = os.pipe() self.addCleanup(os.close, r1) self.addCleanup(os.close, w1) r2, w2 = os.pipe() self.addCleanup(os.close, r2) self.addCleanup(os.close, w2) if hasattr(os, 'set_blocking'): os.set_blocking(w1, False) os.set_blocking(w2, False) signal.set_wakeup_fd(w1) self.assertEqual(signal.set_wakeup_fd(w2), w1) self.assertEqual(signal.set_wakeup_fd(-1), w2) self.assertEqual(signal.set_wakeup_fd(-1), -1) def test_set_wakeup_fd_socket_result(self): sock1 = socket.socket() self.addCleanup(sock1.close) sock1.setblocking(False) fd1 = sock1.fileno() sock2 = socket.socket() self.addCleanup(sock2.close) sock2.setblocking(False) fd2 = sock2.fileno() signal.set_wakeup_fd(fd1) self.assertEqual(signal.set_wakeup_fd(fd2), fd1) self.assertEqual(signal.set_wakeup_fd(-1), fd2) self.assertEqual(signal.set_wakeup_fd(-1), -1) # On Windows, files are always blocking and Windows does not provide a # function to test if a socket is in non-blocking mode. 
@unittest.skipIf(sys.platform == "win32", "tests specific to POSIX") def test_set_wakeup_fd_blocking(self): rfd, wfd = os.pipe() self.addCleanup(os.close, rfd) self.addCleanup(os.close, wfd) # fd must be non-blocking os.set_blocking(wfd, True) with self.assertRaises(ValueError) as cm: signal.set_wakeup_fd(wfd) self.assertEqual(str(cm.exception), "the fd %s must be in non-blocking mode" % wfd) # non-blocking is ok os.set_blocking(wfd, False) signal.set_wakeup_fd(wfd) signal.set_wakeup_fd(-1) @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") class WakeupSignalTests(unittest.TestCase): @unittest.skipIf(_testcapi is None, 'need _testcapi') def check_wakeup(self, test_body, *signals, ordered=True): # use a subprocess to have only one thread code = """if 1: import _testcapi import os import signal import struct signals = {!r} def handler(signum, frame): pass def check_signum(signals): data = os.read(read, len(signals)+1) raised = struct.unpack('%uB' % len(data), data) if not {!r}: raised = set(raised) signals = set(signals) if raised != signals: raise Exception("%r != %r" % (raised, signals)) {} signal.signal(signal.SIGALRM, handler) read, write = os.pipe() os.set_blocking(write, False) signal.set_wakeup_fd(write) test() check_signum(signals) os.close(read) os.close(write) """.format(tuple(map(int, signals)), ordered, test_body) assert_python_ok('-c', code) @unittest.skipIf(_testcapi is None, 'need _testcapi') def test_wakeup_write_error(self): # Issue #16105: write() errors in the C signal handler should not # pass silently. # Use a subprocess to have only one thread. code = """if 1: import _testcapi import errno import os import signal import sys from test.support import captured_stderr def handler(signum, frame): 1/0 signal.signal(signal.SIGALRM, handler) r, w = os.pipe() os.set_blocking(r, False) # Set wakeup_fd a read-only file descriptor to trigger the error signal.set_wakeup_fd(r) try: with captured_stderr() as err: signal.raise_signal(signal.SIGALRM) except ZeroDivisionError: # An ignored exception should have been printed out on stderr err = err.getvalue() if ('Exception ignored when trying to write to the signal wakeup fd' not in err): raise AssertionError(err) if ('OSError: [Errno %d]' % errno.EBADF) not in err: raise AssertionError(err) else: raise AssertionError("ZeroDivisionError not raised") os.close(r) os.close(w) """ r, w = os.pipe() try: os.write(r, b'x') except OSError: pass else: self.skipTest("OS doesn't report write() error on the read end of a pipe") finally: os.close(r) os.close(w) assert_python_ok('-c', code) def test_wakeup_fd_early(self): self.check_wakeup("""def test(): import select import time TIMEOUT_FULL = 10 TIMEOUT_HALF = 5 class InterruptSelect(Exception): pass def handler(signum, frame): raise InterruptSelect signal.signal(signal.SIGALRM, handler) signal.alarm(1) # We attempt to get a signal during the sleep, # before select is called try: select.select([], [], [], TIMEOUT_FULL) except InterruptSelect: pass else: raise Exception("select() was not interrupted") before_time = time.monotonic() select.select([read], [], [], TIMEOUT_FULL) after_time = time.monotonic() dt = after_time - before_time if dt >= TIMEOUT_HALF: raise Exception("%s >= %s" % (dt, TIMEOUT_HALF)) """, signal.SIGALRM) def test_wakeup_fd_during(self): self.check_wakeup("""def test(): import select import time TIMEOUT_FULL = 10 TIMEOUT_HALF = 5 class InterruptSelect(Exception): pass def handler(signum, frame): raise InterruptSelect signal.signal(signal.SIGALRM, handler) 
signal.alarm(1) before_time = time.monotonic() # We attempt to get a signal during the select call try: select.select([read], [], [], TIMEOUT_FULL) except InterruptSelect: pass else: raise Exception("select() was not interrupted") after_time = time.monotonic() dt = after_time - before_time if dt >= TIMEOUT_HALF: raise Exception("%s >= %s" % (dt, TIMEOUT_HALF)) """, signal.SIGALRM) def test_signum(self): self.check_wakeup("""def test(): signal.signal(signal.SIGUSR1, handler) signal.raise_signal(signal.SIGUSR1) signal.raise_signal(signal.SIGALRM) """, signal.SIGUSR1, signal.SIGALRM) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pending(self): self.check_wakeup("""def test(): signum1 = signal.SIGUSR1 signum2 = signal.SIGUSR2 signal.signal(signum1, handler) signal.signal(signum2, handler) signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2)) signal.raise_signal(signum1) signal.raise_signal(signum2) # Unblocking the 2 signals calls the C signal handler twice signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2)) """, signal.SIGUSR1, signal.SIGUSR2, ordered=False) @unittest.skipUnless(hasattr(socket, 'socketpair'), 'need socket.socketpair') class WakeupSocketSignalTests(unittest.TestCase): @unittest.skipIf(_testcapi is None, 'need _testcapi') def test_socket(self): # use a subprocess to have only one thread code = """if 1: import signal import socket import struct import _testcapi signum = signal.SIGINT signals = (signum,) def handler(signum, frame): pass signal.signal(signum, handler) read, write = socket.socketpair() write.setblocking(False) signal.set_wakeup_fd(write.fileno()) signal.raise_signal(signum) data = read.recv(1) if not data: raise Exception("no signum written") raised = struct.unpack('B', data) if raised != signals: raise Exception("%r != %r" % (raised, signals)) read.close() write.close() """ assert_python_ok('-c', code) @unittest.skipIf(_testcapi is None, 'need _testcapi') def test_send_error(self): # Use a subprocess to have only one thread. if os.name == 'nt': action = 'send' else: action = 'write' code = """if 1: import errno import signal import socket import sys import time import _testcapi from test.support import captured_stderr signum = signal.SIGINT def handler(signum, frame): pass signal.signal(signum, handler) read, write = socket.socketpair() read.setblocking(False) write.setblocking(False) signal.set_wakeup_fd(write.fileno()) # Close sockets: send() will fail read.close() write.close() with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if ('Exception ignored when trying to {action} to the signal wakeup fd' not in err): raise AssertionError(err) """.format(action=action) assert_python_ok('-c', code) @unittest.skipIf(_testcapi is None, 'need _testcapi') def test_warn_on_full_buffer(self): # Use a subprocess to have only one thread. if os.name == 'nt': action = 'send' else: action = 'write' code = """if 1: import errno import signal import socket import sys import time import _testcapi from test.support import captured_stderr signum = signal.SIGINT # This handler will be called, but we intentionally won't read from # the wakeup fd. def handler(signum, frame): pass signal.signal(signum, handler) read, write = socket.socketpair() # Fill the socketpair buffer if sys.platform == 'win32': # bpo-34130: On Windows, sometimes non-blocking send fails to fill # the full socketpair buffer, so use a timeout of 50 ms instead. 
write.settimeout(0.050) else: write.setblocking(False) # Start with large chunk size to reduce the # number of send needed to fill the buffer. written = 0 for chunk_size in (2 ** 16, 2 ** 8, 1): chunk = b"x" * chunk_size try: while True: write.send(chunk) written += chunk_size except (BlockingIOError, socket.timeout): pass print(f"%s bytes written into the socketpair" % written, flush=True) write.setblocking(False) try: write.send(b"x") except BlockingIOError: # The socketpair buffer seems full pass else: raise AssertionError("%s bytes failed to fill the socketpair " "buffer" % written) # By default, we get a warning when a signal arrives msg = ('Exception ignored when trying to {action} ' 'to the signal wakeup fd') signal.set_wakeup_fd(write.fileno()) with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if msg not in err: raise AssertionError("first set_wakeup_fd() test failed, " "stderr: %r" % err) # And also if warn_on_full_buffer=True signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=True) with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if msg not in err: raise AssertionError("set_wakeup_fd(warn_on_full_buffer=True) " "test failed, stderr: %r" % err) # But not if warn_on_full_buffer=False signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=False) with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if err != "": raise AssertionError("set_wakeup_fd(warn_on_full_buffer=False) " "test failed, stderr: %r" % err) # And then check the default again, to make sure warn_on_full_buffer # settings don't leak across calls. signal.set_wakeup_fd(write.fileno()) with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if msg not in err: raise AssertionError("second set_wakeup_fd() test failed, " "stderr: %r" % err) """.format(action=action) assert_python_ok('-c', code) @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") class SiginterruptTest(unittest.TestCase): def readpipe_interrupted(self, interrupt): """Perform a read during which a signal will arrive. Return True if the read is interrupted by the signal and raises an exception. Return False if it returns normally. 
""" # use a subprocess to have only one thread, to have a timeout on the # blocking read and to not touch signal handling in this process code = """if 1: import errno import os import signal import sys interrupt = %r r, w = os.pipe() def handler(signum, frame): 1 / 0 signal.signal(signal.SIGALRM, handler) if interrupt is not None: signal.siginterrupt(signal.SIGALRM, interrupt) print("ready") sys.stdout.flush() # run the test twice try: for loop in range(2): # send a SIGALRM in a second (during the read) signal.alarm(1) try: # blocking call: read from a pipe without data os.read(r, 1) except ZeroDivisionError: pass else: sys.exit(2) sys.exit(3) finally: os.close(r) os.close(w) """ % (interrupt,) with spawn_python('-c', code) as process: try: # wait until the child process is loaded and has started first_line = process.stdout.readline() stdout, stderr = process.communicate(timeout=5.0) except subprocess.TimeoutExpired: process.kill() return False else: stdout = first_line + stdout exitcode = process.wait() if exitcode not in (2, 3): raise Exception("Child error (exit code %s): %r" % (exitcode, stdout)) return (exitcode == 3) def test_without_siginterrupt(self): # If a signal handler is installed and siginterrupt is not called # at all, when that signal arrives, it interrupts a syscall that's in # progress. interrupted = self.readpipe_interrupted(None) self.assertTrue(interrupted) def test_siginterrupt_on(self): # If a signal handler is installed and siginterrupt is called with # a true value for the second argument, when that signal arrives, it # interrupts a syscall that's in progress. interrupted = self.readpipe_interrupted(True) self.assertTrue(interrupted) def test_siginterrupt_off(self): # If a signal handler is installed and siginterrupt is called with # a false value for the second argument, when that signal arrives, it # does not interrupt a syscall that's in progress. interrupted = self.readpipe_interrupted(False) self.assertFalse(interrupted) @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") class ItimerTest(unittest.TestCase): def setUp(self): self.hndl_called = False self.hndl_count = 0 self.itimer = None self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm) def tearDown(self): signal.signal(signal.SIGALRM, self.old_alarm) if self.itimer is not None: # test_itimer_exc doesn't change this attr # just ensure that itimer is stopped signal.setitimer(self.itimer, 0) def sig_alrm(self, *args): self.hndl_called = True def sig_vtalrm(self, *args): self.hndl_called = True if self.hndl_count > 3: # it shouldn't be here, because it should have been disabled. raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL " "timer.") elif self.hndl_count == 3: # disable ITIMER_VIRTUAL, this function shouldn't be called anymore signal.setitimer(signal.ITIMER_VIRTUAL, 0) self.hndl_count += 1 def sig_prof(self, *args): self.hndl_called = True signal.setitimer(signal.ITIMER_PROF, 0) def test_itimer_exc(self): # XXX I'm assuming -1 is an invalid itimer, but maybe some platform # defines it ? self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0) # Negative times are treated as zero on some platforms. 
if 0: self.assertRaises(signal.ItimerError, signal.setitimer, signal.ITIMER_REAL, -1) def test_itimer_real(self): self.itimer = signal.ITIMER_REAL signal.setitimer(self.itimer, 1.0) signal.pause() self.assertEqual(self.hndl_called, True) # Issue 3864, unknown if this affects earlier versions of freebsd also @unittest.skipIf(sys.platform in ('netbsd5',), 'itimer not reliable (does not mix well with threading) on some BSDs.') def test_itimer_virtual(self): self.itimer = signal.ITIMER_VIRTUAL signal.signal(signal.SIGVTALRM, self.sig_vtalrm) signal.setitimer(self.itimer, 0.3, 0.2) start_time = time.monotonic() while time.monotonic() - start_time < 60.0: # use up some virtual time by doing real work _ = pow(12345, 67890, 10000019) if signal.getitimer(self.itimer) == (0.0, 0.0): break # sig_vtalrm handler stopped this itimer else: # Issue 8424 self.skipTest("timeout: likely cause: machine too slow or load too " "high") # virtual itimer should be (0.0, 0.0) now self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0)) # and the handler should have been called self.assertEqual(self.hndl_called, True) def test_itimer_prof(self): self.itimer = signal.ITIMER_PROF signal.signal(signal.SIGPROF, self.sig_prof) signal.setitimer(self.itimer, 0.2, 0.2) start_time = time.monotonic() while time.monotonic() - start_time < 60.0: # do some work _ = pow(12345, 67890, 10000019) if signal.getitimer(self.itimer) == (0.0, 0.0): break # sig_prof handler stopped this itimer else: # Issue 8424 self.skipTest("timeout: likely cause: machine too slow or load too " "high") # profiling itimer should be (0.0, 0.0) now self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0)) # and the handler should have been called self.assertEqual(self.hndl_called, True) def test_setitimer_tiny(self): # bpo-30807: C setitimer() takes a microsecond-resolution interval. # Check that float -> timeval conversion doesn't round # the interval down to zero, which would disable the timer. self.itimer = signal.ITIMER_REAL signal.setitimer(self.itimer, 1e-6) time.sleep(1) self.assertEqual(self.hndl_called, True) class PendingSignalsTests(unittest.TestCase): """ Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait() functions. 
""" @unittest.skipUnless(hasattr(signal, 'sigpending'), 'need signal.sigpending()') def test_sigpending_empty(self): self.assertEqual(signal.sigpending(), set()) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') @unittest.skipUnless(hasattr(signal, 'sigpending'), 'need signal.sigpending()') def test_sigpending(self): code = """if 1: import os import signal def handler(signum, frame): 1/0 signum = signal.SIGUSR1 signal.signal(signum, handler) signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) os.kill(os.getpid(), signum) pending = signal.sigpending() for sig in pending: assert isinstance(sig, signal.Signals), repr(pending) if pending != {signum}: raise Exception('%s != {%s}' % (pending, signum)) try: signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_kill'), 'need signal.pthread_kill()') def test_pthread_kill(self): code = """if 1: import signal import threading import sys signum = signal.SIGUSR1 def handler(signum, frame): 1/0 signal.signal(signum, handler) tid = threading.get_ident() try: signal.pthread_kill(tid, signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def wait_helper(self, blocked, test): """ test: body of the "def test(signum):" function. blocked: number of the blocked signal """ code = '''if 1: import signal import sys from signal import Signals def handler(signum, frame): 1/0 %s blocked = %s signum = signal.SIGALRM # child: block and wait the signal try: signal.signal(signum, handler) signal.pthread_sigmask(signal.SIG_BLOCK, [blocked]) # Do the tests test(signum) # The handler must not be called on unblock try: signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked]) except ZeroDivisionError: print("the signal handler has been called", file=sys.stderr) sys.exit(1) except BaseException as err: print("error: {}".format(err), file=sys.stderr) sys.stderr.flush() sys.exit(1) ''' % (test.strip(), blocked) # sig*wait* must be called with the signal blocked: since the current # process might have several threads running, use a subprocess to have # a single thread. 
assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'sigwait'), 'need signal.sigwait()') def test_sigwait(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) received = signal.sigwait([signum]) assert isinstance(received, signal.Signals), received if received != signum: raise Exception('received %s, not %s' % (received, signum)) ''') @unittest.skipUnless(hasattr(signal, 'sigwaitinfo'), 'need signal.sigwaitinfo()') def test_sigwaitinfo(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) info = signal.sigwaitinfo([signum]) if info.si_signo != signum: raise Exception("info.si_signo != %s" % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) info = signal.sigtimedwait([signum], 10.1000) if info.si_signo != signum: raise Exception('info.si_signo != %s' % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_poll(self): # check that polling with sigtimedwait works self.wait_helper(signal.SIGALRM, ''' def test(signum): import os os.kill(os.getpid(), signum) info = signal.sigtimedwait([signum], 0) if info.si_signo != signum: raise Exception('info.si_signo != %s' % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_timeout(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): received = signal.sigtimedwait([signum], 1.0) if received is not None: raise Exception("received=%r" % (received,)) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_negative_timeout(self): signum = signal.SIGALRM self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0) @unittest.skipUnless(hasattr(signal, 'sigwait'), 'need signal.sigwait()') @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_sigwait_thread(self): # Check that calling sigwait() from a thread doesn't suspend the whole # process. A new interpreter is spawned to avoid problems when mixing # threads and fork(): only async-safe functions are allowed between # fork() and exec(). 
assert_python_ok("-c", """if True: import os, threading, sys, time, signal # the default handler terminates the process signum = signal.SIGUSR1 def kill_later(): # wait until the main thread is waiting in sigwait() time.sleep(1) os.kill(os.getpid(), signum) # the signal must be blocked by all the threads signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) killer = threading.Thread(target=kill_later) killer.start() received = signal.sigwait([signum]) if received != signum: print("sigwait() received %s, not %s" % (received, signum), file=sys.stderr) sys.exit(1) killer.join() # unblock the signal, which should have been cleared by sigwait() signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) """) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask_arguments(self): self.assertRaises(TypeError, signal.pthread_sigmask) self.assertRaises(TypeError, signal.pthread_sigmask, 1) self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3) self.assertRaises(OSError, signal.pthread_sigmask, 1700, []) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [signal.NSIG]) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [0]) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [1<<1000]) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask_valid_signals(self): s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, s) # Get current blocked set s = signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals()) self.assertLessEqual(s, signal.valid_signals()) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask(self): code = """if 1: import signal import os; import threading def handler(signum, frame): 1/0 def kill(signum): os.kill(os.getpid(), signum) def check_mask(mask): for sig in mask: assert isinstance(sig, signal.Signals), repr(sig) def read_sigmask(): sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, []) check_mask(sigmask) return sigmask signum = signal.SIGUSR1 # Install our signal handler old_handler = signal.signal(signum, handler) # Unblock SIGUSR1 (and copy the old mask) to test our signal handler old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) check_mask(old_mask) try: kill(signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") # Block and then raise SIGUSR1. 
The signal is blocked: the signal # handler is not called, and the signal is now pending mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) check_mask(mask) kill(signum) # Check the new mask blocked = read_sigmask() check_mask(blocked) if signum not in blocked: raise Exception("%s not in %s" % (signum, blocked)) if old_mask ^ blocked != {signum}: raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum)) # Unblock SIGUSR1 try: # unblock the pending signal calls immediately the signal handler signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") try: kill(signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") # Check the new mask unblocked = read_sigmask() if signum in unblocked: raise Exception("%s in %s" % (signum, unblocked)) if blocked ^ unblocked != {signum}: raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum)) if old_mask != unblocked: raise Exception("%s != %s" % (old_mask, unblocked)) """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_kill'), 'need signal.pthread_kill()') def test_pthread_kill_main_thread(self): # Test that a signal can be sent to the main thread with pthread_kill() # before any other thread has been created (see issue #12392). code = """if True: import threading import signal import sys def handler(signum, frame): sys.exit(3) signal.signal(signal.SIGUSR1, handler) signal.pthread_kill(threading.get_ident(), signal.SIGUSR1) sys.exit(2) """ with spawn_python('-c', code) as process: stdout, stderr = process.communicate() exitcode = process.wait() if exitcode != 3: raise Exception("Child error (exit code %s): %s" % (exitcode, stdout)) class StressTest(unittest.TestCase): """ Stress signal delivery, especially when a signal arrives in the middle of recomputing the signal state or executing previously tripped signal handlers. """ def setsig(self, signum, handler): old_handler = signal.signal(signum, handler) self.addCleanup(signal.signal, signum, old_handler) def measure_itimer_resolution(self): N = 20 times = [] def handler(signum=None, frame=None): if len(times) < N: times.append(time.perf_counter()) # 1 µs is the smallest possible timer interval, # we want to measure what the concrete duration # will be on this platform signal.setitimer(signal.ITIMER_REAL, 1e-6) self.addCleanup(signal.setitimer, signal.ITIMER_REAL, 0) self.setsig(signal.SIGALRM, handler) handler() while len(times) < N: time.sleep(1e-3) durations = [times[i+1] - times[i] for i in range(len(times) - 1)] med = statistics.median(durations) if support.verbose: print("detected median itimer() resolution: %.6f s." % (med,)) return med def decide_itimer_count(self): # Some systems have poor setitimer() resolution (for example # measured around 20 ms. on FreeBSD 9), so decide on a reasonable # number of sequential timers based on that. reso = self.measure_itimer_resolution() if reso <= 1e-4: return 10000 elif reso <= 1e-2: return 100 else: self.skipTest("detected itimer resolution (%.3f s.) too high " "(> 10 ms.) on this platform (or system too busy)" % (reso,)) @unittest.skipUnless(hasattr(signal, "setitimer"), "test needs setitimer()") def test_stress_delivery_dependent(self): """ This test uses dependent signal handlers. """ N = self.decide_itimer_count() sigs = [] def first_handler(signum, frame): # 1e-6 is the minimum non-zero value for `setitimer()`. # Choose a random delay so as to improve chances of # triggering a race condition. 
Ideally the signal is received # when inside critical signal-handling routines such as # Py_MakePendingCalls(). signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5) def second_handler(signum=None, frame=None): sigs.append(signum) # Here on Linux, SIGPROF > SIGALRM > SIGUSR1. By using both # ascending and descending sequences (SIGUSR1 then SIGALRM, # SIGPROF then SIGALRM), we maximize chances of hitting a bug. self.setsig(signal.SIGPROF, first_handler) self.setsig(signal.SIGUSR1, first_handler) self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL expected_sigs = 0 deadline = time.monotonic() + 15.0 while expected_sigs < N: os.kill(os.getpid(), signal.SIGPROF) expected_sigs += 1 # Wait for handlers to run to avoid signal coalescing while len(sigs) < expected_sigs and time.monotonic() < deadline: time.sleep(1e-5) os.kill(os.getpid(), signal.SIGUSR1) expected_sigs += 1 while len(sigs) < expected_sigs and time.monotonic() < deadline: time.sleep(1e-5) # All ITIMER_REAL signals should have been delivered to the # Python handler self.assertEqual(len(sigs), N, "Some signals were lost") @unittest.skipUnless(hasattr(signal, "setitimer"), "test needs setitimer()") def test_stress_delivery_simultaneous(self): """ This test uses simultaneous signal handlers. """ N = self.decide_itimer_count() sigs = [] def handler(signum, frame): sigs.append(signum) self.setsig(signal.SIGUSR1, handler) self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL expected_sigs = 0 deadline = time.monotonic() + 15.0 while expected_sigs < N: # Hopefully the SIGALRM will be received somewhere during # initial processing of SIGUSR1. signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5) os.kill(os.getpid(), signal.SIGUSR1) expected_sigs += 2 # Wait for handlers to run to avoid signal coalescing while len(sigs) < expected_sigs and time.monotonic() < deadline: time.sleep(1e-5) # All ITIMER_REAL signals should have been delivered to the # Python handler self.assertEqual(len(sigs), N, "Some signals were lost") class RaiseSignalTest(unittest.TestCase): def test_sigint(self): with self.assertRaises(KeyboardInterrupt): signal.raise_signal(signal.SIGINT) @unittest.skipIf(sys.platform != "win32", "Windows specific test") def test_invalid_argument(self): try: SIGHUP = 1 # not supported on win32 signal.raise_signal(SIGHUP) self.fail("OSError (Invalid argument) expected") except OSError as e: if e.errno == errno.EINVAL: pass else: raise def test_handler(self): is_ok = False def handler(a, b): nonlocal is_ok is_ok = True old_signal = signal.signal(signal.SIGINT, handler) self.addCleanup(signal.signal, signal.SIGINT, old_signal) signal.raise_signal(signal.SIGINT) self.assertTrue(is_ok) def tearDownModule(): support.reap_children() if __name__ == "__main__": unittest.main()
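# A minimal, self-contained illustration (POSIX, Python 3.8+) of the wakeup-fd
# mechanism the WakeupSignalTests above exercise: when a signal arrives, the
# C-level handler writes the signal number as a single byte to the registered
# non-blocking fd. This is an explanatory sketch, not part of the test suite.
import os
import signal

signal.signal(signal.SIGUSR1, lambda signum, frame: None)   # keep the process alive

read_fd, write_fd = os.pipe()
os.set_blocking(write_fd, False)            # the wakeup fd must be non-blocking
previous_fd = signal.set_wakeup_fd(write_fd)

signal.raise_signal(signal.SIGUSR1)
print(os.read(read_fd, 1))                  # one byte holding the raw signal number

signal.set_wakeup_fd(previous_fd)           # restore the previous wakeup fd (-1 if none)
os.close(read_fd)
os.close(write_fd)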
predict.py
# Arda Mavi
from keras.models import Sequential
from keras.models import model_from_json
import cv2
import sys
from get_dataset import get_img
import numpy as np
from scipy.misc import imresize  # deprecated in newer SciPy; cv2.resize is the usual replacement
import time
from threading import Thread


class Predict:
    def __init__(self):
        pass

    def predict(self, model, X):
        Y = model.predict(X)
        Y = np.argmax(Y, axis=1)
        Y = 'cat' if Y[0] == 0 else 'dog'
        return Y

    def predict_frame(self, frame):
        X = np.zeros((1, 64, 64, 3), dtype='float64')
        X[0] = frame
        # Getting model:
        model_file = open('Data/Model/model.json', 'r')
        model = model_file.read()
        model_file.close()
        model = model_from_json(model)
        # Getting weights
        model.load_weights("Data/Model/weights.h5")
        Y = self.predict(model, X)
        print('It is a ' + Y + ' !')
        return Y

    def predict_frame_as_file(self, frame_file):
        img = get_img(frame_file)
        return self.predict_frame(img)

    def wait_and_compress_img(self, img, path):
        time.sleep(10)
        # Encode the frame as JPEG and write the encoded bytes to disk.
        result, encimg = cv2.imencode('.jpg', img)
        if result:
            encimg.tofile(path)

    def predict_video(self, video_file):
        cap = cv2.VideoCapture(video_file)
        i = 0
        threads_lst = []
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                # End of the video (or read error): stop instead of looping forever.
                break
            resized_frame = imresize(frame, (64, 64, 3))
            #resized_frame = cv2.resize(frame, (64, 64))
            classification = self.predict_frame(resized_frame)
            # Start the worker thread right away; join() below requires started threads.
            t = Thread(target=self.wait_and_compress_img,
                       args=(resized_frame, f'output\\{classification}s\\{i}.jpg'))
            t.start()
            threads_lst.append(t)
            i += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        for t in threads_lst:
            t.join()
        cap.release()


if __name__ == '__main__':
    video_file = sys.argv[1]
    Predict().predict_video(video_file)
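# Usage sketch (not part of the original script): predict_video() writes frames
# into output\cats and output\dogs, so those directories need to exist before
# running it. The video filename below is a made-up placeholder.
import os
from predict import Predict

for label in ('cats', 'dogs'):
    os.makedirs(os.path.join('output', label), exist_ok=True)

Predict().predict_video('my_clip.mp4')  # hypothetical input file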
log.py
import functools import logging import multiprocessing import sys import threading import time import traceback from typing import Callable, List from termcolor import colored from .multiproc import get_worker_id from .types import PathType __all__ = [ "get_logging_levels", "set_log_file", "log", "set_logging_level", "set_console_logging_function", ] class MultiprocessingFileHandler(logging.Handler): """multiprocessing log handler This handler makes it possible for several processes to log to the same file by using a queue. Credit: https://mattgathu.github.io/multiprocessing-logging-in-python/ """ def __init__(self, path: PathType, mode: str = "a"): logging.Handler.__init__(self) self._handler = logging.FileHandler(path, mode=mode) self.queue: 'multiprocessing.Queue[str]' = multiprocessing.Queue(-1) thrd = threading.Thread(target=self.receive) thrd.daemon = True thrd.start() def setFormatter(self, fmt): logging.Handler.setFormatter(self, fmt) self._handler.setFormatter(fmt) def receive(self): while True: try: record = self.queue.get() self._handler.emit(record) except (KeyboardInterrupt, SystemExit): raise except EOFError: break except: traceback.print_exc(file=sys.stderr) def send(self, s): self.queue.put_nowait(s) def _format_record(self, record): if record.args: record.msg = record.msg % record.args record.args = None if record.exc_info: _ = self.format(record) record.exc_info = None return record def emit(self, record): try: s = self._format_record(record) self.send(s) except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) def close(self): self._handler.close() logging.Handler.close(self) def _remove_handlers(logger): while len(logger.handlers) > 0: handler = logger.handlers[0] handler.close() logger.removeHandler(handler) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.INFO) _remove_handlers(LOGGER) # remove all default handlers COLOR_MAP = { "success": "green", "warning": "yellow", "error": "red", "info": "white", } LOGGING_MAP = { "success": LOGGER.info, "warning": LOGGER.warning, "error": LOGGER.error, "info": LOGGER.info, } _CONSOLE_LOG_FN: Callable[[str], None] = functools.partial(print, flush=True) LEVEL_MAP = { "success": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "info": logging.INFO, "quiet": 999, } _CONSOLE_LOGGING_LEVEL = multiprocessing.Value('i', LEVEL_MAP["info"], lock=False) def get_logging_levels() -> List[str]: r"""Return a list of logging levels that the logging system supports.""" return list(LEVEL_MAP.keys()) def set_log_file(path: PathType, fmt: str = "%(asctime)s %(levelname)s: %(message)s") -> None: r"""Set the path of the log file. :param path: Path to the log file. :param fmt: Logging format. """ _remove_handlers(LOGGER) handler = MultiprocessingFileHandler(path, mode="a") handler.setFormatter(logging.Formatter(fmt)) LOGGER.addHandler(handler) def log(msg: str, level: str = "info", force_console: bool = False, include_proc_id: bool = True) -> None: r"""Write a line of log with the specified logging level. :param msg: Message to log. :param level: Logging level. Available options are ``success``, ``warning``, ``error``, and ``info``. :param force_console: If ``True``, will write to console regardless of logging level setting. :param include_proc_id: If ``True``, will include the process ID for multiprocessing pool workers. 
""" if level not in LOGGING_MAP: raise ValueError(f"Incorrect logging level '{level}'") if include_proc_id: worker_id = get_worker_id() if worker_id is not None: msg = f"(Worker {worker_id:2d}) {msg}" if force_console or LEVEL_MAP[level] >= _CONSOLE_LOGGING_LEVEL.value: time_str = time.strftime("[%Y-%m-%d %H:%M:%S]") _CONSOLE_LOG_FN(colored(time_str, COLOR_MAP[level]) + " " + msg) if LOGGER.hasHandlers(): LOGGING_MAP[level](msg) def set_logging_level(level: str, console: bool = True, file: bool = True) -> None: r"""Set the global logging level to the specified level. :param level: Logging level. :param console: If ``True``, the specified logging level applies to console output. :param file: If ``True``, the specified logging level applies to file output. """ if level not in LEVEL_MAP: raise ValueError(f"Incorrect logging level '{level}'") if console: _CONSOLE_LOGGING_LEVEL.value = LEVEL_MAP[level] if file: LOGGER.setLevel(LEVEL_MAP[level]) def set_console_logging_function(log_fn: Callable[[str], None]) -> None: r"""Set the console logging function **for current process only**.""" global _CONSOLE_LOG_FN _CONSOLE_LOG_FN = log_fn
example_userdata_stream.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # File: example_userdata_stream.py # # Part of ‘UNICORN Binance WebSocket API’ # Project website: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api # Documentation: https://lucit-systems-and-development.github.io/unicorn-binance-websocket-api # PyPI: https://pypi.org/project/unicorn-binance-websocket-api/ # # Author: LUCIT Systems and Development # # Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager import logging import time import threading import os logging.getLogger("unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager") logging.basicConfig(level=logging.INFO, filename=os.path.basename(__file__) + '.log', format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}", style="{") def print_stream_data_from_stream_buffer(binance_websocket_api_manager): while True: if binance_websocket_api_manager.is_manager_stopping(): exit(0) oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer() if oldest_stream_data_from_stream_buffer is False: time.sleep(0.01) else: print(oldest_stream_data_from_stream_buffer) # configure api key and secret for binance.com binance_com_api_key = "" binance_com_api_secret = "" # configure api key and secret for binance.je binance_je_api_key = "" binance_je_api_secret = "" # configure api key and secret for binance.us binance_us_api_key = "" binance_us_api_secret = "" # configure api key and secret for binance.us binance_com_iso_api_key = "" binance_com_iso_api_secret = "" # create instances of BinanceWebSocketApiManager binance_com_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com", throw_exception_if_unrepairable=True) binance_je_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.je") binance_us_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.us") binance_com_isolated_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com-isolated_margin") # create the userData streams binance_com_user_data_stream_id = binance_com_websocket_api_manager.create_stream('arr', '!userData', api_key=binance_com_api_key, api_secret=binance_com_api_secret) binance_je_user_data_stream_id = binance_je_websocket_api_manager.create_stream('arr', '!userData', api_key=binance_je_api_key, 
api_secret=binance_je_api_secret) binance_us_user_data_stream_id = binance_us_websocket_api_manager.create_stream('arr', '!userData', api_key=binance_us_api_key, api_secret=binance_us_api_secret) binance_com_iso_user_data_stream_id = binance_com_isolated_websocket_api_manager.create_stream('arr', '!userData', symbols="trxbtc", api_key=binance_com_iso_api_key, api_secret=binance_com_iso_api_secret) # start a worker process to move the received stream_data from the stream_buffer to a print function worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_com_websocket_api_manager,)) worker_thread.start() worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_je_websocket_api_manager,)) worker_thread.start() worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_us_websocket_api_manager,)) worker_thread.start() worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_com_isolated_websocket_api_manager,)) worker_thread.start() # monitor the streams while True: binance_com_isolated_websocket_api_manager.print_stream_info(binance_com_iso_user_data_stream_id) binance_com_websocket_api_manager.print_summary() binance_je_websocket_api_manager.print_summary() binance_us_websocket_api_manager.print_summary() binance_com_isolated_websocket_api_manager.print_summary() time.sleep(5)
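# A reduced, single-exchange variant of the example above (binance.com only),
# using only calls already shown there: create the manager, start one !userData
# stream, and drain the stream_buffer from the main thread. The API key and
# secret are placeholders.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import time

api_key = ""      # fill in
api_secret = ""   # fill in

manager = BinanceWebSocketApiManager(exchange="binance.com")
user_data_stream_id = manager.create_stream('arr', '!userData',
                                            api_key=api_key,
                                            api_secret=api_secret)

while True:
    data = manager.pop_stream_data_from_stream_buffer()
    if data is False:
        time.sleep(0.01)
    else:
        print(data)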
trophies_screen.py
from threading import Thread from kivymd.uix.screen import MDScreen from logic.score import get_score_history from logic.trophies.trophies_checker import check_trophies from uix.base_components.kmodal_view import KModalView from uix.components.trophy_content import TrophyContent from kivy.properties import BooleanProperty class TrophiesScreen(MDScreen): refreshing = BooleanProperty() def __init__(self, **kw): super().__init__(**kw) def on_enter(self, *args): Thread(target=self._load_data).start() def _load_data(self, delay=0): self.refreshing = True list_trophies = list() score_history = get_score_history() checked_trophies = check_trophies(score_history) for _, trophy in checked_trophies.items(): list_trophies.append({ 'name': trophy["name"], 'description': trophy["description"], 'image': trophy["image"], 'icon': "checkbox-multiple-marked-circle" if trophy["obtained"] else "", 'details': self.show_details}) self.ids.list_trophies.data = list_trophies self.refreshing = False def show_details(self, name, description, image): self.kmodal = KModalView(size_hint=(0.7, 0.4), auto_dismiss=True, background_color=[0, 0, 0, 0], content= TrophyContent(image=image, text=name, subtext=description)) self.kmodal.open() def to_home(self): self.manager.go_to_screen('home', direction='right')
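# A sketch of marshalling the UI update back to Kivy's main thread. Assigning
# self.ids.* directly from the worker Thread above can be fragile, since Kivy
# widgets are not thread-safe; kivy.clock provides Clock.schedule_once (and the
# @mainthread decorator) for this. Shown as a standalone pattern with a
# hypothetical helper, not a drop-in change to the class above.
from kivy.clock import Clock

def apply_trophies(screen, list_trophies):
    def _update(dt):
        screen.ids.list_trophies.data = list_trophies
        screen.refreshing = False
    Clock.schedule_once(_update, 0)   # runs _update on the main (UI) thread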
RaccoonBasics.py
# # AutoDock | Raccoon2 # # Copyright 2013, Stefano Forli # Molecular Graphics Lab # # The Scripps Research Institute # _ # (,) T h e # _/ # (.) S c r i p p s # \_ # (,) R e s e a r c h # ./ # ( ) I n s t i t u t e # ' # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import Tkinter as tk import tkMessageBox as tmb import Pmw, tkFont, ImageTk import DebugTools import threading, Queue, time, sys, platform from CADD.Raccoon2 import HelperFunctionsN3P as hf class RaccoonDefaultWidget: """ base widget for defining all the common aestetic settings""" def __init__(self, parent=None, iconpath=None): if not parent == None: self.parent = parent else: self.parent = tk.Tk() if iconpath: self.iconpath = iconpath self.sysarch = platform.uname()[0] # BUG # this is a fix that's needed to parse multiple files selections # due to a bug in askopenfilenames ( http://bugs.python.org/issue5712 ) t = tk.Tk() t.withdraw() self._listfixer = t.splitlist #self._listfixer = self.parent.splitlist # BUG if self.sysarch=='Windows': normsize = 8 smallsize = 7 else: normsize = 9 smallsize = 8 family = 'Arial' #family = 'Helvetica' self.FONT = tkFont.Font(family=family,size=normsize) self.FONTbold = tkFont.Font(family=family,size=normsize,weight="bold") self.FONTsmall = tkFont.Font(family=family,size=smallsize) if self.parent: self.parent.option_add( "*font", "Arial %s bold" % normsize) self.parent.option_add( "*font", "Arial %s" % normsize) #self.parent.option_add('*Background', '#969b9d') self.BORDER = { 'bd':1,'highlightbackground':'black', 'borderwidth':2,'highlightcolor':'black','highlightthickness':1} class TabBase(DebugTools.DebugObj): def __init__(self, app, parent, debug = False): """ app is the calling application parent is the container where widgets will be created ( """ DebugTools.DebugObj.__init__(self, debug) self.app = app self.parent = parent self.frame = tk.Frame(parent) def resetFrame(self): self.frame.pack_forget() self.frame = tk.Frame(self.parent) class PanedManager(RaccoonDefaultWidget): """ """ def __init__(self, app=None, parent=None, wtitle='PanedManager', ltitle='Left', rtitle='Right', lwidth=0, rwidth=0): RaccoonDefaultWidget.__init__(self, parent) self.app = app self.wtitle = wtitle self.ltitle = ltitle self.rtitle = rtitle self.lwidth = lwidth self.rwidth = rwidth self.initIcons() self.makeWin() def __call__(self): #, geom='centerscreenalways'): self.win.activate() #geometry=geom) def destroy(self, event=None): """ """ # trigger the event refresh of server list self.win.deactivate() def makeWin(self): """ """ self.win = Pmw.Dialog(self.parent, title=self.wtitle, buttons = ('Close',), command = self.destroy) bbox = self.win.component('buttonbox') for i in range(bbox.numbuttons()): bbox.button(i).configure(font=self.FONT, default='disabled', **self.BORDER) #bbox.button(i).configure(relief='raised') self.pane = Pmw.PanedWidget(self.win.interior(), orient='horizontal', handlesize=-1, separatorthickness=10, 
separatorrelief='raised', ) left = self.pane.add('info', min=self.lwidth, size=self.lwidth) right = self.pane.add('viewer', min=self.rwidth, size=self.rwidth) handle = self.pane.component('handle-1') sep = self.pane.component('separator-1') self.pane.component('handle-1').place_forget() self.pane.component('handle-1').forget() self.pane.component('handle-1').pack_forget() self.pane.component('handle-1').grid_forget() self.pane.component('separator-1').configure(bd =2, #bg = '#999999' highlightthickness=1, highlightbackground='black', highlightcolor='black') lgroup = Pmw.Group(left, tag_text=self.ltitle, tag_font=self.FONTbold) lgroup.pack(expand=1,fill='both',side='top', anchor='center', padx=5,pady=5) self.left = lgroup.interior() rgroup = Pmw.Group(right, tag_text=self.rtitle, tag_font=self.FONTbold) rgroup.pack(expand=1,fill='both',side='left', anchor='center', padx=5,pady=5) self.right = rgroup.interior() # separator for rearranging spacer = tk.Frame(self.right, width=6) spacer.pack(expand=0, fill='y',side='left', anchor='w') # nail handle tk.Frame(sep,height=40,width=4,bg='#fffeee',relief='sunken',bd=1,highlightbackground='black', highlightthickness=1).pack( anchor='center', padx=2,pady=2,side='left',expand=0,fill=None) # LEFT self.listcontainer = Pmw.ScrolledListBox(parent = self.left) self.listcontainer.component('listbox').configure(bg='white', font=self.FONT) self.listcontainer.pack(expand=1, fill='both') # RIGHT # pack everything self.pane.pack(expand=1,fill='both') self.pane.component('hull').configure(width=800, height=400) class RacMenu(RaccoonDefaultWidget): """ Provide a self-destructible one-level pop-up menu (similar to what's usually associated to right-button mouse clicks) and save expensive time-machine bills for getting back to the '80 to get 'Dismiss' buttons supplies. parent : the parent widget (usually a button) items : a nested list items = [ ['title'], <- only one entry : title (separator automatically added) [optional, leave empty entry for nothing] ['entry', 'status', callback], <- thre entries : menu entries (text, normal/disabled, function) [], <- empty : separator ['entry', 'status', callback], <- thre entries : menu entries (text, normal/disabled, function) ['entry', 'status', callback], <- thre entries : menu entries (text, normal/disabled, function) ] toolbar : optional, is usually a frame that contain the parent widget and other widgets; it is used to keep track of multiple menus in the same tooblar placement: close, under, tuple(x,y)... (others don't work yet) floating : it is not tied to a specific widget (i.e. 
button toolbar) """ def __init__(self, parent, items=[], toolbar=False, placement='close', floating=False): RaccoonDefaultWidget.__init__(self, parent) self.menu = tk.Menu(parent, tearoff=False, takefocus=1) if not hasattr(self.parent, 'menu'): self.parent.menu = None self.items = items self._count = 0 self._maxlen = 0 self._posted = False self._caller = None self.floating = floating if toolbar: self.toolbar = toolbar self.toolbar.menu = None else: self.toolbar = False self.placement = placement self.parent.configure(relief='raised') self.populate() def populate(self): """ parse items and fill the menu """ self.parent.configure(relief='raised') for i in range(len(self.items)): entry = self.items[i] if i == 0: # LABEL if not entry == None: self._count += 2 self.menu.add_command(label=entry[0], state='disable', font=self.FONTbold, foreground='white', background='black' ) self._maxlen = max( self._maxlen, len(entry[0])) self.menu.add_separator() else: if len(entry) == 0: # SEPARATOR self._count += 1 self.menu.add_separator() else: # MENU ENTRY self._count += 1 self._maxlen = max( self._maxlen, len(entry[0])) l, s, c = entry self.menu.add_command(label = ' '+ l +' ', state= s, command = c, font = self.FONT) self.parent.configure(relief='raised') if not self.floating: self.parent.bind('<Button-1>', self) self.parent.bind('<Leave>', self._gozer) self.parent.bind('<FocusOut>', self._gozer) self.menu.configure(disabledforeground='white') self.menu.bind('<Leave>', self._gozer) self.menu.bind('<FocusOut>', self._gozer) def __call__(self, event=None): """ post the menu visible REMEMBER that all arrangements must not have discontinuities between the posted menu and the parent, otherwise a '<leave>' event would be triggered, calling _gozer) """ self.parent.configure(relief='raised') if self._posted: self._gozer(force=True) if self.toolbar: # check if the toolbar doesn't have other # menu's posted if hasattr(self.toolbar, 'menu'): if self.toolbar.menu: self.toolbar.menu._gozer(force=True) if self.placement == 'close': # NOTE the placement do not allow gaps! (otherwise they # will trigger the <leave> event!) 
x = self.parent.winfo_rootx() + self.parent.winfo_width()# +1 y = self.parent.winfo_rooty() self.menu.post(x, y) self.menu.bind('<Leave>', self._gozer) self._posted = True self.parent.menu = self if self.toolbar: self.toolbar.menu = self return elif self.placement == 'under': x = self.parent.winfo_rootx() #+ self.parent.winfo_width()# +1 y = self.parent.winfo_rooty() + self.parent.winfo_height() self.menu.post(x, y) self.menu.bind('<Leave>', self._gozer) self._posted = True self.parent.menu = self if self.toolbar: self.toolbar.menu = self return elif isinstance(self.placement, tuple): x, y = self.placement self.menu.post(x, y) self.menu.bind('<Leave>', self._gozer) self._posted = True self.parent.menu = self if self.toolbar: self.toolbar.menu = self self.menu.bind('<Leave>', self._gozer) self.menu.bind('<FocusOut>', self._gozer) return elif self.placement == 'left': x_off = -self._maxlen * 10 y_off = -8 elif self.placement == 'center': x_off = -self._maxlen * 5 # offset pixels per char y_off = -self.menu.yposition( self._count / 2) self._posted = True self.menu.post(event.x_root+x_off, event.y_root+y_off) def _gozer(self, event=None, force=False): """the Gozerian, the Destructor""" self.parent.configure(relief='raised') if force: self.menu.unpost() self._posted = False self.parent.menu = None return if not self._posted: return ex, ey = event.x_root, event.y_root mx0, my0 = self.menu.winfo_rootx(), self.menu.winfo_rooty() mw, mh = self.menu.winfo_width(), self.menu.winfo_height() mx1, my1 = mx0+mw, my0+mh px0, py0 = self.parent.winfo_rootx(), self.parent.winfo_rooty() pw, ph = self.parent.winfo_width(), self.parent.winfo_height() px1, py1 = px0+pw, py0+ph in_menu = False if mx0 <= ex < mx1: if my0 <= ey < my1: in_menu = True in_butt = False if not self.floating: if px0 <= ex < px1 : if py0 <= ey <= py1: in_butt = True if in_butt or in_menu: return self._posted = False self.parent.menu = None if self.toolbar: self.toolbar.menu = None self.menu.unpost() def _gozer__BETTER_BUT_OFF(self, event=None, force=True): """the Gozerian, the Destructor""" # mouse coords x, y = event.x_root, event.y_root # calling button coords bx, by = self.parent.winfo_rootx(), self.parent.winfo_rooty() bw, bh = self.parent.winfo_width(), self.parent.winfo_width() # menu button coords mx, my = self.menu.winfo_rootx(), self.menu.winfo_rooty() mw, mh = self.menu.winfo_width(), self.menu.winfo_height() in_menu = ((x>mx)and(x<mx+mw)) and ((y>my)and(y< my+mh)) in_button = ((x>bx)and(x<bx+bw)) and ((y>by)and(y< by+bh)) print "\n\n\nIN MENU", in_menu print "IN BUTTON", in_button if in_menu: print "INMENU< RETURNING" return if in_button: print "INBUTTION< RETURNONG" return self._posted = False self.menu.unpost() """ if event.widget.winfo_containing(event.x, event.y) == self.menu: return event = None if event: #print "EVENT", event, event.widget.unpost() self._posted = False else: self._posted = False self.menu.unpost() """ """ NOT ENOUGH TIME TO EXPERIMENT def __call__(self, event=None): "" post the menu visible"" if self._posted: return #center = self.menu.yposition( self._count / 2) # use .configure(postcommand = self._gozer?) 
w = event.widget if self.placement == 'center': x = w.winfo_rootx() y = w.winfo_rooty() - self.menu.yposition( int(self._count / 2.)) elif self.placement == 'n': x = w.winfo_rootx() y = w.winfo_rooty() - self.menu.yposition(self._count) elif self.placement == 'ne': x = w.winfo_rootx() + w.winfo_width()/2 y = w.winfo_rooty() - self.menu.yposition( int(self._count / 2.)) elif self.placement == 's': x = w.winfo_rootx() y = w.winfo_rooty() - self.menu.yposition(self._count) elif self.placement == 'close': x = 3 y = -3 self.menu.post(event.x_root+x, event.y_root+y) elif self.placement == 'right': #x_off = event.widget.1 pass self._posted = True self.menu.after(50, lambda: self.menu.bind('<Leave>', self._gozer) ) self.menu.post(x, y) """ class ProgressDialogWindow(RaccoonDefaultWidget , DebugTools.DebugObj): """ parent : Tk parent of the widget function : function to be threaded func_args : function arguments func_kwargs : function kw arguments title : title of the window message : message to be shown in the window (LABEL) operation : string with the description of the operation threaded (used for the notification message text) image : an ImageTk object ( or a file?) FIXME autoclose : automatically close the dialog when threaded function successfully terminated cb : callback passed to the threaded function, i.e. to perform updates progresstype : description of the progress, that can be False (disabled) or 'percent' (return float percent) or 'count' (return integer) showtime : Bool, show elapsed time debug : enable debug USAGE: the only requirement is that the function that is called should support as a parameter 'stopcheck' a function that returns True when the threaded function should stop (see self.start() ) func_kwargs = {'path': "/", 'pattern': '*', 'recursive' : True} func = hf.pathToList progressWin = ProgressDialogWindow(parent, title, func, func_kwargs) progressWin.start() """ def __init__(self, parent, function, func_args=(), func_kwargs={}, title='ProgressDialogWindow', message='threaded operation', operation = 'GenericProcessing', image = None, autoclose=False, cb = None, progresstype = None, showtime = True, debug=False): RaccoonDefaultWidget.__init__(self, parent) DebugTools.DebugObj.__init__(self, debug) self.title = title self._function = function self._args = func_args self._kwargs = func_kwargs self.image = image self.message = message self.operation = operation self.autoclose = autoclose self.progresstype = progresstype self.showtime = showtime self.cb = cb self.queue = Queue.Queue() self.progress = 0 self.pending = None self.status = 'not ready' self._STOP = False self._COMPLETED = False self._ERROR = None self._SLEEP = 0.1 # 075 # update interval (sec.) self.rolling = """-\|/""" # .oO@* """ #self.rolling = ".oO@* " def stop(self, event=None): """ halt the thread""" t = 'Confirm' i = 'warning' m = 'Stop %s?' 
% self.operation if not tmb.askyesno(parent=self.win.interior(), title=t, icon=i, message=m): return self._STOP = True self._COMPLETED = False self.closingProcedure() def changeButton(self, event=None): """ update the text of the button when the thread ends/get stopped""" self.win.component('buttonbox').button(0).configure(text='Close', command=self.close) def getOutput(self): """return the output of the threaded function """ if self.queue.empty(): return [] if self._ERROR: return [] output = self.queue.get() return output def checkStop(self): """ function used to check if the thread must stop""" return self._STOP def close(self, event=None): """close the window""" self.win.destroy() def buildGUI(self): """ set the widgets shown""" self.win = Pmw.Dialog(self.parent, title=self.title, buttons=('Stop',), command=self.stop) self.win.component('buttonbox').button(0).configure(font=self.FONT, default='disabled', **self.BORDER) # TITLE # MESSAGE # Status : [ Running, stopped, error # Error: xxxxxxxxxxxxx # Elapsed: # Progress/process i = tk.Frame(self.win.interior(), relief = 'flat') #, bd=2) #, bg='white') # message f = tk.Frame(i) # , bg='white') if self.image: tk.Label(f, image = self.image).pack(anchor='w', side='left', expand=0, padx=6, pady=6) tk.Label(f, text = self.message, font=self.FONT).pack(anchor='w',side='left', expand=0, fill='x') f.pack(side='top', anchor='w', expand=0, fill='x',pady=5,padx=3) # status i.pack(expand=0,fill='both') i = tk.Frame(self.win.interior(), relief = 'sunken', bd=2, bg='white') # SPACER #tk.Frame(i, height=2, bd=2, relief='sunken',bg='black').pack(anchor='n', side='top', expand=0,fill='x') f = tk.Frame(i, bg='white') tk.Label(f, text = 'Status : ', width = 20, anchor='e',bg='white', font=self.FONTbold).pack(anchor='w',side='left', expand=0, fill='x') self.status_label = tk.Label(f, width = 30, text = 'ready', anchor='w', bg='white', font=self.FONT) self.status_label.pack(anchor='w',side='left', expand=0, fill='x') f.pack(side='top', anchor='w', expand=0, fill='x', padx=3) # elapsed time if self.showtime: f = tk.Frame(i, bg='white') tk.Label(f, text = 'Elapsed time : ', width = 20, anchor='e', bg='white', font=self.FONTbold).pack(anchor='w',side='left', expand=0, fill='x') self.time_label = tk.Label(f, width = 30, text = '00 : 00 : 00', anchor='w', bg='white', font=self.FONT) self.time_label.pack(anchor='w',side='left', expand=0, fill='x') f.pack(side='top', anchor='w', expand=0, fill='x', padx=3) # error f = tk.Frame(i) self.error_title = tk.Label(f, text = ' ', fg='red', bg='white', width = 20, anchor='e', font=self.FONTbold) self.error_title.pack(anchor='w',side='left', expand=0, fill='x') self.error_label = tk.Label(f, width = 30, text = ' ', font=self.FONT, anchor='w', bg='white') self.error_label.pack(anchor='w',side='top', expand=0, fill='x') f.pack(side='top', anchor='w', expand=0, fill='x',padx=3) if self.progresstype == 'percent': #create percent bar f = tk.Frame(i, bg='white') self.progressBar = hf.ProgressBarThreadsafe(i, w = 300, h = 20) self.progressBar.pack(anchor='n', side='top', expand=0, fill='none') f.pack(side='top', anchor='n', expand=0, fill='none', padx=5, pady=8) elif self.progresstype == 'count': # create counter label # FIXME pass elif self.progresstype == None: # create the default "is alive" feedback self.dotsLabel = tk.Label(i, bg='white', text = self.rolling[0]) self.dotsLabel.pack() pass i.pack(expand=0,fill='both') def setPercent(self, value): """update the percentage widget""" self.progress = value def 
_wrapped_function(self): """ the wrapped version of the function to be threaded with the FIFO queue used to export results """ try: #if True: self._kwargs['stopcheck'] = self.checkStop if self.progresstype == 'percent': self._kwargs['showpercent'] = self.setPercent elif self.progresstype == 'count': #self._kwargs['showcount'] = self.setPercent pass res = self._function( *self._args, **self._kwargs) self.queue.put(res) except: #else: self._ERROR = sys.exc_info()[1] def start(self): """ show the widget and start the threaded function """ self.buildGUI() # trigger the actual start... self.win.after(200, self._run) self.win.activate() def _run(self): """ start the calculation and perform the GUI update""" self.status_label.configure(text = 'running') # create the thread and start it self.pending = threading.Thread(target = self._wrapped_function) self.pending.start() self.updateStatus() self._start_time = time.time() while self.status == 'running': time.sleep(self._SLEEP) self.updateStatus() self.updateGUI() self.updateGUI() def updateStatus(self): """ return the status of the job""" if self.pending == None: self.status = 'not started' elif self.pending.isAlive(): self.status = 'running' elif not self._STOP: self._COMPLETED = True self.status = 'completed' self.closingProcedure() elif self._STOP: self.COMPLETED = False self.status = 'stopped' self.closingProcedure() def updateGUI(self): """ update the progress status""" try: self.parent.update() if self.showtime: # update time s = int(time.time() - self._start_time) h = s/3600 remainder = s % 3600 m = remainder/60 s = remainder % 60 timestr = "%02d : %02d : %02d" % (h,m,s) self.time_label.configure(text = timestr) if self.progresstype == 'percent': self.progressBar.set(self.progress) return elif self.progresstype == 'count': # update counter # FIXME return # provide the generic progress self.rolling = self.rolling[1:] + self.rolling[0] self.dotsLabel.configure(text = self.rolling[0]) except tk.TclError: # possibly a latest update that was late pass except: print "[ unexpected error in RaccoonBasics, not dangerous... possibly...]" pass def closingProcedure(self): """ perform gui closing procedure when the threaded operation is completed """ if not self._ERROR == None: # enable the error field self.error_title.configure(text='Error : ') self.error_label.configure(text=self._ERROR) status = 'error' elif self._COMPLETED: if self.autoclose and not self._ERROR: self.close() return status = 'done' elif self._STOP: status = 'stopped' self.status_label.configure(text = status) if self.progresstype == None: self.dotsLabel.configure(text=' ') self.changeButton() class ProgressDialogWindowTk(RaccoonDefaultWidget , DebugTools.DebugObj): """ parent : Tk parent of the widget function : function to be threaded func_args : function arguments func_kwargs : function kw arguments title : title of the window message : message to be shown in the window (LABEL) operation : string with the description of the operation threaded (used for the notification message text) image : an ImageTk object ( or a file?) FIXME autoclose : automatically close the dialog when threaded function successfully terminated cb : callback passed to the threaded function, i.e. 
to perform updates progresstype : description of the progress, that can be False (disabled) or 'percent' (return float percent) or 'count' (return integer) showtime : Bool, show elapsed time debug : enable debug USAGE: the only requirement is that the function that is called should support as a parameter 'stopcheck' a function that returns True when the threaded function should stop (see self.start() ) func_kwargs = {'path': "/", 'pattern': '*', 'recursive' : True} func = hf.pathToList progressWin = ProgressDialogWindow(parent, title, func, func_kwargs) progressWin.start() """ def __init__(self, parent, function, func_args=(), func_kwargs={}, title='ProgressDialogWindow', message='threaded operation', operation = 'GenericProcessing', image = None, autoclose=False, cb = None, progresstype = None, showtime = True, debug=False): RaccoonDefaultWidget.__init__(self, parent) DebugTools.DebugObj.__init__(self, debug) self.title = title self._function = function self._args = func_args self._kwargs = func_kwargs self.image = image self.message = message self.operation = operation self.autoclose = autoclose self.progresstype = progresstype self.showtime = showtime self.cb = cb self.status = 'not ready' self.result = None self._STOP = False self._COMPLETED = False self._ERROR = None self._SLEEP = 0.1 # 075 # update interval (sec.) self.rolling = """-\|/""" # .oO@* """ self.rolling_counter = 0 self.rolling = unicode(u"\u2588\u2589\u258A\u258B\u258C\u258D\u258E\u258F") # horizontal #self.rolling = unicode(u"\u2581\u2582\u2583\u2584\u2585\u2586\u2587") # vertical #self.rolling = ".oO@* " def stop(self, event=None): """ halt the thread""" t = 'Confirm' i = 'warning' m = 'Stop %s?' % self.operation if not tmb.askyesno(parent=self.win.interior(), title=t, icon=i, message=m): return self._STOP = True self._COMPLETED = False self.closingProcedure() def changeButton(self, event=None): """ update the text of the button when the thread ends/get stopped""" self.win.component('buttonbox').button(0).configure(text='Close', command=self.close) def getOutput(self): """return the output of the threaded function """ #if self.queue.empty(): # return [] #if self._ERROR: # return [] #output = self.queue.get() return self.result def checkStop(self): """ function used to check if the thread must stop""" return self._STOP def close(self, event=None): """close the window""" self.win.destroy() def buildGUI(self): """ set the widgets shown""" self.win = Pmw.Dialog(self.parent, title=self.title, buttons=('Stop',), command=self.stop) button = self.win.component('buttonbox').button(0) button.configure(font=self.FONT, default='disabled', cursor='arrow', **self.BORDER) # TITLE # MESSAGE # Status : [ Running, stopped, error # Error: xxxxxxxxxxxxx # Elapsed: # Progress/process i = tk.Frame(self.win.interior(), relief = 'flat') #, bd=2) #, bg='white') # message f = tk.Frame(i) # , bg='white') if self.image: tk.Label(f, image = self.image).pack(anchor='w', side='left', expand=0, padx=6, pady=6) tk.Label(f, text = self.message, font=self.FONT).pack(anchor='w',side='left', expand=0, fill='x') f.pack(side='top', anchor='w', expand=0, fill='x',pady=5,padx=3) # status i.pack(expand=0,fill='both') i = tk.Frame(self.win.interior(), relief = 'sunken', bd=2, bg='white') # SPACER f = tk.Frame(i, bg='white') tk.Label(f, text = 'Status : ', width = 20, anchor='e',bg='white', font=self.FONTbold).pack(anchor='w',side='left', expand=0, fill='x') self.status_label = tk.Label(f, width = 30, text = 'ready', anchor='w', bg='white', font=self.FONT) 
self.status_label.pack(anchor='w',side='left', expand=0, fill='x') f.pack(side='top', anchor='w', expand=0, fill='x', padx=3) # elapsed time if self.showtime: f = tk.Frame(i, bg='white') tk.Label(f, text = 'Elapsed time : ', width = 20, anchor='e', bg='white', font=self.FONTbold).pack(anchor='w',side='left', expand=0, fill='x') self.time_label = tk.Label(f, width = 30, text = '00 : 00 : 00', anchor='w', bg='white', font=self.FONT) self.time_label.pack(anchor='w',side='left', expand=0, fill='x') f.pack(side='top', anchor='w', expand=0, fill='x', padx=3) # error f = tk.Frame(i) self.error_title = tk.Label(f, text = ' ', fg='red', bg='white', width = 20, anchor='e', font=self.FONTbold) self.error_title.pack(anchor='w',side='left', expand=0, fill='x') self.error_label = tk.Label(f, width = 30, text = ' ', font=self.FONT, anchor='w', bg='white') self.error_label.pack(anchor='w',side='top', expand=0, fill='x') f.pack(side='top', anchor='w', expand=0, fill='x',padx=3) if self.progresstype == 'percent': #create percent bar f = tk.Frame(i, bg='white') self.progressBar = hf.ProgressBarThreadsafe(i, w = 300, h = 20) self.progressBar.pack(anchor='n', side='top', expand=0, fill='none') f.pack(side='top', anchor='n', expand=0, fill='none', padx=5, pady=8) elif self.progresstype == 'count': # create counter label # FIXME pass elif self.progresstype == None: # create the default "is alive" feedback self.dotsLabel = tk.Label(i, bg='white', text = self.rolling[0]) self.dotsLabel.pack() pass i.pack(expand=0,fill='both') def setPercent(self, value): """update the percentage widget""" self.progress = value def start(self): """ initiate the processing """ # build the GUI self.buildGUI() # schedule the start function self.parent.after(100, self.run) # show gui self.win.activate() def getResults(self): """ set the complete variable to True""" return self.result def run(self,event=None): """ run the blocking function""" wrap_kwargs = { 'stopcheck' : self.checkStop, 'GUI' : self.win.interior(), } if self.progresstype == 'percent': wrap_kwargs['showpercent'] = self.progressBar.set self._kwargs.update(wrap_kwargs) self._start_time = time.time() # schedule an update self.win.interior().after(100, self.updateGUI ) try: self.result = self._function(**self._kwargs) self._COMPLETED = True except: self._ERROR = sys.exc_info()[1] self._STOP = True print "CATCHED ERROR!", self._ERROR #XXX REDEFINE THE CLOSE PROTOCOL TO BE CLOSE()!!!! 
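    # NOTE: unlike the threaded ProgressDialogWindow above, this Tk-only variant
    # appears to run the target function directly on the GUI thread inside run();
    # responsiveness depends on the 'GUI' handle passed through the kwargs and on
    # the after() callbacks scheduled for updateGUI(), rather than on a worker
    # thread and a Queue.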
    def updateGUI(self):
        """ update the progress status"""
        if self._COMPLETED or self._STOP:
            self.closingProcedure()
            return
        self.win.interior().after(100, self.updateGUI)
        if self.showtime:
            # update time
            s = int(time.time() - self._start_time)
            h = s / 3600
            remainder = s % 3600
            m = remainder / 60
            s = remainder % 60
            timestr = "%02d : %02d : %02d" % (h, m, s)
            self.time_label.configure(text=timestr)
        if self.progresstype:
            return
        #if self.progresstype == 'percent':
        #    self.progressBar.set(self.progress)
        #    return
        #elif self.progresstype == 'count':
        #    # update counter # FIXME
        #    return
        # provide the generic progress
        #if self.rolling_counter > len(self.rolling):
        #    self.rolling = self.rolling[::-1]
        #    self.rolling_counter = 0
        #    print "inverting"
        self.rolling = self.rolling[1:] + self.rolling[0]
        #string = " ".join(list(self.rolling[::-1]))
        string = " ".join(list(self.rolling))
        self.dotsLabel.configure(text=string, fg='blue', bg='white')  #, **self.BORDER)
        self.rolling_counter += 1

    def closingProcedure(self):
        """ perform gui closing procedure when the threaded operation is completed """
        testers = []
        if not self._ERROR == None:
            # enable the error field
            self.error_title.configure(text='Error : ')
            self.error_label.configure(text=self._ERROR)
            status = 'error'
        elif self._COMPLETED:
            if self.autoclose and not self._ERROR:
                self.close()
                return
            status = 'done'
        elif self._STOP:
            status = 'stopped'
        self.status_label.configure(text=status)
        if self.progresstype == None:
            self.dotsLabel.configure(text=' ')
        self.changeButton()


class About(RaccoonDefaultWidget):
    """ refined about window"""

    def __init__(self, parent=None, iconpath=None):
        RaccoonDefaultWidget.__init__(self, parent, iconpath)
        self.tabs = {}
        self.win = Pmw.Dialog(self.parent, title='About Raccoon2', buttons=('Close',))
        self.notebook = Pmw.NoteBook(self.parent)
        for tab in ['About', 'Funding & Acknowledgements']:
            self.tabs[tab] = self.notebook.add(tab)
        self.notebook.pack(expand=1, fill='both')
        self.notebook.setnaturalsize(self.tabs.keys())
        self.initImages()
        self.populateAbout()
        self.populateAuthors()
        self.populateFundingAck()
        self.win.activate()

    def initImages(self):
        """ initialize the icons for the interface"""
        # NOTE: relies on 'os', 'Image' and 'CADD.Raccoon2', which are not imported
        # in this module's header; the icon file name below is incomplete.
        images_path = CADD.Raccoon2.ICONPATH
        f = images_path + os.sep + '.png'
        self._ICON_sys = ImageTk.PhotoImage(Image.open(f))

    def populateAbout(self):
        """ about raccoon"""
        text = ('AutoDock | Raccoon 2\n'
                'v1.0beta rev 0.1\n')
        pass

    def populateAuthors(self):
        """ set authors names"""
        authors = ['Stefano Forli, TSRI',
                   'Arthur J. Olson, TSRI',
                   'Michel Sanner, TSRI']

    def populateFundingAck(self):
        thanks = ['Lisa Dong', 'Jc Ducomm', 'Alex L. Perryman', 'Luca', 'Peter']
        pass
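# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the Raccoon2 sources):
# it follows the USAGE notes in the ProgressDialogWindow docstring above.
# The hf.pathToList keywords ('path', 'pattern', 'recursive') are assumed from
# that docstring and may differ in the installed HelperFunctionsN3P module;
# the threaded function must accept a 'stopcheck' keyword argument.
if __name__ == '__main__':
    root = tk.Tk()
    root.withdraw()
    Pmw.initialise(root)
    func_kwargs = {'path': '/', 'pattern': '*', 'recursive': True}
    progress = ProgressDialogWindow(root, hf.pathToList,
                                    func_kwargs=func_kwargs,
                                    title='Scanning files',
                                    message='Searching for files...',
                                    operation='file scanning',
                                    autoclose=False)
    # start() builds the dialog, runs hf.pathToList in a worker thread and
    # blocks until the dialog is dismissed; getOutput() then returns the
    # result collected from the queue (an empty list on error or stop).
    progress.start()
    output = progress.getOutput()
    print "scan returned %d item(s)" % len(output)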
tests.py
# Unit tests for cache framework # Uses whatever cache backend is set in the test settings file. import copy import io import os import pickle import re import shutil import tempfile import threading import time import unittest from pathlib import Path from unittest import mock from django.conf import settings from django.core import management, signals from django.core.cache import ( DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches, ) from django.core.cache.utils import make_template_fragment_key from django.db import close_old_connections, connection, connections from django.http import ( HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse, ) from django.middleware.cache import ( CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware, ) from django.middleware.csrf import CsrfViewMiddleware from django.template import engines from django.template.context_processors import csrf from django.template.response import TemplateResponse from django.test import ( RequestFactory, SimpleTestCase, TestCase, TransactionTestCase, override_settings, ) from django.test.signals import setting_changed from django.utils import timezone, translation from django.utils.cache import ( get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers, ) from django.views.decorators.cache import cache_control, cache_page from .models import Poll, expensive_calculation # functions/classes for complex data type tests def f(): return 42 class C: def m(n): return 24 class Unpicklable: def __getstate__(self): raise pickle.PickleError() KEY_ERRORS_WITH_MEMCACHED_MSG = ( 'Cache key contains characters that will cause errors if used with ' 'memcached: %r' ) @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', } }) class DummyCacheTests(SimpleTestCase): # The Dummy cache backend doesn't really behave like a test backend, # so it has its own test case. 
def test_simple(self): "Dummy cache backend ignores cache set calls" cache.set("key", "value") self.assertIsNone(cache.get("key")) def test_add(self): "Add doesn't do anything in dummy cache backend" cache.add("addkey1", "value") result = cache.add("addkey1", "newvalue") self.assertTrue(result) self.assertIsNone(cache.get("addkey1")) def test_non_existent(self): "Nonexistent keys aren't found in the dummy cache backend" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): "get_many returns nothing for the dummy cache backend" cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'c', 'd']), {}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {}) def test_get_many_invalid_key(self): with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'): cache.get_many(['key with spaces']) def test_delete(self): "Cache deletion is transparently ignored on the dummy cache backend" cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertIsNone(cache.get("key1")) cache.delete("key1") self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_has_key(self): "The has_key method doesn't ever return True for the dummy cache backend" cache.set("hello1", "goodbye1") self.assertFalse(cache.has_key("hello1")) self.assertFalse(cache.has_key("goodbye1")) def test_in(self): "The in operator doesn't ever return True for the dummy cache backend" cache.set("hello2", "goodbye2") self.assertNotIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): "Dummy cache values can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr('answer') with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): "Dummy cache values can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr('answer') with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_touch(self): """Dummy cache can't do touch().""" self.assertIs(cache.touch('whatever'), False) def test_data_types(self): "All data types are ignored equally by the dummy cache" stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertIsNone(cache.get("stuff")) def test_expiration(self): "Expiration has no effect on the dummy cache" cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) cache.add("expire2", "newvalue") self.assertIsNone(cache.get("expire2")) self.assertFalse(cache.has_key("expire3")) def test_unicode(self): "Unicode values are ignored by the dummy cache" stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertIsNone(cache.get(key)) def test_set_many(self): "set_many does nothing for the dummy cache backend" self.assertEqual(cache.set_many({'a': 1, 'b': 2}), []) self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), []) def test_set_many_invalid_key(self): with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'): cache.set_many({'key with spaces': 'foo'}) def 
test_delete_many(self): "delete_many does nothing for the dummy cache backend" cache.delete_many(['a', 'b']) def test_delete_many_invalid_key(self): with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'): cache.delete_many({'key with spaces': 'foo'}) def test_clear(self): "clear does nothing for the dummy cache backend" cache.clear() def test_incr_version(self): "Dummy cache versions can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr_version('answer') with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): "Dummy cache versions can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr_version('answer') with self.assertRaises(ValueError): cache.decr_version('does_not_exist') def test_get_or_set(self): self.assertEqual(cache.get_or_set('mykey', 'default'), 'default') self.assertIsNone(cache.get_or_set('mykey', None)) def test_get_or_set_callable(self): def my_callable(): return 'default' self.assertEqual(cache.get_or_set('mykey', my_callable), 'default') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default') def custom_key_func(key, key_prefix, version): "A customized cache key function" return 'CUSTOM-' + '-'.join([key_prefix, str(version), key]) _caches_setting_base = { 'default': {}, 'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())}, 'v2': {'VERSION': 2}, 'custom_key': {'KEY_FUNCTION': custom_key_func}, 'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'}, 'cull': {'OPTIONS': {'MAX_ENTRIES': 30}}, 'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}}, } def caches_setting_for_tests(base=None, exclude=None, **params): # `base` is used to pull in the memcached config from the original settings, # `exclude` is a set of cache names denoting which `_caches_setting_base` keys # should be omitted. # `params` are test specific overrides and `_caches_settings_base` is the # base config for the tests. 
# This results in the following search order: # params -> _caches_setting_base -> base base = base or {} exclude = exclude or set() setting = {k: base.copy() for k in _caches_setting_base if k not in exclude} for key, cache_params in setting.items(): cache_params.update(_caches_setting_base[key]) cache_params.update(params) return setting class BaseCacheTests: # A common set of tests to apply to all cache backends factory = RequestFactory() def tearDown(self): cache.clear() def test_simple(self): # Simple cache set/get works cache.set("key", "value") self.assertEqual(cache.get("key"), "value") def test_default_used_when_none_is_set(self): """If None is cached, get() returns it instead of the default.""" cache.set('key_default_none', None) self.assertIsNone(cache.get('key_default_none', default='default')) def test_add(self): # A key can be added to a cache cache.add("addkey1", "value") result = cache.add("addkey1", "newvalue") self.assertFalse(result) self.assertEqual(cache.get("addkey1"), "value") def test_prefix(self): # Test for same cache key conflicts between shared backend cache.set('somekey', 'value') # should not be set in the prefixed cache self.assertFalse(caches['prefix'].has_key('somekey')) caches['prefix'].set('somekey', 'value2') self.assertEqual(cache.get('somekey'), 'value') self.assertEqual(caches['prefix'].get('somekey'), 'value2') def test_non_existent(self): """Nonexistent cache keys return as None/default.""" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): # Multiple cache keys can be returned using get_many cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'}) self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'}) def test_delete(self): # Cache keys can be deleted cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertEqual(cache.get("key1"), "spam") cache.delete("key1") self.assertIsNone(cache.get("key1")) self.assertEqual(cache.get("key2"), "eggs") def test_has_key(self): # The cache can be inspected for cache keys cache.set("hello1", "goodbye1") self.assertTrue(cache.has_key("hello1")) self.assertFalse(cache.has_key("goodbye1")) cache.set("no_expiry", "here", None) self.assertTrue(cache.has_key("no_expiry")) def test_in(self): # The in operator can be used to inspect cache contents cache.set("hello2", "goodbye2") self.assertIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): # Cache values can be incremented cache.set('answer', 41) self.assertEqual(cache.incr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.incr('answer', 10), 52) self.assertEqual(cache.get('answer'), 52) self.assertEqual(cache.incr('answer', -10), 42) with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): # Cache values can be decremented cache.set('answer', 43) self.assertEqual(cache.decr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.decr('answer', 10), 32) self.assertEqual(cache.get('answer'), 32) self.assertEqual(cache.decr('answer', -10), 42) with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_close(self): self.assertTrue(hasattr(cache, 'close')) cache.close() def test_data_types(self): # Many different data types can be cached stuff = { 'string': 'this is a string', 'int': 42, 'list': 
[1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertEqual(cache.get("stuff"), stuff) def test_cache_read_for_model_instance(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() my_poll = Poll.objects.create(question="Well?") self.assertEqual(Poll.objects.count(), 1) pub_date = my_poll.pub_date cache.set('question', my_poll) cached_poll = cache.get('question') self.assertEqual(cached_poll.pub_date, pub_date) # We only want the default expensive calculation run once self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_write_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache write expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) self.assertEqual(expensive_calculation.num_runs, 1) cache.set('deferred_queryset', defer_qs) # cache set should not re-evaluate default functions self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_read_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) cache.set('deferred_queryset', defer_qs) self.assertEqual(expensive_calculation.num_runs, 1) runs_before_cache_read = expensive_calculation.num_runs cache.get('deferred_queryset') # We only want the default expensive calculation run on creation and set self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read) def test_expiration(self): # Cache values can be set to expire cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) cache.add("expire2", "newvalue") self.assertEqual(cache.get("expire2"), "newvalue") self.assertFalse(cache.has_key("expire3")) def test_touch(self): # cache.touch() updates the timeout. cache.set('expire1', 'very quickly', timeout=1) self.assertIs(cache.touch('expire1', timeout=4), True) time.sleep(2) self.assertTrue(cache.has_key('expire1')) time.sleep(3) self.assertFalse(cache.has_key('expire1')) # cache.touch() works without the timeout argument. 
cache.set('expire1', 'very quickly', timeout=1) self.assertIs(cache.touch('expire1'), True) time.sleep(2) self.assertTrue(cache.has_key('expire1')) self.assertIs(cache.touch('nonexistent'), False) def test_unicode(self): # Unicode values can be cached stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } # Test `set` for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertEqual(cache.get(key), value) # Test `add` for (key, value) in stuff.items(): with self.subTest(key=key): cache.delete(key) cache.add(key, value) self.assertEqual(cache.get(key), value) # Test `set_many` for (key, value) in stuff.items(): cache.delete(key) cache.set_many(stuff) for (key, value) in stuff.items(): with self.subTest(key=key): self.assertEqual(cache.get(key), value) def test_binary_string(self): # Binary strings should be cacheable from zlib import compress, decompress value = 'value_to_be_compressed' compressed_value = compress(value.encode()) # Test set cache.set('binary1', compressed_value) compressed_result = cache.get('binary1') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test add cache.add('binary1-add', compressed_value) compressed_result = cache.get('binary1-add') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test set_many cache.set_many({'binary1-set_many': compressed_value}) compressed_result = cache.get('binary1-set_many') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) def test_set_many(self): # Multiple keys can be set using set_many cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(cache.get("key1"), "spam") self.assertEqual(cache.get("key2"), "eggs") def test_set_many_returns_empty_list_on_success(self): """set_many() returns an empty list when all keys are inserted.""" failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'}) self.assertEqual(failing_keys, []) def test_set_many_expiration(self): # set_many takes a second ``timeout`` parameter cache.set_many({"key1": "spam", "key2": "eggs"}, 1) time.sleep(2) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_delete_many(self): # Multiple keys can be deleted using delete_many cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'}) cache.delete_many(["key1", "key2"]) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) self.assertEqual(cache.get("key3"), "ham") def test_clear(self): # The cache can be emptied using clear cache.set_many({'key1': 'spam', 'key2': 'eggs'}) cache.clear() self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_long_timeout(self): """ Followe memcached's convention where a timeout greater than 30 days is treated as an absolute expiration timestamp instead of a relative offset (#12399). 
""" cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second self.assertEqual(cache.get('key1'), 'eggs') cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key2'), 'ham') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_forever_timeout(self): """ Passing in None into timeout results in a value that is cached forever """ cache.set('key1', 'eggs', None) self.assertEqual(cache.get('key1'), 'eggs') cache.add('key2', 'ham', None) self.assertEqual(cache.get('key2'), 'ham') added = cache.add('key1', 'new eggs', None) self.assertIs(added, False) self.assertEqual(cache.get('key1'), 'eggs') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') cache.set('key5', 'belgian fries', timeout=1) cache.touch('key5', timeout=None) time.sleep(2) self.assertEqual(cache.get('key5'), 'belgian fries') def test_zero_timeout(self): """ Passing in zero into timeout results in a value that is not cached """ cache.set('key1', 'eggs', 0) self.assertIsNone(cache.get('key1')) cache.add('key2', 'ham', 0) self.assertIsNone(cache.get('key2')) cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0) self.assertIsNone(cache.get('key3')) self.assertIsNone(cache.get('key4')) cache.set('key5', 'belgian fries', timeout=5) cache.touch('key5', timeout=0) self.assertIsNone(cache.get('key5')) def test_float_timeout(self): # Make sure a timeout given as a float doesn't crash anything. cache.set("key1", "spam", 100.2) self.assertEqual(cache.get("key1"), "spam") def _perform_cull_test(self, cull_cache, initial_count, final_count): # Create initial cache key entries. This will overflow the cache, # causing a cull. for i in range(1, initial_count): cull_cache.set('cull%d' % i, 'value', 1000) count = 0 # Count how many keys are left in the cache. for i in range(1, initial_count): if cull_cache.has_key('cull%d' % i): count += 1 self.assertEqual(count, final_count) def test_cull(self): self._perform_cull_test(caches['cull'], 50, 29) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 19) def _perform_invalid_key_test(self, key, expected_warning): """ All the builtin backends (except memcached, see below) should warn on keys that would be refused by memcached. This encourages portable caching code without making it too difficult to use production backends with more liberal key rules. Refs #6447. """ # mimic custom ``make_key`` method being defined since the default will # never show the below warnings def func(key, *args): return key old_func = cache.key_func cache.key_func = func try: with self.assertWarnsMessage(CacheKeyWarning, expected_warning): cache.set(key, 'value') finally: cache.key_func = old_func def test_invalid_key_characters(self): # memcached doesn't allow whitespace or control characters in keys. key = 'key with spaces and 清' self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key) def test_invalid_key_length(self): # memcached limits key length to 250. 
key = ('a' * 250) + '清' expected_warning = ( 'Cache key will cause errors if used with memcached: ' '%r (longer than %s)' % (key, 250) ) self._perform_invalid_key_test(key, expected_warning) def test_cache_versioning_get_set(self): # set, using default version = 1 cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertEqual(cache.get('answer1', version=1), 42) self.assertIsNone(cache.get('answer1', version=2)) self.assertIsNone(caches['v2'].get('answer1')) self.assertEqual(caches['v2'].get('answer1', version=1), 42) self.assertIsNone(caches['v2'].get('answer1', version=2)) # set, default version = 1, but manually override version = 2 cache.set('answer2', 42, version=2) self.assertIsNone(cache.get('answer2')) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) # v2 set, using default version = 2 caches['v2'].set('answer3', 42) self.assertIsNone(cache.get('answer3')) self.assertIsNone(cache.get('answer3', version=1)) self.assertEqual(cache.get('answer3', version=2), 42) self.assertEqual(caches['v2'].get('answer3'), 42) self.assertIsNone(caches['v2'].get('answer3', version=1)) self.assertEqual(caches['v2'].get('answer3', version=2), 42) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set('answer4', 42, version=1) self.assertEqual(cache.get('answer4'), 42) self.assertEqual(cache.get('answer4', version=1), 42) self.assertIsNone(cache.get('answer4', version=2)) self.assertIsNone(caches['v2'].get('answer4')) self.assertEqual(caches['v2'].get('answer4', version=1), 42) self.assertIsNone(caches['v2'].get('answer4', version=2)) def test_cache_versioning_add(self): # add, default version = 1, but manually override version = 2 cache.add('answer1', 42, version=2) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.add('answer1', 37, version=2) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.add('answer1', 37, version=1) self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) # v2 add, using default version = 2 caches['v2'].add('answer2', 42) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) caches['v2'].add('answer2', 37) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) caches['v2'].add('answer2', 37, version=1) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) # v2 add, default version = 2, but manually override version = 1 caches['v2'].add('answer3', 42, version=1) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) caches['v2'].add('answer3', 37, version=1) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) caches['v2'].add('answer3', 37) self.assertEqual(cache.get('answer3', version=1), 42) self.assertEqual(cache.get('answer3', version=2), 37) def test_cache_versioning_has_key(self): cache.set('answer1', 42) # has_key self.assertTrue(cache.has_key('answer1')) self.assertTrue(cache.has_key('answer1', version=1)) self.assertFalse(cache.has_key('answer1', version=2)) 
self.assertFalse(caches['v2'].has_key('answer1')) self.assertTrue(caches['v2'].has_key('answer1', version=1)) self.assertFalse(caches['v2'].has_key('answer1', version=2)) def test_cache_versioning_delete(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) cache.delete('answer1') self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) cache.delete('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertIsNone(cache.get('answer2', version=2)) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) caches['v2'].delete('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertIsNone(cache.get('answer3', version=2)) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) caches['v2'].delete('answer4', version=1) self.assertIsNone(cache.get('answer4', version=1)) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_incr_decr(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) cache.incr('answer1') self.assertEqual(cache.get('answer1', version=1), 38) self.assertEqual(cache.get('answer1', version=2), 42) cache.decr('answer1') self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) cache.incr('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 43) cache.decr('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) caches['v2'].incr('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 43) caches['v2'].decr('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 42) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) caches['v2'].incr('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), 38) self.assertEqual(cache.get('answer4', version=2), 42) caches['v2'].decr('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), 37) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_get_set_many(self): # set, using default version = 1 cache.set_many({'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {}) # set, default version = 1, but manually override version = 2 cache.set_many({'ford2': 37, 'arthur2': 42}, version=2) self.assertEqual(cache.get_many(['ford2', 'arthur2']), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42}) 
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) # v2 set, using default version = 2 caches['v2'].set_many({'ford3': 37, 'arthur3': 42}) self.assertEqual(cache.get_many(['ford3', 'arthur3']), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1) self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {}) def test_incr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertIsNone(cache.get('answer', version=3)) self.assertEqual(cache.incr_version('answer', version=2), 3) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertIsNone(cache.get('answer', version=2)) self.assertEqual(cache.get('answer', version=3), 42) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertIsNone(caches['v2'].get('answer2', version=3)) self.assertEqual(caches['v2'].incr_version('answer2'), 3) self.assertIsNone(caches['v2'].get('answer2')) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertIsNone(caches['v2'].get('answer2', version=2)) self.assertEqual(caches['v2'].get('answer2', version=3), 42) with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertEqual(cache.decr_version('answer', version=2), 1) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.get('answer', version=1), 42) self.assertIsNone(cache.get('answer', version=2)) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertEqual(caches['v2'].decr_version('answer2'), 1) self.assertIsNone(caches['v2'].get('answer2')) self.assertEqual(caches['v2'].get('answer2', version=1), 42) self.assertIsNone(caches['v2'].get('answer2', version=2)) with self.assertRaises(ValueError): cache.decr_version('does_not_exist', version=2) def test_custom_key_func(self): # Two caches with different key functions aren't visible to each other 
cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertIsNone(caches['custom_key'].get('answer1')) self.assertIsNone(caches['custom_key2'].get('answer1')) caches['custom_key'].set('answer2', 42) self.assertIsNone(cache.get('answer2')) self.assertEqual(caches['custom_key'].get('answer2'), 42) self.assertEqual(caches['custom_key2'].get('answer2'), 42) def test_cache_write_unpicklable_object(self): update_middleware = UpdateCacheMiddleware() update_middleware.cache = cache fetch_middleware = FetchFromCacheMiddleware() fetch_middleware.cache = cache request = self.factory.get('/cache/test') request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) response = HttpResponse() content = 'Testing cookie serialization.' response.content = content response.set_cookie('foo', 'bar') update_middleware.process_response(request, response) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) update_middleware.process_response(request, get_cache_data) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) def test_add_fail_on_pickleerror(self): # Shouldn't fail silently if trying to cache an unpicklable type. with self.assertRaises(pickle.PickleError): cache.add('unpicklable', Unpicklable()) def test_set_fail_on_pickleerror(self): with self.assertRaises(pickle.PickleError): cache.set('unpicklable', Unpicklable()) def test_get_or_set(self): self.assertIsNone(cache.get('projector')) self.assertEqual(cache.get_or_set('projector', 42), 42) self.assertEqual(cache.get('projector'), 42) self.assertIsNone(cache.get_or_set('null', None)) def test_get_or_set_callable(self): def my_callable(): return 'value' self.assertEqual(cache.get_or_set('mykey', my_callable), 'value') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value') def test_get_or_set_callable_returning_none(self): self.assertIsNone(cache.get_or_set('mykey', lambda: None)) # Previous get_or_set() doesn't store None in the cache. self.assertEqual(cache.get('mykey', 'default'), 'default') def test_get_or_set_version(self): msg = "get_or_set() missing 1 required positional argument: 'default'" cache.get_or_set('brian', 1979, version=2) with self.assertRaisesMessage(TypeError, msg): cache.get_or_set('brian') with self.assertRaisesMessage(TypeError, msg): cache.get_or_set('brian', version=1) self.assertIsNone(cache.get('brian', version=1)) self.assertEqual(cache.get_or_set('brian', 42, version=1), 42) self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979) self.assertIsNone(cache.get('brian', version=3)) def test_get_or_set_racing(self): with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add: # Simulate cache.add() failing to add a value. In that case, the # default value should be returned. 
cache_add.return_value = False self.assertEqual(cache.get_or_set('key', 'default'), 'default') @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.db.DatabaseCache', # Spaces are used in the table name to ensure quoting/escaping is working LOCATION='test cache table' )) class DBCacheTests(BaseCacheTests, TransactionTestCase): available_apps = ['cache'] def setUp(self): # The super calls needs to happen first for the settings override. super().setUp() self.create_table() def tearDown(self): # The super call needs to happen first because it uses the database. super().tearDown() self.drop_table() def create_table(self): management.call_command('createcachetable', verbosity=0) def drop_table(self): with connection.cursor() as cursor: table_name = connection.ops.quote_name('test cache table') cursor.execute('DROP TABLE %s' % table_name) def test_get_many_num_queries(self): cache.set_many({'a': 1, 'b': 2}) cache.set('expired', 'expired', 0.01) with self.assertNumQueries(1): self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2}) time.sleep(0.02) with self.assertNumQueries(2): self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2}) def test_delete_many_num_queries(self): cache.set_many({'a': 1, 'b': 2, 'c': 3}) with self.assertNumQueries(1): cache.delete_many(['a', 'b', 'c']) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 18) def test_second_call_doesnt_crash(self): out = io.StringIO() management.call_command('createcachetable', stdout=out) self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES)) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.db.DatabaseCache', # Use another table name to avoid the 'table already exists' message. LOCATION='createcachetable_dry_run_mode' )) def test_createcachetable_dry_run_mode(self): out = io.StringIO() management.call_command('createcachetable', dry_run=True, stdout=out) output = out.getvalue() self.assertTrue(output.startswith("CREATE TABLE")) def test_createcachetable_with_table_argument(self): """ Delete and recreate cache table with legacy behavior (explicitly specifying the table name). 
""" self.drop_table() out = io.StringIO() management.call_command( 'createcachetable', 'test cache table', verbosity=2, stdout=out, ) self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n") @override_settings(USE_TZ=True) class DBCacheWithTimeZoneTests(DBCacheTests): pass class DBCacheRouter: """A router that puts the cache table on the 'other' database.""" def db_for_read(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def db_for_write(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def allow_migrate(self, db, app_label, **hints): if app_label == 'django_cache': return db == 'other' return None @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'my_cache_table', }, }, ) class CreateCacheTableForDBCacheTests(TestCase): databases = {'default', 'other'} @override_settings(DATABASE_ROUTERS=[DBCacheRouter()]) def test_createcachetable_observes_database_router(self): # cache table should not be created on 'default' with self.assertNumQueries(0, using='default'): management.call_command('createcachetable', database='default', verbosity=0) # cache table should be created on 'other' # Queries: # 1: check table doesn't already exist # 2: create savepoint (if transactional DDL is supported) # 3: create the table # 4: create the index # 5: release savepoint (if transactional DDL is supported) num = 5 if connections['other'].features.can_rollback_ddl else 3 with self.assertNumQueries(num, using='other'): management.call_command('createcachetable', database='other', verbosity=0) class PicklingSideEffect: def __init__(self, cache): self.cache = cache self.locked = False def __getstate__(self): self.locked = self.cache._lock.locked() return {} limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', OPTIONS={'MAX_ENTRIES': 9}, )) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', )) class LocMemCacheTests(BaseCacheTests, TestCase): def setUp(self): super().setUp() # LocMem requires a hack to make the other caches # share a data store with the 'normal' cache. 
caches['prefix']._cache = cache._cache caches['prefix']._expire_info = cache._expire_info caches['v2']._cache = cache._cache caches['v2']._expire_info = cache._expire_info caches['custom_key']._cache = cache._cache caches['custom_key']._expire_info = cache._expire_info caches['custom_key2']._cache = cache._cache caches['custom_key2']._expire_info = cache._expire_info @override_settings(CACHES={ 'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other' }, }) def test_multiple_caches(self): "Multiple locmem caches are isolated" cache.set('value', 42) self.assertEqual(caches['default'].get('value'), 42) self.assertIsNone(caches['other'].get('value')) def test_locking_on_pickle(self): """#20613/#18541 -- Ensures pickling is done outside of the lock.""" bad_obj = PicklingSideEffect(cache) cache.set('set', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") cache.add('add', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") def test_incr_decr_timeout(self): """incr/decr does not modify expiry time (matches memcached behavior)""" key = 'value' _key = cache.make_key(key) cache.set(key, 1, timeout=cache.default_timeout * 10) expire = cache._expire_info[_key] cache.incr(key) self.assertEqual(expire, cache._expire_info[_key]) cache.decr(key) self.assertEqual(expire, cache._expire_info[_key]) @limit_locmem_entries def test_lru_get(self): """get() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) @limit_locmem_entries def test_lru_set(self): """set() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(3, 9): cache.set(key, key, timeout=None) cache.set(9, 9, timeout=None) for key in range(3, 10): self.assertEqual(cache.get(key), key) for key in range(3): self.assertIsNone(cache.get(key)) @limit_locmem_entries def test_lru_incr(self): """incr() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): cache.incr(key) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key + 1) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) # memcached backend isn't guaranteed to be available. # To check the memcached backend, the test settings file will # need to contain at least one cache backend setting that points at # your memcache server. configured_caches = {} for _cache_params in settings.CACHES.values(): configured_caches[_cache_params['BACKEND']] = _cache_params MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache') PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache') # The memcached backends don't support cull-related options like `MAX_ENTRIES`. memcached_excluded_caches = {'cull', 'zero_cull'} class BaseMemcachedTests(BaseCacheTests): # By default it's assumed that the client doesn't clean up connections # properly, in which case the backend must do so after each request. 
should_disconnect_on_close = True def test_location_multiple_servers(self): locations = [ ['server1.tld', 'server2:11211'], 'server1.tld;server2:11211', 'server1.tld,server2:11211', ] for location in locations: with self.subTest(location=location): params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location} with self.settings(CACHES={'default': params}): self.assertEqual(cache._servers, ['server1.tld', 'server2:11211']) def test_invalid_key_characters(self): """ On memcached, we don't introduce a duplicate key validation step (for speed reasons), we just let the memcached API library raise its own exception on bad keys. Refs #6447. In order to be memcached-API-library agnostic, we only assert that a generic exception of some kind is raised. """ # memcached does not allow whitespace or control characters in keys # when using the ascii protocol. with self.assertRaises(Exception): cache.set('key with spaces', 'value') def test_invalid_key_length(self): # memcached limits key length to 250 with self.assertRaises(Exception): cache.set('a' * 251, 'value') def test_default_never_expiring_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, TIMEOUT=None)): cache.set('infinite_foo', 'bar') self.assertEqual(cache.get('infinite_foo'), 'bar') def test_default_far_future_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, # 60*60*24*365, 1 year TIMEOUT=31536000)): cache.set('future_foo', 'bar') self.assertEqual(cache.get('future_foo'), 'bar') def test_cull(self): # culling isn't implemented, memcached deals with it. pass def test_zero_cull(self): # culling isn't implemented, memcached deals with it. pass def test_memcached_deletes_key_on_failed_set(self): # By default memcached allows objects up to 1MB. For the cache_db session # backend to always use the current session, memcached needs to delete # the old key if it fails to set. # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can # tell from a quick check of its source code. This is falling back to # the default value exposed by python-memcached on my system. max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576) cache.set('small_value', 'a') self.assertEqual(cache.get('small_value'), 'a') large_value = 'a' * (max_value_length + 1) try: cache.set('small_value', large_value) except Exception: # Some clients (e.g. pylibmc) raise when the value is too large, # while others (e.g. python-memcached) intentionally return True # indicating success. This test is primarily checking that the key # was deleted, so the return/exception behavior for the set() # itself is not important. pass # small_value should be deleted, or set if configured to accept larger values value = cache.get('small_value') self.assertTrue(value is None or value == large_value) def test_close(self): # For clients that don't manage their connections properly, the # connection is closed when the request is complete. 
signals.request_finished.disconnect(close_old_connections) try: with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect: signals.request_finished.send(self.__class__) self.assertIs(mock_disconnect.called, self.should_disconnect_on_close) finally: signals.request_finished.connect(close_old_connections) def test_set_many_returns_failing_keys(self): def fail_set_multi(mapping, *args, **kwargs): return mapping.keys() with mock.patch('%s.Client.set_multi' % self.client_library_name, side_effect=fail_set_multi): failing_keys = cache.set_many({'key': 'value'}) self.assertEqual(failing_keys, ['key']) @unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, )) class MemcachedCacheTests(BaseMemcachedTests, TestCase): base_params = MemcachedCache_params client_library_name = 'memcache' def test_memcached_uses_highest_pickle_version(self): # Regression test for #19810 for cache_key in settings.CACHES: with self.subTest(cache_key=cache_key): self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL) @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, OPTIONS={'server_max_value_length': 9999}, )) def test_memcached_options(self): self.assertEqual(cache._cache.server_max_value_length, 9999) def test_default_used_when_none_is_set(self): """ python-memcached doesn't support default in get() so this test overrides the one in BaseCacheTests. """ cache.set('key_default_none', None) self.assertEqual(cache.get('key_default_none', default='default'), 'default') @unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, )) class PyLibMCCacheTests(BaseMemcachedTests, TestCase): base_params = PyLibMCCache_params client_library_name = 'pylibmc' # libmemcached manages its own connections. should_disconnect_on_close = False # By default, pylibmc/libmemcached don't verify keys client-side and so # this test triggers a server-side bug that causes later tests to fail # (#19914). The `verify_keys` behavior option could be set to True (which # would avoid triggering the server-side bug), however this test would # still fail due to https://github.com/lericson/pylibmc/issues/219. @unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail") def test_invalid_key_characters(self): pass @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, OPTIONS={ 'binary': True, 'behaviors': {'tcp_nodelay': True}, }, )) def test_pylibmc_options(self): self.assertTrue(cache._cache.binary) self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True)) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.filebased.FileBasedCache', )) class FileBasedCacheTests(BaseCacheTests, TestCase): """ Specific test cases for the file-based cache. """ def setUp(self): super().setUp() self.dirname = self.mkdtemp() # Caches location cannot be modified through override_settings / modify_settings, # hence settings are manipulated directly here and the setting_changed signal # is triggered manually. 
for cache_params in settings.CACHES.values(): cache_params['LOCATION'] = self.dirname setting_changed.send(self.__class__, setting='CACHES', enter=False) def tearDown(self): super().tearDown() # Call parent first, as cache.clear() may recreate cache base directory shutil.rmtree(self.dirname) def mkdtemp(self): return tempfile.mkdtemp() def test_ignores_non_cache_files(self): fname = os.path.join(self.dirname, 'not-a-cache-file') with open(fname, 'w'): os.utime(fname, None) cache.clear() self.assertTrue(os.path.exists(fname), 'Expected cache.clear to ignore non cache files') os.remove(fname) def test_clear_does_not_remove_cache_dir(self): cache.clear() self.assertTrue(os.path.exists(self.dirname), 'Expected cache.clear to keep the cache dir') def test_creates_cache_dir_if_nonexistent(self): os.rmdir(self.dirname) cache.set('foo', 'bar') self.assertTrue(os.path.exists(self.dirname)) def test_get_ignores_enoent(self): cache.set('foo', 'bar') os.unlink(cache._key_to_file('foo')) # Returns the default instead of erroring. self.assertEqual(cache.get('foo', 'baz'), 'baz') def test_get_does_not_ignore_non_filenotfound_exceptions(self): with mock.patch('builtins.open', side_effect=OSError): with self.assertRaises(OSError): cache.get('foo') def test_empty_cache_file_considered_expired(self): cache_file = cache._key_to_file('foo') with open(cache_file, 'wb') as fh: fh.write(b'') with open(cache_file, 'rb') as fh: self.assertIs(cache._is_expired(fh), True) class FileBasedCachePathLibTests(FileBasedCacheTests): def mkdtemp(self): tmp_dir = super().mkdtemp() return Path(tmp_dir) @override_settings(CACHES={ 'default': { 'BACKEND': 'cache.liberal_backend.CacheClass', }, }) class CustomCacheKeyValidationTests(SimpleTestCase): """ Tests for the ability to mixin a custom ``validate_key`` method to a custom cache backend that otherwise inherits from a builtin backend, and override the default key validation. Refs #6447. """ def test_custom_key_validation(self): # this key is both longer than 250 characters, and has spaces key = 'some key with spaces' * 15 val = 'a value' cache.set(key, val) self.assertEqual(cache.get(key), val) @override_settings( CACHES={ 'default': { 'BACKEND': 'cache.closeable_cache.CacheClass', } } ) class CacheClosingTests(SimpleTestCase): def test_close(self): self.assertFalse(cache.closed) signals.request_finished.send(self.__class__) self.assertTrue(cache.closed) DEFAULT_MEMORY_CACHES_SETTINGS = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-snowflake', } } NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS) NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None class DefaultNonExpiringCacheKeyTests(SimpleTestCase): """ Settings having Cache arguments with a TIMEOUT=None create Caches that will set non-expiring keys. """ def setUp(self): # The 5 minute (300 seconds) default expiration time for keys is # defined in the implementation of the initializer method of the # BaseCache type. self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout def tearDown(self): del(self.DEFAULT_TIMEOUT) def test_default_expiration_time_for_keys_is_5_minutes(self): """The default expiration time of a cache key is 5 minutes. This value is defined in django.core.cache.backends.base.BaseCache.__init__(). 
""" self.assertEqual(300, self.DEFAULT_TIMEOUT) def test_caches_with_unset_timeout_has_correct_default_timeout(self): """Caches that have the TIMEOUT parameter undefined in the default settings will use the default 5 minute timeout. """ cache = caches[DEFAULT_CACHE_ALIAS] self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self): """Memory caches that have the TIMEOUT parameter set to `None` in the default settings with have `None` as the default timeout. This means "no timeout". """ cache = caches[DEFAULT_CACHE_ALIAS] self.assertIsNone(cache.default_timeout) self.assertIsNone(cache.get_backend_timeout()) @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS) def test_caches_with_unset_timeout_set_expiring_key(self): """Memory caches that have the TIMEOUT parameter unset will set cache keys having the default 5 minute timeout. """ key = "my-key" value = "my-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNotNone(cache._expire_info[cache_key]) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_set_non_expiring_key(self): """Memory caches that have the TIMEOUT parameter set to `None` will set a non expiring key by default. """ key = "another-key" value = "another-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNone(cache._expire_info[cache_key]) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHE_MIDDLEWARE_SECONDS=1, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, USE_I18N=False, ALLOWED_HOSTS=['.example.com'], ) class CacheUtils(SimpleTestCase): """TestCase for django.utils.cache functions.""" host = 'www.example.com' path = '/cache/test/' factory = RequestFactory(HTTP_HOST=host) def tearDown(self): cache.clear() def _get_request_cache(self, method='GET', query_string=None, update_cache=None): request = self._get_request(self.host, self.path, method, query_string=query_string) request._cache_update_cache = True if not update_cache else update_cache return request def _set_cache(self, request, msg): response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. 
(None, ('Accept-Encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'), ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('*', ('Accept-Language', 'Cookie'), '*'), ('Accept-Language, Cookie', ('*',), '*'), ) for initial_vary, newheaders, resulting_vary in headers: with self.subTest(initial_vary=initial_vary, newheaders=newheaders): response = HttpResponse() if initial_vary is not None: response['Vary'] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response['Vary'], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) response = HttpResponse() # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) # A specified key_prefix is taken into account. key_prefix = 'localprefix' learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {'test': 1}) response = HttpResponse() # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) # The querystring is taken into account. self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' 'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e' ) def test_cache_key_varies_by_url(self): """ get_cache_key keys differ by fully-qualified URL instead of path """ request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com') learn_cache_key(request1, HttpResponse()) request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com') learn_cache_key(request2, HttpResponse()) self.assertNotEqual(get_cache_key(request1), get_cache_key(request2)) def test_learn_cache_key(self): request = self.factory.head(self.path) response = HttpResponse() response['Vary'] = 'Pony' # Make sure that the Vary header is added to the key hash learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_patch_cache_control(self): tests = ( # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts (None, {'private': True}, {'private'}), ('', {'private': True}, {'private'}), # no-cache. 
('', {'no_cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}), ('', {'no-cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}), ('no-cache=Set-Cookie', {'no_cache': True}, {'no-cache'}), ('no-cache=Set-Cookie,no-cache=Link', {'no_cache': True}, {'no-cache'}), ('no-cache=Set-Cookie', {'no_cache': 'Link'}, {'no-cache=Set-Cookie', 'no-cache=Link'}), ( 'no-cache=Set-Cookie,no-cache=Link', {'no_cache': 'Custom'}, {'no-cache=Set-Cookie', 'no-cache=Link', 'no-cache=Custom'}, ), # Test whether private/public attributes are mutually exclusive ('private', {'private': True}, {'private'}), ('private', {'public': True}, {'public'}), ('public', {'public': True}, {'public'}), ('public', {'private': True}, {'private'}), ('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}), ('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ) cc_delim_re = re.compile(r'\s*,\s*') for initial_cc, newheaders, expected_cc in tests: with self.subTest(initial_cc=initial_cc, newheaders=newheaders): response = HttpResponse() if initial_cc is not None: response['Cache-Control'] = initial_cc patch_cache_control(response, **newheaders) parts = set(cc_delim_re.split(response['Cache-Control'])) self.assertEqual(parts, expected_cc) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix', }, }, ) class PrefixedCacheUtils(CacheUtils): pass @override_settings( CACHE_MIDDLEWARE_SECONDS=60, CACHE_MIDDLEWARE_KEY_PREFIX='test', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, ) class CacheHEADTest(SimpleTestCase): path = '/cache/test/' factory = RequestFactory() def tearDown(self): cache.clear() def _set_cache(self, request, msg): response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) def test_head_caches_correctly(self): test_content = 'test content' request = self.factory.head(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) def test_head_with_cached_get(self): test_content = 'test content' request = self.factory.get(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, LANGUAGES=[ ('en', 'English'), ('es', 'Spanish'), ], ) class CacheI18nTest(SimpleTestCase): path = '/cache/test/' factory = RequestFactory() def tearDown(self): cache.clear() @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False) def test_cache_key_i18n_translation(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") key2 = get_cache_key(request) 
self.assertEqual(key, key2) def check_accept_language_vary(self, accept_language, vary, reference_key): request = self.factory.get(self.path) request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response['Vary'] = vary key = learn_cache_key(request, response) key2 = get_cache_key(request) self.assertEqual(key, reference_key) self.assertEqual(key2, reference_key) @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False) def test_cache_key_i18n_translation_accept_language(self): lang = translation.get_language() self.assertEqual(lang, 'en') request = self.factory.get(self.path) request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response['Vary'] = 'accept-encoding' key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") self.check_accept_language_vary( 'en-us', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'en-US', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'en-US,en;q=0.8', 'accept-encoding, accept-language, cookie', key ) self.check_accept_language_vary( 'en-US,en;q=0.8,ko;q=0.6', 'accept-language, cookie, accept-encoding', key ) self.check_accept_language_vary( 'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ', 'accept-encoding, cookie, accept-language', key ) self.check_accept_language_vary( 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4', 'accept-language, accept-encoding, cookie', key ) self.check_accept_language_vary( 'ko;q=1.0,en;q=0.5', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'ko, en', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'ko-KR, en-US', 'accept-encoding, accept-language, cookie', key ) @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False) def test_cache_key_i18n_formatting(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when formatting is active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True) def test_cache_key_i18n_timezone(self): request = self.factory.get(self.path) tz = timezone.get_current_timezone_name() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False, USE_L10N=False) def test_cache_key_no_i18n(self): request = self.factory.get(self.path) lang = translation.get_language() tz = timezone.get_current_timezone_name() response = HttpResponse() key = learn_cache_key(request, response) self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active") self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active") @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, USE_I18N=True, ) def test_middleware(self): def set_cache(request, lang, msg): translation.activate(lang) response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) # cache with non empty request.GET request = 
self.factory.get(self.path, {'foo': 'bar', 'other': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) # first access, cache must return None self.assertIsNone(get_cache_data) response = HttpResponse() content = 'Check for cache with QUERY_STRING' response.content = content UpdateCacheMiddleware().process_response(request, response) get_cache_data = FetchFromCacheMiddleware().process_request(request) # cache must return content self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) # different QUERY_STRING, cache must be empty request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) # i18n tests en_message = "Hello world!" es_message = "Hola mundo!" request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'en', en_message) get_cache_data = FetchFromCacheMiddleware().process_request(request) # The cache can be recovered self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, en_message.encode()) # change the session language and set content request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'es', es_message) # change again the language translation.activate('en') # retrieve the content from cache get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data.content, en_message.encode()) # change again the language translation.activate('es') get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data.content, es_message.encode()) # reset the language translation.deactivate() @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, ) def test_middleware_doesnt_cache_streaming_response(self): request = self.factory.get(self.path) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) content = ['Check for cache with streaming content.'] response = StreamingHttpResponse(content) UpdateCacheMiddleware().process_response(request, response) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix' }, }, ) class PrefixedCacheI18nTest(CacheI18nTest): pass def hello_world_view(request, value): return HttpResponse('Hello World %s' % value) def csrf_view(request): return HttpResponse(csrf(request)['csrf_token']) @override_settings( CACHE_MIDDLEWARE_ALIAS='other', CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix', CACHE_MIDDLEWARE_SECONDS=30, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other', 'TIMEOUT': '1', }, }, ) class CacheMiddlewareTest(SimpleTestCase): factory = RequestFactory() def setUp(self): self.default_cache = caches['default'] self.other_cache = caches['other'] def tearDown(self): self.default_cache.clear() self.other_cache.clear() super().tearDown() def test_constructor(self): """ Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as Middleware vs. usage of CacheMiddleware as view decorator and setting attributes appropriately. 
""" # If no arguments are passed in construction, it's being used as middleware. middleware = CacheMiddleware() # Now test object attributes against values defined in setUp above self.assertEqual(middleware.cache_timeout, 30) self.assertEqual(middleware.key_prefix, 'middlewareprefix') self.assertEqual(middleware.cache_alias, 'other') # If arguments are being passed in construction, it's being used as a decorator. # First, test with "defaults": as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None) self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30 self.assertEqual(as_view_decorator.key_prefix, '') # Value of DEFAULT_CACHE_ALIAS from django.core.cache self.assertEqual(as_view_decorator.cache_alias, 'default') # Next, test with custom values: as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo') self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60) self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo') self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other') def test_middleware(self): middleware = CacheMiddleware() prefix_middleware = CacheMiddleware(key_prefix='prefix1') timeout_middleware = CacheMiddleware(cache_timeout=1) request = self.factory.get('/view/') # Put the request through the request middleware result = middleware.process_request(request) self.assertIsNone(result) response = hello_world_view(request, '1') # Now put the response through the response middleware response = middleware.process_response(request, response) # Repeating the request should result in a cache hit result = middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') # The same request through a different middleware won't hit result = prefix_middleware.process_request(request) self.assertIsNone(result) # The same request with a timeout _will_ hit result = timeout_middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') def test_view_decorator(self): # decorate the same view with different cache decorators default_view = cache_page(3)(hello_world_view) default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view) explicit_default_view = cache_page(3, cache='default')(hello_world_view) explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view) other_view = cache_page(1, cache='other')(hello_world_view) other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view) request = self.factory.get('/view/') # Request the view once response = default_view(request, '1') self.assertEqual(response.content, b'Hello World 1') # Request again -- hit the cache response = default_view(request, '2') self.assertEqual(response.content, b'Hello World 1') # Requesting the same view with the explicit cache should yield the same result response = explicit_default_view(request, '3') self.assertEqual(response.content, b'Hello World 1') # Requesting with a prefix will hit a different cache key response = explicit_default_with_prefix_view(request, '4') self.assertEqual(response.content, b'Hello World 4') # Hitting the same view again gives a cache hit response = explicit_default_with_prefix_view(request, '5') self.assertEqual(response.content, b'Hello World 4') # And going back to the implicit cache will hit the same cache response = default_with_prefix_view(request, '6') 
self.assertEqual(response.content, b'Hello World 4') # Requesting from an alternate cache won't hit cache response = other_view(request, '7') self.assertEqual(response.content, b'Hello World 7') # But a repeated hit will hit cache response = other_view(request, '8') self.assertEqual(response.content, b'Hello World 7') # And prefixing the alternate cache yields yet another cache entry response = other_with_prefix_view(request, '9') self.assertEqual(response.content, b'Hello World 9') # But if we wait a couple of seconds... time.sleep(2) # ... the default cache will still hit caches['default'] response = default_view(request, '11') self.assertEqual(response.content, b'Hello World 1') # ... the default cache with a prefix will still hit response = default_with_prefix_view(request, '12') self.assertEqual(response.content, b'Hello World 4') # ... the explicit default cache will still hit response = explicit_default_view(request, '13') self.assertEqual(response.content, b'Hello World 1') # ... the explicit default cache with a prefix will still hit response = explicit_default_with_prefix_view(request, '14') self.assertEqual(response.content, b'Hello World 4') # .. but a rapidly expiring cache won't hit response = other_view(request, '15') self.assertEqual(response.content, b'Hello World 15') # .. even if it has a prefix response = other_with_prefix_view(request, '16') self.assertEqual(response.content, b'Hello World 16') def test_cached_control_private_not_cached(self): """Responses with 'Cache-Control: private' are not cached.""" view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view)) request = self.factory.get('/view/') response = view_with_private_cache(request, '1') self.assertEqual(response.content, b'Hello World 1') response = view_with_private_cache(request, '2') self.assertEqual(response.content, b'Hello World 2') def test_sensitive_cookie_not_cached(self): """ Django must prevent caching of responses that set a user-specific (and maybe security sensitive) cookie in response to a cookie-less request. """ csrf_middleware = CsrfViewMiddleware() cache_middleware = CacheMiddleware() request = self.factory.get('/view/') self.assertIsNone(cache_middleware.process_request(request)) csrf_middleware.process_view(request, csrf_view, (), {}) response = csrf_view(request) response = csrf_middleware.process_response(request, response) response = cache_middleware.process_response(request, response) # Inserting a CSRF cookie in a cookie-less request prevented caching. self.assertIsNone(cache_middleware.process_request(request)) def test_304_response_has_http_caching_headers_but_not_cached(self): original_view = mock.Mock(return_value=HttpResponseNotModified()) view = cache_page(2)(original_view) request = self.factory.get('/view/') # The view shouldn't be cached on the second call. view(request).close() response = view(request) response.close() self.assertEqual(original_view.call_count, 2) self.assertIsInstance(response, HttpResponseNotModified) self.assertIn('Cache-Control', response) self.assertIn('Expires', response) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHE_MIDDLEWARE_SECONDS=1, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, USE_I18N=False, ) class TestWithTemplateResponse(SimpleTestCase): """ Tests various headers w/ TemplateResponse. 
Most are probably redundant since they manipulate the same object anyway but the ETag header is 'special' because it relies on the content being complete (which is not necessarily always the case with a TemplateResponse) """ path = '/cache/test/' factory = RequestFactory() def tearDown(self): cache.clear() def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. (None, ('Accept-Encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'), ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ) for initial_vary, newheaders, resulting_vary in headers: with self.subTest(initial_vary=initial_vary, newheaders=newheaders): template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) if initial_vary is not None: response['Vary'] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response['Vary'], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) key_prefix = 'localprefix' # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e' ) # A specified key_prefix is taken into account. learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.' '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e' ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {'test': 1}) template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) # The querystring is taken into account. self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' 
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e' ) class TestMakeTemplateFragmentKey(SimpleTestCase): def test_without_vary_on(self): key = make_template_fragment_key('a.fragment') self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e') def test_with_one_vary_on(self): key = make_template_fragment_key('foo', ['abc']) self.assertEqual(key, 'template.cache.foo.493e283d571a73056196f1a68efd0f66') def test_with_many_vary_on(self): key = make_template_fragment_key('bar', ['abc', 'def']) self.assertEqual(key, 'template.cache.bar.17c1a507a0cb58384f4c639067a93520') def test_proper_escaping(self): key = make_template_fragment_key('spam', ['abc:def%']) self.assertEqual(key, 'template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc') def test_with_ints_vary_on(self): key = make_template_fragment_key('foo', [1, 2, 3, 4, 5]) self.assertEqual(key, 'template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461') def test_with_unicode_vary_on(self): key = make_template_fragment_key('foo', ['42º', '😀']) self.assertEqual(key, 'template.cache.foo.7ced1c94e543668590ba39b3c08b0237') def test_long_vary_on(self): key = make_template_fragment_key('foo', ['x' * 10000]) self.assertEqual(key, 'template.cache.foo.3670b349b5124aa56bdb50678b02b23a') class CacheHandlerTest(SimpleTestCase): def test_same_instance(self): """ Attempting to retrieve the same alias should yield the same instance. """ cache1 = caches['default'] cache2 = caches['default'] self.assertIs(cache1, cache2) def test_per_thread(self): """ Requesting the same alias from separate threads should yield separate instances. """ c = [] def runner(): c.append(caches['default']) for x in range(2): t = threading.Thread(target=runner) t.start() t.join() self.assertIsNot(c[0], c[1])
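

# An illustrative sketch, not part of the original test suite: a minimal
# example of the versioning and get_or_set() semantics exercised above,
# using only the `cache` object this module already imports. The key names
# and the helper function are hypothetical and exist purely for illustration;
# the function is never called by the tests.
def _example_versioned_cache_usage():
    # get_or_set() stores the default on a miss and returns it; None defaults
    # are not stored (see test_get_or_set_callable_returning_none above).
    value = cache.get_or_set('example-key', 42, timeout=300)

    # Values written with an explicit version are invisible to other versions.
    cache.set('example-answer', value, version=2)
    assert cache.get('example-answer') is None            # default version misses
    assert cache.get('example-answer', version=2) == value

    # incr_version() moves the value to the next version number.
    cache.incr_version('example-answer', version=2)
    assert cache.get('example-answer', version=3) == value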
LLRPConnector.py
#!/usr/bin/env python #------------------------------------------------------ # Copyright (C) 2013-2019 Edward Sitarski # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software # and associated documentation files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING # BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # import six from six.moves.queue import Queue, Empty import socket import datetime import threading from .pyllrp import * class LLRPConnector( object ): #-------------------------------------------------------------------------- # # A simple LLRP reader connection manager. # # Supports connecting to the reader, transacting commands, message handlers, # and asynchronous/synchronous monitoring of the reader socket. # def __init__( self ): self.TimeoutSecs = 6 # Time for the reader to respond. self._reset() self.handlers = {} def _reset( self ): ''' Reset all internal fields. ''' self.host = None self.port = None self.readerSocket = None self.thread = None self.shutdownQ = None # Used to shutdown the thread. self.keepGoing = False self.timeCorrection = None def connect( self, host, port = 5084 ): ''' Connect to a reader. ''' self._reset() self.readerSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM ) # Set a timeout for the socket. This is also the maximum time it will take to shut down the listener. self.readerSocket.settimeout( self.TimeoutSecs ) self.readerSocket.connect( (host, port) ) self.host = host self.port = port # Expecting READER_EVENT_NOTIFICATION message. response = UnpackMessageFromSocket( self.readerSocket ) tNow = datetime.datetime.now() # Get the time here to minimize latency. # Check if the connection succeeded. connectionAttemptEvent = response.getFirstParameterByClass(ConnectionAttemptEvent_Parameter) if connectionAttemptEvent and connectionAttemptEvent.Status != ConnectionAttemptStatusType.Success: self.disconnect() raise EnvironmentError( connectionAttemptEvent.Status, ConnectionAttemptStatusType.getName(connectionAttemptEvent.Status).replace('_',' ') ) self.keepGoing = True # Compute a correction between the reader's time and the computer's time. self.timeCorrection = None try: microseconds = response.getFirstParameterByClass(UTCTimestamp_Parameter).Microseconds readerTime = datetime.datetime.utcfromtimestamp( microseconds / 1000000.0 ) self.timeCorrection = tNow - readerTime except Exception as e: self.disconnect() raise ValueError('Missing Timestamp: ' + response.__repr__()) return response def tagTimeToComputerTime( self, tagTime ): # Time is in microseconds from Jan 1, 1970. 
return datetime.datetime.utcfromtimestamp( tagTime / 1000000.0 ) + self.timeCorrection def disconnect( self ): ''' Disconnect from a reader. Also stops the listener. ''' self.timeCorrection = None if not self.readerSocket: return None if self.isListening(): self.stopListener() # Send the reader a disconnect message. response = None try: response = self.transact( CLOSE_CONNECTION_Message() ) except: pass self.readerSocket.close() self.readerSocket = None return response def addHandler( self, messageClass, handlerFunc ): ''' Add a handler for a specific message type. ''' ''' Support multiple handlers for the same message type. ''' self.handlers.setdefault( messageClass, [] ).append( handlerFunc ) def removeHandler( self, messageClass, handlerFunc = None ): ''' Remove a handler for a specific message type from the reader. If handlerFunc is None, all handlers for the given messageClass will be removed. If handlerFunc is not None, only the specific handler is removed. ''' if handlerFunc is None: try: del self.handlers[messageClass] except KeyError: pass else: while 1: try: self.handlers[messageClass].remove( handlerFunc ) except (KeyError, ValueError): break def removeAllHandlers( self ): self.handlers = {} def transact( self, message ): ''' Send a message to the reader and wait for the response. ''' assert not self.isListening(), 'Cannot perform transact() while listen thread is active. Stop it first with stopListener().' message.send( self.readerSocket ) response = WaitForMessage( message.MessageID, self.readerSocket, self.callHandler ) return response def checkKeepGoing( self ): ''' Check if we should continue the reader thread. ''' if not self.keepGoing: return False try: # Check the shutdown queue for a message. If there is one, shutdown. d = self.shutdownQ.get( False ) self.shutdownQ.task_done() self.keepGoing = False return False except (AttributeError, Empty): return True def callHandler( self, message ): ''' Call all the handlers for this message. ''' for cb in (self.handlers.get(message.__class__, None) or self.handlers.get('default', [])): cb( self, message ) def listen( self ): ''' Listen for messages from the reader. ''' # Calling this by itself will block. # Recommended usage is to use startListener and stopListener. while self.checkKeepGoing(): try: response = UnpackMessageFromSocket( self.readerSocket ) except socket.timeout: continue self.callHandler( response ) def startListener( self ): ''' Starts a thread to listen to the reader. ''' assert self.readerSocket, 'Cannot start listener without a successful connection.' assert not self.thread, 'Listener is already running. Stop it first with stopListener().' self.shutdownQ = Queue() self.keepGoing = True self.thread = threading.Thread( target = self.listen, name='LLRP Listener' ) self.thread.daemon = True self.thread.start() def stopListener( self ): ''' Stops the thread listening to the reader. ''' self.shutdownQ.put( 'Shutdown' ) self.thread.join() # Wait for the thread to terminate. self.shutdownQ = None self.thread = None def isListening( self ): return self.thread and self.thread.is_alive()
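

# An illustrative usage sketch, not part of the original module: connect to a
# reader, register a catch-all handler and run the asynchronous listener, as
# recommended in listen() above. The reader address below is hypothetical; the
# 'default' key is the fallback consulted by callHandler() when no handler is
# registered for a message class.
def ExampleListen( host='192.168.0.101', seconds=10.0 ):
    import time

    def printMessage( connector, message ):
        # Handlers are called with the connector instance and the decoded message.
        six.print_( message.__class__.__name__ )

    conn = LLRPConnector()
    conn.connect( host )                        # default LLRP port is 5084
    conn.addHandler( 'default', printMessage )
    conn.startListener()                        # listener runs in its own daemon thread
    time.sleep( seconds )                       # let reader events arrive
    conn.stopListener()                         # transact() requires the listener to be stopped
    conn.disconnect()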
run_service.py
# -*- coding: utf-8 -*- """ Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix) Functions for starting the service SPDX-License-Identifier: MIT See LICENSES/MIT.md for more information. """ from __future__ import absolute_import, division, unicode_literals import threading # Global cache must not be used within these modules, because stale values may # be used and cause inconsistencies! from resources.lib.globals import g from resources.lib.common import (info, error, select_port, get_local_string) from resources.lib.upgrade_controller import check_service_upgrade try: # Python 2 unicode except NameError: # Python 3 unicode = str # pylint: disable=redefined-builtin class NetflixService(object): """ Netflix addon service """ from resources.lib.services.msl.http_server import MSLTCPServer from resources.lib.services.nfsession.http_server import NetflixTCPServer SERVERS = [ { 'name': 'MSL', 'class': MSLTCPServer, 'instance': None, 'thread': None}, { 'name': 'NS', 'class': NetflixTCPServer, 'instance': None, 'thread': None}, ] def __init__(self): for server in self.SERVERS: self.init_server(server) self.controller = None self.library_updater = None self.settings_monitor = None def init_server(self, server): server['class'].allow_reuse_address = True server['instance'] = server['class']( ('127.0.0.1', select_port(server['name']))) server['thread'] = threading.Thread( target=server['instance'].serve_forever) def start_services(self): """ Start the background services """ from resources.lib.services.playback.controller import PlaybackController from resources.lib.services.library_updater import LibraryUpdateService from resources.lib.services.settings_monitor import SettingsMonitor for server in self.SERVERS: server['instance'].server_activate() server['instance'].timeout = 1 server['thread'].start() info('[{}] Thread started'.format(server['name'])) self.controller = PlaybackController() self.library_updater = LibraryUpdateService() self.settings_monitor = SettingsMonitor() # Mark the service as active from xbmcgui import Window window_cls = Window(10000) window_cls.setProperty('nf_service_status', 'running') if not g.ADDON.getSettingBool('disable_startup_notification'): from resources.lib.kodi.ui import show_notification show_notification(get_local_string(30110)) def shutdown(self): """ Stop the background services """ from xbmcgui import Window window_cls = Window(10000) window_cls.setProperty('nf_service_status', 'stopped') for server in self.SERVERS: server['instance'].server_close() server['instance'].shutdown() server['instance'] = None server['thread'].join() server['thread'] = None info('Stopped MSL Service') def run(self): """Main loop. 
Runs until xbmc.Monitor requests abort""" # pylint: disable=broad-except try: self.start_services() except Exception as exc: from xbmcgui import Window window_cls = Window(10000) window_cls.setProperty('nf_service_status', 'stopped') import traceback from resources.lib.kodi.ui import show_addon_error_info error(traceback.format_exc()) show_addon_error_info(exc) return while not self.controller.abortRequested(): if self._tick_and_wait_for_abort(): break self.shutdown() def _tick_and_wait_for_abort(self): try: self.controller.on_playback_tick() self.library_updater.on_tick() except Exception as exc: # pylint: disable=broad-except import traceback from resources.lib.kodi.ui import show_notification error(traceback.format_exc()) show_notification(': '.join((exc.__class__.__name__, unicode(exc)))) return self.controller.waitForAbort(1) def run(argv): # Initialize globals right away to avoid stale values from the last addon invocation. # Otherwise Kodi's reuseLanguageInvoker will cause some really quirky behavior! # PR: https://github.com/xbmc/xbmc/pull/13814 g.init_globals(argv) check_service_upgrade() NetflixService().run()
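

# An illustrative sketch, not part of the original module: Kodi normally starts
# the service through the addon's service entry script, which imports run() and
# passes sys.argv through. The guarded block below shows the equivalent direct
# invocation for clarity; treat it as an assumption about how the service is
# launched, not as the addon's actual entry point.
if __name__ == '__main__':
    import sys
    run(sys.argv)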
deepracer_memory.py
from threading import Thread, Event, Lock import pickle import queue import logging import redis from rl_coach.memories.backend.memory import MemoryBackend from rl_coach.core_types import Episode from markov.utils import Logger LOG = Logger(__name__, logging.INFO).get_logger() # Channel used by the training worker to request episodes WORKER_CHANNEL = 'worker_channel' # The amount of time to wait before querying the socket POLL_TIME = 10.0 # Since all the data is handled by the physical memory, there # is a limit to the number of steps that can # be contained in a rollout. This number was determined empirically, # as it seems rl_coach is making # a bunch of hard copies of the transitions # # Cutting down to 5000 from 10000 as the state size is increased: # - front-facing-camera -> stereo + left_camera + lidar # - We should be able to handle 6000, but reducing to 5000 to be safe. # TODO: We need better approach to handle this memory cap. MAX_MEMORY_STEPS = 5000 def get_endpoint_helper(redis_address, redis_port): '''Helper method that returns a dict with the address and port redis_address - address to be returned in the dict redis_port - Port to be returned in the dict ''' return {'redis_address': redis_address, 'redis_port': redis_port} class DeepRacerRolloutBackEnd(MemoryBackend): ''' Class used by the rollout worker to publish data to the training worker''' def __init__(self, params, num_consecutive_playing_steps, agent_name): ''' params - Struct containing all the necessary redis parammeters, see RedisPubSubMemoryBackendParameters num_consecutive_playing_steps - Struct containing the number of episodes to collect before performing a training iteration ''' self.agent_name = agent_name # List of tuples containing the episode number and the episode data self.data = list() # The episode number of the last episode produced by the rollout worker self.last_episode_num = 0 # The last episode number requested by trainer worker self.last_request_episode_num = -1 # The max number of episodes to collect before performing a training iteration self.total_episodes = num_consecutive_playing_steps.num_steps # Redis params self.params = params # Redis topic name self.topic_name = self.params.channel + '_' + self.agent_name # thread lock self._lock = Lock() # Redis client that will allow us to publish and subscribe to messages self.data_client = redis.Redis(self.params.redis_address, self.params.redis_port) # Pubsub object that will allow us to subscribe to the data req channels this # allow us to get request from the subscriber self.data_pubsub = self.data_client.pubsub() # Handle request via call back self.data_pubsub.subscribe(**{WORKER_CHANNEL + '_' + self.agent_name: self.data_req_handler}) self.data_pubsub.run_in_thread() def data_req_handler(self, message): ''' Message handler for training worker request message - Request from trainer worker containing the desired episode number ''' episode = -1 try: episode = pickle.loads(message['data']) if episode < 0: LOG.info("Negative episode index value") return with self._lock: self.last_request_episode_num = episode if episode < len(self.data): self.data_client.publish(self.topic_name, pickle.dumps(self.data[episode])) # If the trainer requests the total episodes we know that the trainer has all the # episodes so we will reset the data if episode == self.total_episodes: del self.data[:] self.last_episode_num = 0 self.last_request_episode_num = -1 # Send an ACK letting the trainer know we have reset the data and it is safe # to train 
self.data_client.publish(self.topic_name, pickle.dumps((self.total_episodes + 1, ""))) except redis.ConnectionError as ex: LOG.info("Redis connection error: %s", ex) except pickle.PickleError as ex: LOG.info("Could not decode/encode trainer request %s", ex) except Exception as ex: LOG.info("Rollout worker data_req_handler %s", ex) def store(self, obj): ''' Stores the data object into the data list along with episode number obj - Data object to be stored in the data list ''' with self._lock: self.data.append((self.last_episode_num, obj)) # DeepRacerRolloutBackEnd ignores the trainer's request if # the data isn't ready at the time. But since we know trainer is waiting # send the data as soon as it becomes ready. if self.last_episode_num <= self.last_request_episode_num: self.data_client.publish(self.topic_name, pickle.dumps(self.data[self.last_episode_num])) self.last_episode_num += 1 def get_endpoint(self): '''Returns a dict with the redis address and port ''' return get_endpoint_helper(self.params.redis_address, self.params.redis_port) class DeepRacerTrainerBackEnd(MemoryBackend): '''Class used by the training worker to retrieve the data from the rollout worker ''' def __init__(self, params, agents_params): ''' params - Struct containing all the necessary redis parammeters, see RedisPubSubMemoryBackendParame ''' # Redis params self.params = params # Track the total steps taken in the rollout self.rollout_steps = dict() # Episode number whose data is to be retrieved from the rollout worker self.episode_req = 0 # Episodes in rollout self.total_episodes_in_rollout = 0 # Queue object to hold data from the rollout worker while waiting to be consumed self.data_queues = dict() # Flag to notify the publish worker that data should be requested self.request_data = False # Redis client that will allow us to publish and subscribe to messages self.data_client = redis.Redis(self.params.redis_address, self.params.redis_port) # Pubsub object that will allow us to subscribe to the data channel and request data self.data_pubsubs = dict() self.request_events = dict() for agent_param in agents_params: self.rollout_steps[agent_param.name] = 0 self.request_events[agent_param.name] = Event() self.data_queues[agent_param.name] = queue.Queue(1) self.data_pubsubs[agent_param.name] = self.data_client.pubsub() # Handle data returning from the rollout worker via callback subscriber = (lambda a: lambda m: self.data_handler(m, a))(agent_param.name) self.data_pubsubs[agent_param.name].subscribe(**{self.params.channel + '_' + agent_param.name: subscriber}) self.data_pubsubs[agent_param.name].run_in_thread() # Use a seperate thread to request data publish_worker = (lambda a: lambda: self.publish_worker(a))(agent_param.name) Thread(target=publish_worker).start() def data_handler(self, message, agent_name): ''' Message handler for data sent from the rollout worker message - Tuple sent from the rollout worker containing episode number and data ''' try: obj = pickle.loads(message['data']) if isinstance(obj, tuple): self.data_queues[agent_name].put_nowait(obj) except queue.Full: pass except Exception as ex: LOG.info("Trainer data handler error: %s", ex) def get_rollout_steps(self): '''Returns the total number of steps in a rollout ''' return self.rollout_steps def get_total_episodes_in_rollout(self): '''Return the total number of episodes collected in the rollout ''' return self.total_episodes_in_rollout def publish_worker(self, agent_name): ''' Worker responsible for requesting data from the rollout worker''' while True: try: 
if self.request_data: # Request the desired episode self.data_client.publish(WORKER_CHANNEL + '_' + agent_name, pickle.dumps(self.episode_req)) self.request_events[agent_name].wait(POLL_TIME) self.request_events[agent_name].clear() except redis.ConnectionError as ex: LOG.info("Redis connection error: %s : %s", agent_name, ex) continue except pickle.PickleError as ex: LOG.info("Could not decode rollout request %s, %s", agent_name, ex) continue except Exception as ex: LOG.info("Trainer publish worker error: %s, %s", agent_name, ex) continue def memory_purge(self): '''Purge Redis Memory''' return self.data_client.memory_purge() def fetch(self, num_consecutive_playing_steps=None): ''' Retrieves the data from the rollout worker num_consecutive_playing_steps - Struct containing the number of episodes to collect before performing a training iteration ''' episode_counter = 0 step_counter = 0 self.episode_req = 0 # Clear any left over Episodes data in queue from previous fetch [agent_queue.get() for agent_queue in self.data_queues.values() if not agent_queue.empty()] self.request_data = True [event.set() for event in self.request_events.values()] self.rollout_steps = dict.fromkeys(self.rollout_steps, 0) self.total_episodes_in_rollout = 0 while episode_counter <= num_consecutive_playing_steps.num_steps: try: objs = {k: v.get() for k, v in self.data_queues.items()} if all(obj[0] == episode_counter and isinstance(obj[1], Episode) for obj in objs.values()): episode_counter += 1 step_counter += sum(obj[1].length() for obj in objs.values()) self.episode_req = episode_counter if step_counter <= MAX_MEMORY_STEPS: self.rollout_steps = {k: self.rollout_steps[k] + objs[k][1].length() for k in self.rollout_steps.keys()} self.total_episodes_in_rollout += 1 transition_iters = {k: iter(v[1].transitions) for k, v in objs.items()} transition = {k: next(v, None) for k, v in transition_iters.items()} while any(transition.values()): yield transition transition = {k: next(v, None) for k, v in transition_iters.items()} # When we request num_consecutive_playing_steps.num we will get back # 1 more than the requested index this lets us know the rollout worker # has given us all available data elif all(obj[0] == num_consecutive_playing_steps.num_steps + 1 for obj in objs.values()): episode_counter = num_consecutive_playing_steps.num_steps + 1 self.episode_req = 0 self.request_data = False continue [event.set() for event in self.request_events.values()] except Exception as ex: LOG.info("Trainer fetch error: %s", ex) continue def get_endpoint(self): '''Returns a dict with the redis address and port ''' return get_endpoint_helper(self.params.redis_address, self.params.redis_port)
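# Minimal usage sketch for the two backends above, assuming a Redis server is
# reachable at localhost:6379. The Params / Steps / AgentParams namedtuples are
# illustrative stand-ins for the rl_coach / markov structs named in the
# docstrings (RedisPubSubMemoryBackendParameters etc.), not the real classes.
from collections import namedtuple

Params = namedtuple('Params', ['redis_address', 'redis_port', 'channel'])
Steps = namedtuple('Steps', ['num_steps'])
AgentParams = namedtuple('AgentParams', ['name'])

params = Params(redis_address='localhost', redis_port=6379, channel='episode_channel')

# Rollout side: stores finished episodes and answers the trainer's pub/sub requests.
rollout = DeepRacerRolloutBackEnd(params, Steps(num_steps=20), 'agent_0')
# rollout.store(episode)  # called once per finished episode

# Trainer side: requests episode 0, 1, ... and yields their transitions.
trainer = DeepRacerTrainerBackEnd(params, [AgentParams(name='agent_0')])
# for transition in trainer.fetch(Steps(num_steps=20)):
#     pass  # each item is a {agent_name: Transition} dict for the training loop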
snake.py
import threading, queue, os, copy, time, random, sys, readchar #Board's size height = 50 width = 25 #queue to manage threads inputQueue = queue.Queue() # class Point(object): def __init__(self,x,y): self.x = x self.y = y def plusCoordinate(self,coor): self.x += coor.x self.y += coor.y def setCoordinate(self,coor): self.x = coor.x self.y = coor.y #vector up and down due to reverse of board (y-axis's direction is downward) VectorUp = Point(0,-1) VectorDown = Point(0,1) VectorLeft = Point(-1,0) VectorRight = Point(1,0) # class Snake(object): def __init__(self,head,direct): self.part = [Point(head.x,head.y)] self.direct = direct #in range [1,4] (clockwise) #1:up #2:right #3:down #4:left #point's coordinates of every single snake's part must be in range [0,25] to coordinate y and [0,50] to coordinate x # part[0] will be head of snake #input: self.Snake(object), vector:Point(object) def movement(self,vector): for i in range(len(self.part)-1,0,-1): #star from last element of part[] to element 0th (stop at element 0th) self.part[i].setCoordinate(self.part[i-1]) self.part[0].plusCoordinate(vector) #set element 0th to new coordinate by plusing vector (depend on which kind of movement e.g up,down,ect) # def initField(): field = [[0 for j in range(height)] for i in range(width)] return field # def clearScreen(): os.system("clear") # def getKey(): while True: key = readchar.readkey() inputQueue.put(key) if key == "x" or key == "X": sys.exit() # def checkCollision(snake): if snake.part[0].x > height-1 or snake.part[0].x < 0 or snake.part[0].y > width-1 or snake.part[0].y < 0: return True for i in range(len(snake.part)-1): #range [0,len(snake.part)-1] for j in range(i+1,len(snake.part)): #range [i,len(snake.part)] if snake.part[i].x == snake.part[j].x and snake.part[i].y == snake.part[j].y: return True return False # def mergeToField(field,snake,point): #merge random point to field field[point.y][point.x] = 1 #merge snake to field for i in range(len(snake.part)): if snake.part[i].x <= height-1 and snake.part[i].x >= 0 and snake.part[i].y <= width-1 and snake.part[i].y >= 0: field[snake.part[i].y][snake.part[i].x] = 1 # def display(field,score): clearScreen() print("\rScore: ",score) for i in range(len(field[0])): if i == 0: print("\r ",end = "") print("_ ",end = "") if i == len(field[0]) - 1: print("") #for enter to new line for i in range(len(field)): print("\r||",end = "") if i != len(field) - 1: for j in range(len(field[0])): if field[i][j] != 0: print("\033[0;37;47m[]\033[m",end = "") else: print(" ",end = "") else: for j in range(len(field[0])): if field[i][j] == 0: print("_ ",end = "") else: print("\033[0;37;47m[]\033[m",end = "") print("||") print("\rPress:\n\rW - Go up, S - Go down, A - Go left, D - Go right") print("\rX to exit the game") # def sleep(t): time.sleep(t) # def randomPoint(): x = random.randint(0,49) y = random.randint(0,24) return Point(x,y) #input: snake:Snake(object), key:string #output: Snake(object), int def doMovement(snake,directvector,vectorup,vectordown,vectorleft,vectorright,inputQueue): if not inputQueue.empty(): key = inputQueue.get() if(key == "x" or key == "X"): sys.exit() if (key == "w" or key == "W") and snake.direct != 3: #go up snake.direct = 1 directvector.setCoordinate(vectorup) elif (key == "d" or key == "D") and snake.direct != 4: #go right snake.direct = 2 directvector.setCoordinate(vectorright) elif (key == "s" or key == "S") and snake.direct != 1: #go down snake.direct = 3 directvector.setCoordinate(vectordown) elif (key == "a" or key == "A") and 
snake.direct != 2: #go left snake.direct = 4 directvector.setCoordinate(vectorleft) return snake, directvector, inputQueue # def SnakeGame(vectorup,vectordown,vectorleft,vectorright,inputQueue): #initialize field newField = initField() field = copy.deepcopy(newField) #initialize snake snake = Snake(Point(5,5),2) snake.part.append(Point(snake.part[0].x-1,snake.part[0].y)) snake.part.append(Point(snake.part[0].x-2,snake.part[0].y)) # point = randomPoint() directvector = Point(vectorright.x,vectorright.y) #able to modify then preSec = int(round(time.time()*1000)) score = 0 flag = False tempPoint = Point(0,0) #begin the game mergeToField(field,snake,point) display(field,score) # while True: flag = False if int(round(time.time()*1000)) - preSec >= 200: if not inputQueue.empty(): snake, directvector, inputQueue = doMovement(snake,directvector,vectorup,vectordown,vectorleft,vectorright,inputQueue) inputQueue.queue.clear() if point.x == snake.part[0].x and point.y == snake.part[0].y: score += 20 tempPoint.setCoordinate(snake.part[len(snake.part)-1]) flag = True del point point = randomPoint() snake.movement(directvector) field = copy.deepcopy(newField) mergeToField(field,snake,point) display(field,score) if flag == True: snake.part.append(Point(tempPoint.x,tempPoint.y)) preSec = int(round(time.time()*1000)) if checkCollision(snake): print("\r\033[1;31mGAME OVER\033[m") sys.exit() sleep(0.01) #initialize threads drive1 = threading.Thread(target=SnakeGame,args=(VectorUp,VectorDown,VectorLeft,VectorRight,inputQueue)) drive2 = threading.Thread(target=getKey) #start threads drive1.start() drive2.start()
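# The game above decouples keyboard input from the render loop with a small
# producer/consumer pattern: getKey() blocks on readchar.readkey() in its own
# thread and feeds a queue.Queue, which the game loop drains without blocking.
# A stand-alone sketch of that pattern (poll_key and its default are illustrative):
import queue
import threading

import readchar

keys = queue.Queue()


def key_reader():
    while True:
        keys.put(readchar.readkey())


threading.Thread(target=key_reader, daemon=True).start()


def poll_key(default=None):
    """Return the latest pending key press, or default if none arrived."""
    key = default
    while not keys.empty():
        key = keys.get()
    return key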
camera.py
import configparser import logging import math import os import pathlib import threading import time import glob from contextlib import contextmanager from functools import wraps from io import BytesIO from pathlib import Path from queue import Queue from typing import List import cv2 from PIL import Image, _webp from telegram import Message from configuration import ConfigWrapper from klippy import Klippy from power_device import PowerDevice logger = logging.getLogger(__name__) def cam_light_toggle(func): @wraps(func) def wrapper(self, *args, **kwargs): self.use_light() if self.light_timeout > 0 and self.light_device and not self.light_device.device_state and not self.light_lock.locked(): self.light_timer_event.clear() self.light_lock.acquire() self.light_need_off = True self.light_device.switch_device(True) time.sleep(self.light_timeout) self.light_timer_event.set() self.light_timer_event.wait() # Todo: maybe add try block? result = func(self, *args, **kwargs) self.free_light() def delayed_light_off(): if self.light_requests == 0: if self.light_lock.locked(): self.light_lock.release() self.light_need_off = False self.light_device.switch_device(False) else: logger.debug(f"light requests count: {self.light_requests}") if self.light_need_off and self.light_requests == 0: threading.Timer(1.5, delayed_light_off).start() return result return wrapper class Camera: def __init__(self, config: ConfigWrapper, klippy: Klippy, light_device: PowerDevice, logging_handler: logging.Handler = None): self.enabled: bool = config.camera.enabled self._host = int(config.camera.host) if str.isdigit(config.camera.host) else config.camera.host self._threads: int = config.camera.threads self._flip_vertically: bool = config.camera.flip_vertically self._flip_horizontally: bool = config.camera.flip_horizontally self._fourcc: str = config.camera.fourcc self._video_duration: int = config.camera.video_duration self._video_buffer_size: int = config.camera.video_buffer_size self._stream_fps: int = config.camera.stream_fps self._klippy: Klippy = klippy # Todo: refactor into timelapse class self._base_dir: str = config.timelapse.base_dir self._ready_dir: str = config.timelapse.ready_dir self._cleanup: bool = config.timelapse.cleanup self._target_fps: int = 15 self._min_lapse_duration: int = 0 self._max_lapse_duration: int = 0 self._last_frame_duration: int = 5 self._light_need_off: bool = False self._light_need_off_lock = threading.Lock() self.light_timeout: int = config.camera.light_timeout self.light_device: PowerDevice = light_device self._camera_lock = threading.Lock() self.light_lock = threading.Lock() self.light_timer_event = threading.Event() self.light_timer_event.set() self._hw_accel: bool = False if config.camera.picture_quality == 'low': self._img_extension: str = 'jpeg' elif config.camera.picture_quality == 'high': self._img_extension: str = 'webp' else: self._img_extension: str = config.camera.picture_quality self._light_requests: int = 0 self._light_request_lock = threading.Lock() if self._flip_vertically and self._flip_horizontally: self._flip = -1 elif self._flip_horizontally: self._flip = 1 elif self._flip_vertically: self._flip = 0 if config.camera.rotate == '90_cw': self._rotate_code: int = cv2.ROTATE_90_CLOCKWISE elif config.camera.rotate == '90_ccw': self._rotate_code: int = cv2.ROTATE_90_COUNTERCLOCKWISE elif config.camera.rotate == '180': self._rotate_code: int = cv2.ROTATE_180 else: self._rotate_code: int = -10 if logging_handler: logger.addHandler(logging_handler) if config.bot.debug: 
logger.setLevel(logging.DEBUG) logger.debug(cv2.getBuildInformation()) os.environ["OPENCV_VIDEOIO_DEBUG"] = "1" # Fixme: deprecated! use T-API https://learnopencv.com/opencv-transparent-api/ if cv2.ocl.haveOpenCL(): logger.debug('OpenCL is available') cv2.ocl.setUseOpenCL(True) logger.debug(f'OpenCL in OpenCV is enabled: {cv2.ocl.useOpenCL()}') cv2.setNumThreads(self._threads) self.cam_cam = cv2.VideoCapture() self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1) @property def light_need_off(self) -> bool: with self._light_need_off_lock: return self._light_need_off @light_need_off.setter def light_need_off(self, new_value: bool): with self._light_need_off_lock: self._light_need_off = new_value @property def lapse_dir(self) -> str: return f'{self._base_dir}/{self._klippy.printing_filename_with_time}' @property def light_requests(self) -> int: with self._light_request_lock: return self._light_requests def use_light(self): with self._light_request_lock: self._light_requests += 1 def free_light(self): with self._light_request_lock: self._light_requests -= 1 @property def target_fps(self) -> int: return self._target_fps @target_fps.setter def target_fps(self, new_value: int): self._target_fps = new_value @property def min_lapse_duration(self) -> int: return self._min_lapse_duration @min_lapse_duration.setter def min_lapse_duration(self, new_value: int): if new_value >= 0: self._min_lapse_duration = new_value @property def max_lapse_duration(self) -> int: return self._max_lapse_duration @max_lapse_duration.setter def max_lapse_duration(self, new_value: int): if new_value >= 0: self._max_lapse_duration = new_value @property def last_frame_duration(self) -> int: return self._last_frame_duration @last_frame_duration.setter def last_frame_duration(self, new_value: int): if new_value >= 0: self._last_frame_duration = new_value @staticmethod def _create_thumb(image) -> BytesIO: # cv2.cvtColor cause segfaults! img = Image.fromarray(image[:, :, [2, 1, 0]]) bio = BytesIO() bio.name = 'thumbnail.jpeg' img.thumbnail((320, 320)) img.save(bio, 'JPEG', quality=100, optimize=True) bio.seek(0) img.close() del img return bio @cam_light_toggle def take_photo(self) -> BytesIO: with self._camera_lock: self.cam_cam.open(self._host) self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1) success, image = self.cam_cam.read() self.cam_cam.release() if not success: logger.debug("failed to get camera frame for photo") # Todo: resize to cam resolution! img = Image.open('../imgs/nosignal.png') else: if self._hw_accel: image_um = cv2.UMat(image) if self._flip_vertically or self._flip_horizontally: image_um = cv2.flip(image_um, self._flip) img = Image.fromarray(cv2.UMat.get(cv2.cvtColor(image_um, cv2.COLOR_BGR2RGB))) image_um = None del image_um else: if self._flip_vertically or self._flip_horizontally: image = cv2.flip(image, self._flip) # Todo: check memory leaks if self._rotate_code > -10: image = cv2.rotate(image, rotateCode=self._rotate_code) # # cv2.cvtColor cause segfaults! 
# rgb = image[:, :, ::-1] rgb = image[:, :, [2, 1, 0]] img = Image.fromarray(rgb) rgb = None del rgb image = None del image, success bio = BytesIO() bio.name = f'status.{self._img_extension}' if self._img_extension in ['jpg', 'jpeg']: img.save(bio, 'JPEG', quality=80, subsampling=0) elif self._img_extension == 'webp': # https://github.com/python-pillow/Pillow/issues/4364 _webp.HAVE_WEBPANIM = False img.save(bio, 'WebP', quality=0, lossless=True) elif self._img_extension == 'png': img.save(bio, 'PNG') bio.seek(0) img.close() del img return bio @contextmanager def take_video_generator(self): (video_bio, thumb_bio, width, height) = self.take_video() try: yield video_bio, thumb_bio, width, height finally: video_bio.close() thumb_bio.close() @cam_light_toggle def take_video(self) -> (BytesIO, BytesIO, int, int): def process_video_frame(frame_local): if self._flip_vertically or self._flip_horizontally: if self._hw_accel: frame_loc_ = cv2.UMat(frame_local) frame_loc_ = cv2.flip(frame_loc_, self._flip) frame_local = cv2.UMat.get(frame_loc_) del frame_loc_ else: frame_local = cv2.flip(frame_local, self._flip) # Todo: check memory leaks if self._rotate_code > -10: frame_local = cv2.rotate(frame_local, rotateCode=self._rotate_code) return frame_local def write_video(): cv2.setNumThreads(self._threads) out = cv2.VideoWriter(filepath, fourcc=cv2.VideoWriter_fourcc(*self._fourcc), fps=fps_cam, frameSize=(width, height)) while video_lock.locked(): try: frame_local = frame_queue.get(block=False) except Exception as ex: logger.warning(f'Reading video frames queue exception {ex.with_traceback}') frame_local = frame_queue.get() out.write(process_video_frame(frame_local)) # frame_local = None # del frame_local while not frame_queue.empty(): frame_local = frame_queue.get() out.write(process_video_frame(frame_local)) # frame_local = None # del frame_local out.release() video_written_event.set() with self._camera_lock: cv2.setNumThreads(self._threads) # TOdo: check self set and remove! self.cam_cam.open(self._host) self.cam_cam.set(cv2.CAP_PROP_BUFFERSIZE, 1) success, frame = self.cam_cam.read() if not success: logger.debug("failed to get camera frame for video") # Todo: get picture from imgs? frame = process_video_frame(frame) height, width, channels = frame.shape thumb_bio = self._create_thumb(frame) del frame, channels fps_cam = self.cam_cam.get(cv2.CAP_PROP_FPS) if self._stream_fps == 0 else self._stream_fps filepath = os.path.join('/tmp/', 'video.mp4') frame_queue = Queue(fps_cam * self._video_buffer_size) video_lock = threading.Lock() video_written_event = threading.Event() video_written_event.clear() video_lock.acquire() threading.Thread(target=write_video, args=()).start() t_end = time.time() + self._video_duration while success and time.time() <= t_end: success, frame_loc = self.cam_cam.read() try: frame_queue.put(frame_loc, block=False) except Exception as ex: logger.warning(f'Writing video frames queue exception {ex.with_traceback}') frame_queue.put(frame_loc) # frame_loc = None # del frame_loc video_lock.release() video_written_event.wait() self.cam_cam.release() video_bio = BytesIO() video_bio.name = 'video.mp4' with open(filepath, 'rb') as fh: video_bio.write(fh.read()) os.remove(filepath) video_bio.seek(0) return video_bio, thumb_bio, width, height def take_lapse_photo(self) -> None: # Todo: check for space available? Path(self.lapse_dir).mkdir(parents=True, exist_ok=True) # never add self in params there! 
with self.take_photo() as photo: filename = f'{self.lapse_dir}/{time.time()}.{self._img_extension}' with open(filename, "wb") as outfile: outfile.write(photo.getvalue()) photo.close() def create_timelapse(self, printing_filename: str, gcode_name: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str): return self._create_timelapse(printing_filename, gcode_name, info_mess) def create_timelapse_for_file(self, filename: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str): return self._create_timelapse(filename, filename, info_mess) def _calculate_fps(self, frames_count: int) -> int: actual_duration = frames_count / self._target_fps # Todo: check _max_lapse_duration > _min_lapse_duration if (self._min_lapse_duration == 0 and self._max_lapse_duration == 0) or (self._min_lapse_duration <= actual_duration <= self._max_lapse_duration and self._max_lapse_duration > 0) or ( actual_duration > self._min_lapse_duration and self._max_lapse_duration == 0): return self._target_fps elif actual_duration < self._min_lapse_duration and self._min_lapse_duration > 0: fps = math.ceil(frames_count / self._min_lapse_duration) return fps if fps >= 1 else 1 elif actual_duration > self._max_lapse_duration > 0: return math.ceil(frames_count / self._max_lapse_duration) else: logger.error(f"Unknown fps calculation state for durations min:{self._min_lapse_duration} and max:{self._max_lapse_duration} and actual:{actual_duration}") return self._target_fps def _create_timelapse(self, printing_filename: str, gcode_name: str, info_mess: Message) -> (BytesIO, BytesIO, int, int, str, str): if not printing_filename: raise ValueError(f'Gcode file name is empty') while self.light_need_off: time.sleep(1) lapse_dir = f'{self._base_dir}/{printing_filename}' if not Path(f'{lapse_dir}/lapse.lock').is_file(): open(f'{lapse_dir}/lapse.lock', mode='a').close() # Todo: check for nonempty photos! photos = glob.glob(f'{glob.escape(lapse_dir)}/*.{self._img_extension}') photos.sort(key=os.path.getmtime) photo_count = len(photos) if photo_count == 0: raise ValueError(f"Empty photos list for {printing_filename} in lapse path {lapse_dir}") info_mess.edit_text(text=f"Creating thumbnail") last_photo = photos[-1] img = cv2.imread(last_photo) height, width, layers = img.shape thumb_bio = self._create_thumb(img) video_filepath = f'{lapse_dir}/{printing_filename}.mp4' if Path(video_filepath).is_file(): os.remove(video_filepath) lapse_fps = self._calculate_fps(photo_count) with self._camera_lock: cv2.setNumThreads(self._threads) # TOdo: check self set and remove! out = cv2.VideoWriter(video_filepath, fourcc=cv2.VideoWriter_fourcc(*self._fourcc), fps=lapse_fps, frameSize=(width, height)) info_mess.edit_text(text=f"Images recoding") last_update_time = time.time() for fnum, filename in enumerate(photos): if time.time() >= last_update_time + 3: info_mess.edit_text(text=f"Images recoded {fnum}/{photo_count}") last_update_time = time.time() out.write(cv2.imread(filename)) info_mess.edit_text(text=f"Repeating last image for {self._last_frame_duration} seconds") for _ in range(lapse_fps * self._last_frame_duration): out.write(img) out.release() cv2.destroyAllWindows() del out del photos, img, layers # Todo: some error handling? 
video_bio = BytesIO() video_bio.name = f'{printing_filename}.mp4' with open(video_filepath, 'rb') as fh: video_bio.write(fh.read()) if self._ready_dir and os.path.isdir(self._ready_dir): info_mess.edit_text(text=f"Copy lapse to target ditectory") with open(f"{self._ready_dir}/{printing_filename}.mp4", 'wb') as cpf: cpf.write(video_bio.getvalue()) video_bio.seek(0) os.remove(f'{lapse_dir}/lapse.lock') if self._cleanup: info_mess.edit_text(text=f"Performing cleanups") for filename in glob.glob(f'{glob.escape(lapse_dir)}/*.{self._img_extension}'): os.remove(filename) if video_bio.getbuffer().nbytes < 52428800: for filename in glob.glob(f'{glob.escape(lapse_dir)}/*'): os.remove(filename) Path(lapse_dir).rmdir() return video_bio, thumb_bio, width, height, video_filepath, gcode_name def clean(self) -> None: if self._cleanup and self._klippy.printing_filename and os.path.isdir(self.lapse_dir): for filename in glob.glob(f'{glob.escape(self.lapse_dir)}/*'): os.remove(filename) # Todo: refactor into timelapse class # Todo: check for 64 symbols length in lapse names def detect_unfinished_lapses(self) -> List[str]: # Todo: detect unstarted timelapse builds? folder with pics and no mp4 files return list(map(lambda el: pathlib.PurePath(el).parent.name, glob.glob(f'{self._base_dir}/*/*.lock')))
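# The cam_light_toggle decorator above keeps the light on while any capture is
# in flight by counting outstanding requests and deferring the switch-off with
# threading.Timer until the count reaches zero. A stripped-down sketch of that
# reference-counting idea (the Light class and with_light helper are
# illustrative, not the PowerDevice / Camera API):
import threading


class Light:
    def __init__(self):
        self.on = False
        self._requests = 0
        self._lock = threading.Lock()

    def acquire(self):
        with self._lock:
            self._requests += 1
            self.on = True

    def release(self):
        with self._lock:
            self._requests -= 1

    def maybe_off(self):
        # If another capture started meanwhile, check again later instead of
        # switching the light off underneath it.
        with self._lock:
            busy = self._requests > 0
        if busy:
            threading.Timer(1.5, self.maybe_off).start()
        else:
            self.on = False


def with_light(light, capture):
    light.acquire()
    try:
        return capture()
    finally:
        light.release()
        threading.Timer(1.5, light.maybe_off).start()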
test.py
# -*- coding: utf-8 -*- import redis import unittest from hotels import hotels import random import time from RLTest import Env from includes import * from common import getConnectionByEnv, waitForIndex, toSortedFlatList, assertInfoField, check_server_version, check_module_version # this tests is not longer relevant # def testAdd(env): # if env.is_cluster(): # raise unittest.SkipTest() # r = env # env.assertOk(r.execute_command( # 'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text')) # env.assertTrue(r.exists('idx:idx')) # env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields', # 'title', 'hello world', # 'body', 'lorem ist ipsum')) # for _ in r.retry_with_rdb_reload(): # prefix = 'ft' # env.assertExists(prefix + ':idx/hello') # env.assertExists(prefix + ':idx/world') # env.assertExists(prefix + ':idx/lorem') def testAddErrors(env): env.expect('ft.create idx ON HASH schema foo text bar numeric sortable').equal('OK') env.expect('ft.add idx doc1 1 redis 4').error().contains('Unknown keyword') env.expect('ft.add idx doc1').error().contains("wrong number of arguments") env.expect('ft.add idx doc1 42').error().contains("Score must be between 0 and 1") env.expect('ft.add idx doc1 1.0').error().contains("No field list found") env.expect('ft.add fake_idx doc1 1.0 fields foo bar').error().contains("Unknown index name") def assertEqualIgnoreCluster(env, val1, val2): # todo: each test that uses this function should be switch back to env.assertEqual once fix # issues on coordinator if env.isCluster(): return env.assertEqual(val1, val2) def testConditionalUpdate(env): env.assertOk(env.cmd( 'ft.create', 'idx','ON', 'HASH', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable')) env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'fields', 'foo', 'hello', 'bar', '123')) env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123')) env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123')) env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123')) env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if', '@foo == "world"', 'fields', 'bar', '234')) env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if', '@bar == 234', 'fields', 'foo', 'hello', 'bar', '123')) # Ensure that conditionals are ignored if the document doesn't exist env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1', 'IF', '@bar > 42', 'FIELDS', 'bar', '15')) # Ensure that it fails if we try again, because it already exists env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1', 'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15')) # Ensure that it fails because we're not using 'REPLACE' with env.assertResponseError(): env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1', 'IF', '@bar > 42', 'FIELDS', 'bar', '15')) def testUnionIdList(env): # Regression test for https://github.com/RediSearch/RediSearch/issues/306 r = env N = 100 env.assertOk(r.execute_command( "ft.create", "test", 'ON', 'HASH', "SCHEMA", "tags", "TAG", "waypoint", "GEO")) env.assertOk(r.execute_command( "ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244")) env.assertOk(r.execute_command( "ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667")) r.cmd('ft.search', 'test', '@tags:{ontario}') res = r.execute_command( 'ft.search', 'test', "@waypoint:[-113.52 53.52 
20 mi]|@tags:{ontario}", 'nocontent') env.assertEqual(res, [2L, '1', '2']) def testAttributes(env): env.assertOk(env.cmd('ft.create', 'idx','ON', 'HASH', 'schema', 'title', 'text', 'body', 'text')) env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'title', 't1 t2', 'body', 't3 t4 t5')) env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'body', 't1 t2', 'title', 't3 t5')) res = env.cmd( 'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent') env.assertListEqual([2L, 'doc2', 'doc1'], res) res = env.cmd( 'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent') env.assertListEqual([2L, 'doc1', 'doc2'], res) res = env.cmd( 'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent') env.assertListEqual([2L, 'doc2', 'doc1'], res) res = env.cmd( 'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent') env.assertListEqual([1L, 'doc2'], res) res = env.cmd( 'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent') env.assertListEqual([0], res) def testUnion(env): N = 100 r = env env.assertOk(r.execute_command( 'ft.create', 'idx','ON', 'HASH', 'schema', 'f', 'text')) for i in range(N): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'f', 'hello world' if i % 2 == 0 else 'hallo werld')) for _ in r.retry_with_rdb_reload(): waitForIndex(r, 'idx') res = r.execute_command( 'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100') env.assertEqual(N + 1, len(res)) env.assertEqual(N, res[0]) res = r.execute_command( 'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100') env.assertEqual(51, len(res)) env.assertEqual(50, res[0]) res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)', 'nocontent', 'verbatim', 'limit', '0', '100') env.assertEqual(51, len(res)) env.assertEqual(50, res[0]) res = r.execute_command( 'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100') env.assertEqual(101, len(res)) env.assertEqual(100, res[0]) res = r.execute_command( 'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100') env.assertEqual(101, len(res)) env.assertEqual(100, res[0]) res = r.execute_command( 'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100') env.assertEqual(101, len(res)) env.assertEqual(100, res[0]) res = r.execute_command( 'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100') env.assertEqual(51, len(res)) env.assertEqual(50, res[0]) res = r.execute_command( 'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)', 'nocontent', 'verbatim', 'limit', '0', '100') env.assertEqual(101, len(res)) env.assertEqual(100, res[0]) def testSearch(env): r = env r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text').ok() r.expect('ft.add', 'idx', 'doc1', 0.5, 'fields','title', 'hello world', 'body', 'lorem ist ipsum').ok() r.expect('ft.add', 'idx', 'doc2', 1.0, 'fields', 'title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem').ok() # order of documents might change after reload for _ in r.retry_with_rdb_reload(): waitForIndex(env, 'idx') res = r.execute_command('ft.search', 'idx', 'hello') expected = [2L, 'doc2', ['title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem'], 'doc1', ['title', 'hello world', 'body', 'lorem ist ipsum']] 
env.assertEqual(toSortedFlatList(res), toSortedFlatList(expected)) # Test empty query res = r.execute_command('ft.search', 'idx', '') env.assertListEqual([0], res) # Test searching with no content res = r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent') env.assertTrue(len(res) == 3) expected = ['doc2', 'doc1'] env.assertEqual(res[0], 2L) for item in expected: env.assertIn(item, res) # Test searching WITHSCORES res = r.execute_command('ft.search', 'idx', 'hello', 'WITHSCORES') env.assertEqual(len(res), 7) env.assertEqual(res[0], 2L) for item in expected: env.assertIn(item, res) env.assertTrue(float(res[2]) > 0) env.assertTrue(float(res[5]) > 0) # Test searching WITHSCORES NOCONTENT res = r.execute_command('ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT') env.assertEqual(len(res), 5) env.assertEqual(res[0], 2L) for item in expected: env.assertIn(item, res) env.assertTrue(float(res[2]) > 0) env.assertTrue(float(res[4]) > 0) def testGet(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'bar', 'text')) env.expect('ft.get').error().contains("wrong number of arguments") env.expect('ft.get', 'idx').error().contains("wrong number of arguments") env.expect('ft.get', 'idx', 'foo', 'bar').error().contains("wrong number of arguments") env.expect('ft.mget').error().contains("wrong number of arguments") env.expect('ft.mget', 'idx').error().contains("wrong number of arguments") env.expect('ft.mget', 'fake_idx').error().contains("wrong number of arguments") env.expect('ft.get fake_idx foo').error().contains("Unknown Index name") env.expect('ft.mget fake_idx foo').error().contains("Unknown Index name") for i in range(100): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'foo', 'hello world', 'bar', 'wat wat')) for i in range(100): res = r.execute_command('ft.get', 'idx', 'doc%d' % i) env.assertIsNotNone(res) env.assertEqual(set(['foo', 'hello world', 'bar', 'wat wat']), set(res)) env.assertIsNone(r.execute_command( 'ft.get', 'idx', 'doc%dsdfsd' % i)) env.expect('ft.get', 'no_idx', 'doc0').error().contains("Unknown Index name") rr = r.execute_command( 'ft.mget', 'idx', *('doc%d' % i for i in range(100))) env.assertEqual(len(rr), 100) for res in rr: env.assertIsNotNone(res) env.assertEqual(set(['foo', 'hello world', 'bar', 'wat wat']), set(res)) rr = r.execute_command( 'ft.mget', 'idx', *('doc-%d' % i for i in range(100))) env.assertEqual(len(rr), 100) for res in rr: env.assertIsNone(res) # Verify that when a document is deleted, GET returns NULL r.cmd('ft.del', 'idx', 'doc10') # But we still keep the document r.cmd('ft.del', 'idx', 'doc11') assert r.cmd('ft.del', 'idx', 'coverage') == 0 res = r.cmd('ft.get', 'idx', 'doc10') r.assertEqual(None, res) res = r.cmd('ft.mget', 'idx', 'doc10') r.assertEqual([None], res) res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12') r.assertIsNone(res[0]) r.assertIsNone(res[1]) r.assertTrue(not not res[2]) env.expect('ft.add idx doc 0.1 language arabic payload redislabs fields foo foo').ok() env.expect('ft.get idx doc').equal(['foo', 'foo']) res = env.cmd('hgetall doc') env.assertEqual(set(res), set(['foo', 'foo', '__score', '0.1', '__language', 'arabic', '__payload', 'redislabs'])) def testDelete(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text')) for i in range(100): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'f', 'hello world')) env.expect('ft.del', 'fake_idx', 'doc1').error() for i in 
range(100): # the doc hash should exist now r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError() # Delete the actual docs only half of the time env.assertEqual(1, r.execute_command( 'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else '')) # second delete should return 0 env.assertEqual(0, r.execute_command( 'ft.del', 'idx', 'doc%d' % i)) # second delete should return 0 # TODO: return 0 if doc wasn't found #env.assertEqual(0, r.execute_command( # 'ft.del', 'idx', 'doc%d' % i)) # After del with DD the doc hash should not exist if i % 2 == 0: env.assertFalse(r.exists('doc%d' % i)) else: r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError() res = r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100) env.assertNotIn('doc%d' % i, res) env.assertEqual(res[0], 100 - i - 1) env.assertEqual(len(res), 100 - i) # test reinsertion env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'f', 'hello world')) res = r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100) env.assertIn('doc%d' % i, res) env.assertEqual(1, r.execute_command( 'ft.del', 'idx', 'doc%d' % i)) for _ in r.retry_with_rdb_reload(): waitForIndex(env, 'idx') did = 'rrrr' env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields', 'f', 'hello world')) env.assertEqual(1, r.execute_command('ft.del', 'idx', did)) env.assertEqual(0, r.execute_command('ft.del', 'idx', did)) env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields', 'f', 'hello world')) env.assertEqual(1, r.execute_command('ft.del', 'idx', did)) env.assertEqual(0, r.execute_command('ft.del', 'idx', did)) def testReplace(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f', 'hello world')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields', 'f', 'hello world')) res = r.execute_command( 'ft.search', 'idx', 'hello world') env.assertEqual(2, res[0]) with env.assertResponseError(): # make sure we can't insert a doc twice res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f', 'hello world') # now replace doc1 with a different content env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields', 'f', 'goodbye universe')) for _ in r.retry_with_rdb_reload(): waitForIndex(env, 'idx') # make sure the query for hello world does not return the replaced # document res = r.execute_command( 'ft.search', 'idx', 'hello world', 'nocontent') env.assertEqual(1, res[0]) env.assertEqual('doc2', res[1]) # search for the doc's new content res = r.execute_command( 'ft.search', 'idx', 'goodbye universe', 'nocontent') env.assertEqual(1, res[0]) env.assertEqual('doc1', res[1]) def testDrop(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo')) for i in range(100): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar', 'g', '19.04,47.497')) keys = r.keys('*') env.assertGreaterEqual(len(keys), 100) env.assertOk(r.execute_command('ft.drop', 'idx')) keys = r.keys('*') env.assertEqual(0, len(keys)) env.flush() # Now do the same with KEEPDOCS env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo')) for i in range(100): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'f', 'hello world', 'n', 666, 't', 
'foo bar', 'g', '19.04,47.497')) keys = r.keys('*') env.assertGreaterEqual(len(keys), 100) if not env.is_cluster(): env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS')) keys = r.keys('*') env.assertListEqual(['doc0', 'doc1', 'doc10', 'doc11', 'doc12', 'doc13', 'doc14', 'doc15', 'doc16', 'doc17', 'doc18', 'doc19', 'doc2', 'doc20', 'doc21', 'doc22', 'doc23', 'doc24', 'doc25', 'doc26', 'doc27', 'doc28', 'doc29', 'doc3', 'doc30', 'doc31', 'doc32', 'doc33', 'doc34', 'doc35', 'doc36', 'doc37', 'doc38', 'doc39', 'doc4', 'doc40', 'doc41', 'doc42', 'doc43', 'doc44', 'doc45', 'doc46', 'doc47', 'doc48', 'doc49', 'doc5', 'doc50', 'doc51', 'doc52', 'doc53', 'doc54', 'doc55', 'doc56', 'doc57', 'doc58', 'doc59', 'doc6', 'doc60', 'doc61', 'doc62', 'doc63', 'doc64', 'doc65', 'doc66', 'doc67', 'doc68', 'doc69', 'doc7', 'doc70', 'doc71', 'doc72', 'doc73', 'doc74', 'doc75', 'doc76', 'doc77', 'doc78', 'doc79', 'doc8', 'doc80', 'doc81', 'doc82', 'doc83', 'doc84', 'doc85', 'doc86', 'doc87', 'doc88', 'doc89', 'doc9', 'doc90', 'doc91', 'doc92', 'doc93', 'doc94', 'doc95', 'doc96', 'doc97', 'doc98', 'doc99'], sorted(keys)) env.expect('FT.DROP', 'idx', 'KEEPDOCS', '666').error().contains("wrong number of arguments") def testDelete(env): r = env r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo').ok() for i in range(100): r.expect('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar', 'g', '19.04,47.497').ok() keys = r.keys('*') env.assertGreaterEqual(len(keys), 100) r.expect('FT.DROPINDEX', 'idx', 'dd').ok() keys = r.keys('*') env.assertEqual(0, len(keys)) env.flush() # Now do the same with KEEPDOCS env.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo').ok() for i in range(100): r.expect('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar', 'g', '19.04,47.497').ok() keys = r.keys('*') env.assertGreaterEqual(len(keys), 100) if not env.is_cluster(): r.expect('FT.DROPINDEX', 'idx').ok() keys = r.keys('*') env.assertListEqual(sorted("doc%d" %k for k in range(100)), sorted(keys)) env.expect('FT.DROPINDEX', 'idx', 'dd', '666').error().contains("wrong number of arguments") def testCustomStopwords(env): r = env # Index with default stopwords env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text')) # Index with custom stopwords env.assertOk(r.execute_command('ft.create', 'idx2', 'ON', 'HASH', 'stopwords', 2, 'hello', 'world', 'schema', 'foo', 'text')) assertInfoField(env, 'idx2', 'stopwords_list', ['hello', 'world']) # Index with NO stopwords env.assertOk(r.execute_command('ft.create', 'idx3', 'ON', 'HASH', 'stopwords', 0, 'schema', 'foo', 'text')) assertInfoField(env, 'idx3', 'stopwords_list', []) # 2nd Index with NO stopwords - check global is used and freed env.assertOk(r.execute_command('ft.create', 'idx4', 'ON', 'HASH', 'stopwords', 0, 'schema', 'foo', 'text')) #for idx in ('idx', 'idx2', 'idx3'): env.assertOk(r.execute_command( 'ft.add', 'idx', 'doc1', 1.0, 'fields', 'foo', 'hello world')) env.assertOk(r.execute_command( 'ft.add', 'idx', 'doc2', 1.0, 'fields', 'foo', 'to be or not to be')) for _ in r.retry_with_rdb_reload(): waitForIndex(r, 'idx') # Normal index should return results just for 'hello world' env.assertEqual([1, 'doc1'], r.execute_command( 'ft.search', 'idx', 'hello world', 'nocontent')) env.assertEqual([0], r.execute_command( 'ft.search', 'idx', 'to be or not', 'nocontent')) # Custom 
SW index should return results just for 'to be or not' env.assertEqual([0], r.execute_command( 'ft.search', 'idx2', 'hello world', 'nocontent')) env.assertEqual([1, 'doc2'], r.execute_command( 'ft.search', 'idx2', 'to be or not', 'nocontent')) # No SW index should return results for both env.assertEqual([1, 'doc1'], r.execute_command( 'ft.search', 'idx3', 'hello world', 'nocontent')) env.assertEqual([1, 'doc2'], r.execute_command( 'ft.search', 'idx3', 'to be or not', 'nocontent')) def testStopwords(env): # This test was taken from Python's tests, and failed due to some changes # made earlier env.cmd('ft.create', 'idx', 'ON', 'HASH', 'stopwords', 3, 'foo', 'bar', 'baz', 'schema', 'txt', 'text') env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar') env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world') r1 = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent') r2 = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent') env.assertEqual(0, r1[0]) env.assertEqual(1, r2[0]) def testNoStopwords(env): # This test taken from Java's test suite env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text') for i in range(100): env.cmd('ft.add', 'idx', 'doc{}'.format(i), 1.0, 'fields', 'title', 'hello world' if i % 2 == 0 else 'hello worlds') res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT') env.assertEqual(100, res[0]) res = env.cmd('ft.search', 'idx', 'hello a world', 'VERBATIM', 'NOCONTENT') env.assertEqual(50, res[0]) res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS') env.assertEqual(0, res[0]) def testOptional(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields', 'foo', 'hello wat woot')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields', 'foo', 'hello world woot')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1.0, 'fields', 'foo', 'hello world werld')) expected = [3L, 'doc1', 'doc2', 'doc3'] res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent') env.assertEqual(res, expected) res = r.execute_command( 'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX') env.assertEqual([2L, 'doc2', 'doc3'], res) res = r.execute_command( 'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX') env.assertEqual(res, expected) res = r.execute_command( 'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX') env.assertEqual(res, expected) res = r.execute_command( 'ft.search', 'idx', '~world ~werld hello', 'withscores', 'nocontent', 'scorer', 'DISMAX') env.assertEqual(res, [3L, 'doc3', '3', 'doc2', '2', 'doc1', '1']) def testExplain(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable')) q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]' res = r.execute_command('ft.explain', 'idx', q) # print res.replace('\n', '\\n') # expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n""" # expected = """INTERSECT {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION 
{\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n""" expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n""" env.assertEqual(res, expected) # expected = ['INTERSECT {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', ''] if env.is_cluster(): raise unittest.SkipTest() res = env.cmd('ft.explainCli', 'idx', q) expected = ['INTERSECT {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', ''] env.assertEqual(expected, res) def testNoIndex(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'num', 'numeric', 'sortable', 'noindex', 'extra', 'text', 'noindex', 'sortable')) if not env.isCluster(): # to specific check on cluster, todo : change it to be generic enough res = env.cmd('ft.info', 'idx') env.assertEqual(res[7][1][4], 'NOINDEX') env.assertEqual(res[7][2][6], 'NOINDEX') env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields', 'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum')) res = r.execute_command( 'ft.search', 'idx', 'hello world', 'nocontent') env.assertListEqual([1, 'doc1'], res) res = r.execute_command( 'ft.search', 'idx', 'lorem ipsum', 'nocontent') env.assertListEqual([0], res) res = r.execute_command( 'ft.search', 'idx', '@extra:hello', 'nocontent') env.assertListEqual([0], res) res = r.execute_command( 'ft.search', 'idx', '@num:[1 1]', 'nocontent') env.assertListEqual([0], res) def testPartial(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'SCORE_FIELD', '__score', 'schema', 'foo', 'text', 'num', 'numeric', 'sortable', 'noindex', 'extra', 'text', 'noindex')) # print r.execute_command('ft.info', 'idx') env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields', 'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields', 'foo', 'hello world', 'num', 2, 'extra', 'abba')) res = r.execute_command('ft.search', 'idx', 'hello world', 'sortby', 'num', 'asc', 'nocontent', 'withsortkeys') env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res) res = r.execute_command('ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc', 'nocontent', 'withsortkeys') env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res) # Updating non indexed fields doesn't affect search results env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 
'partial', 'fields', 'num', 3, 'extra', 'jorem gipsum')) env.expect('ft.add', 'idx', 'doc12', '0.1', 'replace', 'partial', 'fields', 'num1', 'redis').equal('OK') res = r.execute_command( 'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',) assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3','extra', 'jorem gipsum'], 'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res) res = r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent', 'withscores') # Updating only indexed field affects search results env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial', 'fields', 'foo', 'wat wet')) res = r.execute_command( 'ft.search', 'idx', 'hello world', 'nocontent') env.assertListEqual([1L, 'doc2'], res) res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent') env.assertListEqual([1L, 'doc1'], res) # Test updating of score and no fields res = r.execute_command( 'ft.search', 'idx', 'wat', 'nocontent', 'withscores') env.assertLess(float(res[2]), 1) # env.assertListEqual([1L, 'doc1'], res) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0', 'replace', 'partial', 'fields')) res = r.execute_command( 'ft.search', 'idx', 'wat', 'nocontent', 'withscores') # We reindex though no new fields, just score is updated. this effects score env.assertEqual(float(res[2]), 1) # Test updating payloads res = r.execute_command( 'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads') env.assertIsNone(res[2]) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0', 'replace', 'partial', 'payload', 'foobar', 'fields')) res = r.execute_command( 'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads') env.assertEqual('foobar', res[2]) def testPaging(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable')) N = 100 for i in range(N): env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields', 'foo', 'hello', 'bar', i)) chunk = 7 offset = 0 while True: res = r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk) env.assertEqual(res[0], N) if offset + chunk > N: env.assertTrue(len(res) - 1 <= chunk) break env.assertEqual(len(res), chunk + 1) for n, id in enumerate(res[1:]): env.assertEqual(int(id), N - 1 - (offset + n)) offset += chunk chunk = random.randrange(1, 10) res = r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10) env.assertEqual(res[0], N) env.assertEqual(len(res), 1) with env.assertResponseError(): r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1) with env.assertResponseError(): r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10) with env.assertResponseError(): r.execute_command( 'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000) def testPrefix(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text')) N = 100 for i in range(N): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'foo', 'constant term%d' % (random.randrange(0, 5)))) for _ in r.retry_with_rdb_reload(): waitForIndex(r, 'idx') res = r.execute_command( 'ft.search', 'idx', 'constant term', 'nocontent') env.assertEqual([0], res) res = r.execute_command( 'ft.search', 'idx', 'constant term*', 'nocontent') env.assertEqual(N, res[0]) res = r.execute_command( 'ft.search', 'idx', 'const* term*', 'nocontent') env.assertEqual(N, res[0]) res = 
r.execute_command( 'ft.search', 'idx', 'constant term1*', 'nocontent') env.assertGreater(res[0], 2) res = r.execute_command( 'ft.search', 'idx', 'const* -term*', 'nocontent') env.assertEqual([0], res) res = r.execute_command( 'ft.search', 'idx', 'constant term9*', 'nocontent') env.assertEqual([0], res) def testSortBy(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable')) N = 100 for i in range(N): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'foo', 'hello%03d world' % i, 'bar', 100 - i)) for _ in r.retry_with_rdb_reload(): waitForIndex(r, 'idx') res = r.execute_command( 'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo') env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3', 'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res) res = r.execute_command( 'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc') env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96', 'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res) res = r.execute_command( 'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc') env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3', 'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res) res = r.execute_command( 'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc') env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96', 'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res) res = r.execute_command('ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5') env.assertEqual( [100L, 'doc2', '1', 'doc3', '1', 'doc4', '1', 'doc5', '1', 'doc6', '1'], res) res = r.execute_command('ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5) env.assertListEqual( [100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res) res = r.execute_command('ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5) env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96', '$hello096 world', 'doc95', '$hello095 world'], res) def testSortByWithoutSortable(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'baz', 'text', 'sortable')) N = 100 for i in range(N): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'foo', 'hello%03d world' % i, 'bar', 100 - i)) for _ in r.retry_with_rdb_reload(): waitForIndex(r, 'idx') # test text res = r.execute_command( 'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo') env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3', 'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res) res = r.execute_command( 'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc') env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96', 'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res) res = r.execute_command('ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5) env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96', '$hello096 world', 'doc95', '$hello095 world'], res) # test numeric res = r.execute_command( 'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc') env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3', 'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res) res = 
r.execute_command( 'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc') env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96', 'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res) res = r.execute_command('ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5') env.assertEqual( [100L, 'doc2', '1', 'doc3', '1', 'doc4', '1', 'doc5', '1', 'doc6', '1'], res) res = r.execute_command('ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5) env.assertListEqual( [100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res) def testNot(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text')) N = 10 for i in range(N): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'foo', 'constant term%d' % (random.randrange(0, 5)))) for i in range(5): inclusive = r.execute_command( 'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N) exclusive = r.execute_command( 'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N) exclusive2 = r.execute_command( 'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N) exclusive3 = r.execute_command( 'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N) env.assertNotEqual(inclusive[0], N) env.assertEqual(inclusive[0] + exclusive[0], N) env.assertEqual(exclusive3[0], exclusive2[0]) env.assertEqual(exclusive3[0], exclusive[0]) s1, s2, s3, s4 = set(inclusive[1:]), set( exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:]) env.assertTrue(s1.difference(s2) == s1) env.assertTrue(s1.difference(s3) == s1) env.assertTrue(s1.difference(s4) == s1) env.assertTrue(s2 == s3) env.assertTrue(s2 == s4) env.assertTrue(s2.intersection(s1) == set()) env.assertTrue(s3.intersection(s1) == set()) env.assertTrue(s4.intersection(s1) == set()) # NOT on a non existing term env.assertEqual(r.execute_command( 'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N) # not on env term env.assertEqual(r.execute_command( 'ft.search', 'idx', 'constant -constant', 'nocontent'), [0]) env.assertEqual(r.execute_command( 'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0]) # env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N) def testNestedIntersection(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text')) for i in range(20): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz')) res = [ r.execute_command('ft.search', 'idx', 'foo bar baz gaz', 'nocontent'), r.execute_command('ft.search', 'idx', '@a:foo @b:bar @c:baz @d:gaz', 'nocontent'), r.execute_command('ft.search', 'idx', '@b:bar @a:foo @c:baz @d:gaz', 'nocontent'), r.execute_command('ft.search', 'idx', '@c:baz @b:bar @a:foo @d:gaz', 'nocontent'), r.execute_command('ft.search', 'idx', '@d:gaz @c:baz @b:bar @a:foo', 'nocontent'), r.execute_command( 'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'), r.execute_command( 'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'), r.execute_command( 'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'), r.execute_command( 'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'), r.execute_command('ft.search', 'idx', 'foo (bar baz gaz)', 
'nocontent'), r.execute_command('ft.search', 'idx', 'foo (bar (baz gaz))', 'nocontent'), r.execute_command('ft.search', 'idx', 'foo (bar (foo bar) (foo bar))', 'nocontent'), r.execute_command('ft.search', 'idx', 'foo (foo (bar baz (gaz)))', 'nocontent'), r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')] for i, r in enumerate(res): # print i, res[0], r env.assertListEqual(res[0], r) def testInKeys(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text')) for i in range(200): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'foo', 'hello world')) for _ in r.retry_with_rdb_reload(): waitForIndex(env, 'idx') for keys in ( ['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [ 'doc%d' % i for i in range(99, 0, -5)] ): res = r.execute_command( 'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys) env.assertEqual(len(keys), res[0]) env.assertTrue(all((k in res for k in keys))) env.assertEqual(0, r.execute_command( 'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0]) with env.assertResponseError(): env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99) with env.assertResponseError(): env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1) with env.assertResponseError(): env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo') def testSlopInOrder(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields', 'title', 't1 t2')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields', 'title', 't1 t3 t2')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields', 'title', 't1 t3 t4 t2')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields', 'title', 't1 t3 t4 t5 t2')) res = r.execute_command( 'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent') env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:])) res = r.execute_command( 'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent') env.assertEqual(1, res[0]) env.assertEqual('doc1', res[1]) env.assertEqual(0, r.execute_command( 'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0]) env.assertEqual(1, r.execute_command( 'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0]) env.assertEqual(2, r.execute_command( 'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0]) env.assertEqual(3, r.execute_command( 'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0]) env.assertEqual(4, r.execute_command( 'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0]) env.assertEqual(4, r.execute_command( 'ft.search', 'idx', 't1 t2', 'inorder')[0]) env.assertEqual(0, r.execute_command( 'ft.search', 'idx', 't t1', 'inorder')[0]) env.assertEqual(2, r.execute_command( 'ft.search', 'idx', 't1 t2 t3 t4')[0]) env.assertEqual(0, r.execute_command( 'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0]) def testSlopInOrderIssue1986(env): r = env # test with qsort optimization on intersect iterator env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields', 'title', 't1 t2')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields', 'title', 't2 t1')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields', 'title', 't1')) # before fix, both queries returned 
`doc2` env.assertEqual([1L, 'doc2', ['title', 't2 t1']], r.execute_command( 'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')) env.assertEqual([1L, 'doc1', ['title', 't1 t2']], r.execute_command( 'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')) def testExact(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields', 'title', 'hello world', 'body', 'lorem ist ipsum')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields', 'title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem')) res = r.execute_command( 'ft.search', 'idx', '"hello world"', 'verbatim') env.assertEqual(3, len(res)) env.assertEqual(1, res[0]) env.assertEqual("doc1", res[1]) res = r.execute_command( 'ft.search', 'idx', "hello \"another world\"", 'verbatim') env.assertEqual(3, len(res)) env.assertEqual(1, res[0]) env.assertEqual("doc2", res[1]) def testGeoErrors(env): env.expect('flushall') env.expect('ft.create idx ON HASH schema name text location geo').equal('OK') env.expect('ft.add idx hotel 1.0 fields name hill location -0.1757,51.5156').equal('OK') env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 km').equal([0L]) # Insert error - works fine with out of keyspace implementation # env.expect('ft.add', 'idx', 'hotel1', 1, 'fields', 'name', '_hotel1', 'location', '1, 1').error() \ # .contains('Could not index geo value') # Query errors env.expect('ft.search idx hilton geofilter location lon 51.5156 1 km').error() \ .contains('Bad arguments for <lon>: Could not convert argument to expected type') env.expect('ft.search idx hilton geofilter location 51.5156 lat 1 km').error() \ .contains('Bad arguments for <lat>: Could not convert argument to expected type') env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 radius km').error() \ .contains('Bad arguments for <radius>: Could not convert argument to expected type') env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 fake').error() \ .contains('Unknown distance unit fake') env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1').error() \ .contains('GEOFILTER requires 5 arguments') def testGeo(env): r = env gsearch = lambda query, lon, lat, dist, unit='km': r.execute_command( 'ft.search', 'idx', query, 'geofilter', 'location', lon, lat, dist, unit, 'LIMIT', 0, 20) gsearch_inline = lambda query, lon, lat, dist, unit='km': r.execute_command( 'ft.search', 'idx', '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit), 'LIMIT', 0, 20) env.assertOk(r.execute_command('ft.create', 'idx', 'ON', 'HASH', 'schema', 'name', 'text', 'location', 'geo')) for i, hotel in enumerate(hotels): env.assertOk(r.execute_command('ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name', hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1]))) for _ in r.retry_with_rdb_reload(): waitForIndex(env, 'idx') res = r.execute_command('ft.search', 'idx', 'hilton') env.assertEqual(len(hotels), res[0]) res = gsearch('hilton', "-0.1757", "51.5156", '1') env.assertEqual(3, res[0]) env.assertIn('hotel2', res) env.assertIn('hotel21', res) env.assertIn('hotel79', res) res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1') env.assertListEqual(sorted(res), sorted(res2)) res = gsearch('hilton', "-0.1757", "51.5156", '10') env.assertEqual(14, res[0]) res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm') env.assertListEqual(sorted(res), 
sorted(res2)) res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10') env.assertListEqual(sorted(res), sorted(res2)) res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm') env.assertEqual(1, res[0]) env.assertEqual('hotel94', res[1]) res2 = gsearch_inline( 'heathrow', -0.44155, 51.45865, '10', 'm') env.assertListEqual(sorted(res), sorted(res2)) res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km') env.assertEqual(5, res[0]) env.assertIn('hotel94', res) res2 = gsearch_inline( 'heathrow', -0.44155, 51.45865, '10', 'km') env.assertEqual(5, res2[0]) env.assertListEqual(sorted(res), sorted(res2)) res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km') env.assertEqual(3, res[0]) env.assertIn('hotel94', res) res2 = gsearch_inline( 'heathrow', -0.44155, 51.45865, '5', 'km') env.assertListEqual(sorted(res), sorted(res2)) def testTagErrors(env): env.expect("ft.create", "test", 'ON', 'HASH', "SCHEMA", "tags", "TAG").equal('OK') env.expect("ft.add", "test", "1", "1", "FIELDS", "tags", "alberta").equal('OK') env.expect("ft.add", "test", "2", "1", "FIELDS", "tags", "ontario. alberta").equal('OK') def testGeoDeletion(env): if env.is_cluster(): raise unittest.SkipTest() # Can't properly test if deleted on cluster env.expect('ft.config', 'set', 'FORK_GC_CLEAN_THRESHOLD', 0).ok() env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'g1', 'geo', 'g2', 'geo', 't1', 'text') env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'g1', "-0.1757,51.5156", 'g2', "-0.1757,51.5156", 't1', "hello") env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'g1', "-0.1757,51.5156", 'g2', "-0.1757,51.5156", 't1', "hello") env.cmd('ft.add', 'idx', 'doc3', 1.0, 'fields', 'g1', "-0.1757,51.5156", 't1', "hello") # keys are: "geo:idx/g1" and "geo:idx/g2" env.assertEqual(3, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0])) env.assertEqual(2, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0])) # Remove the first doc env.cmd('ft.del', 'idx', 'doc1') for _ in range(100): env.cmd('ft.debug', 'gc_forceinvoke', 'idx') env.assertEqual(2, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0])) env.assertEqual(1, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0])) # Replace the other one: env.cmd('ft.add', 'idx', 'doc2', 1.0, 'replace', 'fields', 't1', 'just text here') for _ in range(100): env.cmd('ft.debug', 'gc_forceinvoke', 'idx') env.assertEqual(1, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0])) env.assertEqual(0, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0])) def testInfields(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0)) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields', 'title', 'hello world', 'body', 'lorem ipsum')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields', 'title', 'hello world lorem ipsum', 'body', 'hello world')) res = r.execute_command( 'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent") env.assertEqual(3, len(res)) env.assertEqual(2, res[0]) env.assertEqual("doc2", res[1]) env.assertEqual("doc1", res[2]) res = r.execute_command( 'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent") env.assertEqual(2, len(res)) env.assertEqual(1, res[0]) env.assertEqual("doc2", res[1]) res = r.execute_command( 'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent") env.assertEqual(2, len(res)) env.assertEqual(1, res[0]) env.assertEqual("doc2", res[1]) res = r.execute_command( 'ft.search', 'idx', '\"hello world\"', 'verbatim', 
"infields", 1, "body", "nocontent") env.assertEqual(2, len(res)) env.assertEqual(1, res[0]) env.assertEqual("doc2", res[1]) res = r.execute_command( 'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent") env.assertEqual(2, len(res)) env.assertEqual(1, res[0]) env.assertEqual("doc1", res[1]) res = r.execute_command( 'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent") env.assertEqual(3, len(res)) env.assertEqual(2, res[0]) env.assertEqual("doc2", res[1]) env.assertEqual("doc1", res[2]) def testScorerSelection(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'body', 'text')) # this is the default scorer res = r.execute_command( 'ft.search', 'idx', 'foo', 'scorer', 'TFIDF') env.assertEqual(res, [0]) with env.assertResponseError(): res = r.execute_command( 'ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER') def testFieldSelectors(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'PREFIX', 1, 'doc', 'schema', 'TiTle', 'text', 'BoDy', 'text', "יוניקוד", 'text', 'field.with,punct', 'text')) #todo: document as breaking change, ft.add fields name are not case insentive env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields', 'TiTle', 'hello world', 'BoDy', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields', 'BoDy', 'hello world', 'TiTle', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt')) res = r.execute_command( 'ft.search', 'idx', '@TiTle:hello world', 'nocontent') env.assertEqual(res, [1, 'doc1']) res = r.execute_command( 'ft.search', 'idx', '@BoDy:hello world', 'nocontent') env.assertEqual(res, [1, 'doc2']) res = r.execute_command( 'ft.search', 'idx', '@BoDy:hello @TiTle:world', 'nocontent') env.assertEqual(res, [0]) res = r.execute_command( 'ft.search', 'idx', '@BoDy:hello world @TiTle:world', 'nocontent') env.assertEqual(res, [0]) res = r.execute_command( 'ft.search', 'idx', '@BoDy:(hello|foo) @TiTle:(world|bar)', 'nocontent') env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2'])) res = r.execute_command( 'ft.search', 'idx', '@BoDy:(hello|foo world|bar)', 'nocontent') env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2'])) res = r.execute_command( 'ft.search', 'idx', '@BoDy|TiTle:(hello world)', 'nocontent') env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2'])) res = r.execute_command( 'ft.search', 'idx', '@יוניקוד:(unicode)', 'nocontent') env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2'])) res = r.execute_command( 'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent') env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2'])) def testStemming(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields', 'title', 'hello kitty')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields', 'title', 'hello kitties')) res = r.execute_command( 'ft.search', 'idx', 'hello kitty', "nocontent") env.assertEqual(3, len(res)) env.assertEqual(2, res[0]) res = r.execute_command( 'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim") env.assertEqual(2, len(res)) env.assertEqual(1, res[0]) # test for unknown language with env.assertResponseError(): res = r.execute_command( 'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian") def testExpander(env): r = env env.assertOk(r.execute_command( 
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text')) env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields', 'title', 'hello kitty')) res = r.execute_command( 'ft.search', 'idx', 'kitties', "nocontent", "expander", "SBSTEM" ) env.assertEqual(2, len(res)) env.assertEqual(1, res[0]) res = r.execute_command( 'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander") env.assertEqual(1, len(res)) env.assertEqual(0, res[0]) res = r.execute_command( 'ft.search', 'idx', 'kitti', "nocontent") env.assertEqual(2, len(res)) env.assertEqual(1, res[0]) res = r.execute_command( 'ft.search', 'idx', 'kitti', "nocontent", 'verbatim') env.assertEqual(1, len(res)) env.assertEqual(0, res[0]) # Calling a stem directly works even with VERBATIM. # You need to use the + prefix escaped res = r.execute_command( 'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim') env.assertEqual(2, len(res)) env.assertEqual(1, res[0]) def testNumericRange(env): r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric')) env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5).error().contains("FILTER requires 3 arguments") env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5, 'inf').error().contains("Bad upper range: inf") env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 'inf', 5).error().contains("Bad lower range: inf") for i in xrange(100): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields', 'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i)) for _ in r.retry_with_rdb_reload(): waitForIndex(env, 'idx') res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent", "filter", "score", 0, 100) env.assertEqual(11, len(res)) env.assertEqual(100, res[0]) res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent", "filter", "score", 0, 50) env.assertEqual(51, res[0]) res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100, "filter", "score", "(0", "(50") env.assertEqual(49, res[0]) res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent", "filter", "score", "-inf", "+inf") env.assertEqual(100, res[0]) # test multi filters scrange = (19, 90) prrange = (290, 385) res = r.execute_command('ft.search', 'idx', 'hello kitty', "filter", "score", scrange[ 0], scrange[1], "filter", "price", prrange[0], prrange[1]) # print res for doc in res[2::2]: sc = int(doc[doc.index('score') + 1]) pr = int(doc[doc.index('price') + 1]) env.assertTrue(sc >= scrange[0] and sc <= scrange[1]) env.assertGreaterEqual(pr, prrange[0]) env.assertLessEqual(pr, prrange[1]) env.assertEqual(10, res[0]) res = r.execute_command('ft.search', 'idx', 'hello kitty', "filter", "score", "19", "90", "filter", "price", "90", "185") env.assertEqual(0, res[0]) # Test numeric ranges as part of query syntax res = r.execute_command( 'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent") env.assertEqual(11, len(res)) env.assertEqual(100, res[0]) res = r.execute_command( 'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent") env.assertEqual(51, res[0]) res = r.execute_command( 'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent") env.assertEqual(49, res[0]) res = r.execute_command( 'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent") env.assertEqual(49, res[0]) res = r.execute_command( 'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent") 
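    # Negating the exclusive range (0,50) flips the 49 matches asserted above into the
    # remaining 51 documents (score 0 plus scores 50..99). With NOCONTENT the reply is a
    # flat [count, id, id, ...] array, so the total is read from res[0] below.
    # Illustrative only (not part of the original test), since doc ids encode the score:
    #   for d in res[1:]:
    #       assert int(d[len('doc'):]) == 0 or int(d[len('doc'):]) >= 50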
env.assertEqual(51, res[0]) res = r.execute_command( 'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent") env.assertEqual(100, res[0]) def testPayload(env): r = env env.expect('ft.create', 'idx', 'ON', 'HASH', 'PAYLOAD_FIELD', '__payload', 'schema', 'f', 'text').ok() for i in range(10): r.expect('ft.add', 'idx', '%d' % i, 1.0, 'payload', 'payload %d' % i, 'fields', 'f', 'hello world').ok() for x in r.retry_with_rdb_reload(): waitForIndex(env, 'idx') res = r.execute_command('ft.search', 'idx', 'hello world') r.assertEqual(21, len(res)) res = r.execute_command('ft.search', 'idx', 'hello world', 'withpayloads') r.assertEqual(31, len(res)) r.assertEqual(10, res[0]) for i in range(1, 30, 3): r.assertEqual(res[i + 1], 'payload %s' % res[i]) def testGarbageCollector(env): env.skipOnCluster() if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs: # this test is not relevant for fork gc because it does not clean the last block raise unittest.SkipTest() N = 100 r = env r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text').ok() waitForIndex(r, 'idx') for i in range(N): r.expect('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for i in range(10)))).ok() def get_stats(r): res = r.execute_command('ft.info', 'idx') d = {res[i]: res[i + 1] for i in range(0, len(res), 2)} gc_stats = {d['gc_stats'][x]: float( d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)} d['gc_stats'] = gc_stats return d stats = get_stats(r) if 'current_hz' in stats['gc_stats']: env.assertGreater(stats['gc_stats']['current_hz'], 8) env.assertEqual(0, stats['gc_stats']['bytes_collected']) env.assertGreater(int(stats['num_records']), 0) initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024 for i in range(N): r.expect('ft.del', 'idx', 'doc%d' % i).equal(1) for _ in range(100): # gc is random so we need to invoke it enough times for it to work env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx') stats = get_stats(r) env.assertEqual(0, int(stats['num_docs'])) env.assertEqual(0, int(stats['num_records'])) if not env.is_cluster(): env.assertEqual(100, int(stats['max_doc_id'])) if 'current_hz' in stats['gc_stats']: env.assertGreater(stats['gc_stats']['current_hz'], 30) currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024 # print initialIndexSize, currentIndexSize, # stats['gc_stats']['bytes_collected'] env.assertGreater(initialIndexSize, currentIndexSize) env.assertGreater(stats['gc_stats'][ 'bytes_collected'], currentIndexSize) for i in range(10): res = r.execute_command('ft.search', 'idx', 'term%d' % i) env.assertEqual([0], res) def testReturning(env): env.assertCmdOk('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f1', 'text', 'f2', 'text', 'n1', 'numeric', 'sortable', 'f3', 'text') for i in range(10): env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields', 'f2', 'val2', 'f1', 'val1', 'f3', 'val3', 'n1', i) # RETURN 0. 
Simplest case for x in env.retry_with_reload(): waitForIndex(env, 'idx') res = env.cmd('ft.search', 'idx', 'val*', 'return', '0') env.assertEqual(11, len(res)) env.assertEqual(10, res[0]) for r in res[1:]: env.assertTrue(r.startswith('DOC_')) for field in ('f1', 'f2', 'f3', 'n1'): res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field) env.assertEqual(21, len(res)) env.assertEqual(10, res[0]) for pair in grouper(res[1:], 2): docname, fields = pair env.assertEqual(2, len(fields)) env.assertEqual(field, fields[0]) env.assertTrue(docname.startswith('DOC_')) # Test that we don't return SORTBY fields if they weren't specified # also in RETURN res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1', 'sortby', 'n1', 'ASC') row = res[2] # get the first result env.assertEqual(['f1', 'val1'], row) # Test when field is not found res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist') env.assertEqual(21, len(res)) env.assertEqual(10, res[0]) # # Test that we don't crash if we're given the wrong number of fields with env.assertResponseError(): res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist') def _test_create_options_real(env, *options): options = [x for x in options if x] has_offsets = 'NOOFFSETS' not in options has_fields = 'NOFIELDS' not in options has_freqs = 'NOFREQS' not in options try: env.cmd('ft.drop', 'idx') # RS 2.0 ft.drop does not remove documents env.flush() except Exception as e: pass options = ['idx'] + options + ['ON', 'HASH', 'schema', 'f1', 'text', 'f2', 'text'] env.assertCmdOk('ft.create', *options) for i in range(10): env.assertCmdOk('ft.add', 'idx', 'doc{}'.format( i), 0.5, 'fields', 'f1', 'value for {}'.format(i)) # Query # res = env.cmd('ft.search', 'idx', "value for 3") # if not has_offsets: # env.assertIsNone(res) # else: # env.assertIsNotNone(res) # Frequencies: env.assertCmdOk('ft.add', 'idx', 'doc100', 1.0, 'fields', 'f1', 'foo bar') env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0, 'fields', 'f1', ('foo ' * 10) + ' bar') res = env.cmd('ft.search', 'idx', 'foo') env.assertEqual(2, res[0]) if has_offsets: docname = res[1] if has_freqs: # changed in minminheap PR. 
TODO: remove env.assertEqual('doc100', docname) else: env.assertEqual('doc100', docname) env.assertCmdOk('ft.add', 'idx', 'doc300', 1.0, 'fields', 'f1', 'Hello') res = env.cmd('ft.search', 'idx', '@f2:Hello') if has_fields: env.assertEqual(1, len(res)) else: env.assertEqual(3, len(res)) def testCreationOptions(env): from itertools import combinations for x in range(1, 5): for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x): _test_create_options_real(env, *combo) env.expect('ft.create', 'idx').error() def testInfoCommand(env): from itertools import combinations r = env env.assertOk(r.execute_command( 'ft.create', 'idx', 'ON', 'HASH', 'NOFIELDS', 'schema', 'title', 'text')) N = 50 for i in xrange(N): env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields', 'title', 'hello term%d' % i)) for _ in r.retry_with_rdb_reload(): waitForIndex(env, 'idx') res = r.execute_command('ft.info', 'idx') d = {res[i]: res[i + 1] for i in range(0, len(res), 2)} env.assertEqual(d['index_name'], 'idx') env.assertEqual(d['index_options'], ['NOFIELDS']) env.assertListEqual( d['fields'], [['title', 'type', 'TEXT', 'WEIGHT', '1']]) if not env.is_cluster(): env.assertEquals(int(d['num_docs']), N) env.assertEquals(int(d['num_terms']), N + 1) env.assertEquals(int(d['max_doc_id']), N) env.assertEquals(int(d['records_per_doc_avg']), 2) env.assertEquals(int(d['num_records']), N * 2) env.assertGreater(float(d['offset_vectors_sz_mb']), 0) env.assertGreater(float(d['key_table_size_mb']), 0) env.assertGreater(float(d['inverted_sz_mb']), 0) env.assertGreater(float(d['bytes_per_record_avg']), 0) env.assertGreater(float(d['doc_table_size_mb']), 0) for x in range(1, 5): for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x): combo = list(filter(None, combo)) options = combo + ['schema', 'f1', 'text'] try: env.cmd('ft.drop', 'idx') except: pass env.assertCmdOk('ft.create', 'idx', 'ON', 'HASH', *options) info = env.cmd('ft.info', 'idx') ix = info.index('index_options') env.assertFalse(ix == -1) opts = info[ix + 1] # make sure that an empty opts string returns no options in # info if not combo: env.assertListEqual([], opts) for option in filter(None, combo): env.assertTrue(option in opts) def testNoStem(env): env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'body', 'text', 'name', 'text', 'nostem') if not env.isCluster(): # todo: change it to be more generic to pass on is_cluster res = env.cmd('ft.info', 'idx') env.assertEqual(res[7][1][5], 'NOSTEM') for _ in env.retry_with_reload(): waitForIndex(env, 'idx') try: env.cmd('ft.del', 'idx', 'doc') except redis.ResponseError: pass # Insert a document env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields', 'body', "located", 'name', "located") # Now search for the fields res_body = env.cmd('ft.search', 'idx', '@body:location') res_name = env.cmd('ft.search', 'idx', '@name:location') env.assertEqual(0, res_name[0]) env.assertEqual(1, res_body[0]) def testSortbyMissingField(env): # GH Issue 131 env.cmd('ft.create', 'ix', 'ON', 'HASH', 'schema', 'txt', 'text', 'num', 'numeric', 'sortable') env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo') env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num') def testParallelIndexing(env): # GH Issue 207 env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text') from threading import Thread env.getConnection() ndocs = 100 def runner(tid): cli = env.getConnection() for num in range(ndocs): cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0, 'fields', 'txt', 
'hello world' * 20) ths = [] for tid in range(10): ths.append(Thread(target=runner, args=(tid,))) [th.start() for th in ths] [th.join() for th in ths] res = env.cmd('ft.info', 'idx') d = {res[i]: res[i + 1] for i in range(0, len(res), 2)} env.assertEqual(1000, int(d['num_docs'])) def testDoubleAdd(env): # Tests issue #210 env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text') env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world') with env.assertResponseError(): env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'goodbye world') env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1]) env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0]) env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0]) # Now with replace env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields', 'txt', 'goodbye world') env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0]) env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0]) env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1]) def testConcurrentErrors(env): from multiprocessing import Process import random env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text') docs_per_thread = 100 num_threads = 50 docIds = ['doc{}'.format(x) for x in range(docs_per_thread)] def thrfn(): myIds = docIds[::] random.shuffle(myIds) cli = env.getConnection() with cli.pipeline(transaction=False) as pl: for x in myIds: pl.execute_command('ft.add', 'idx', x, 1.0, 'fields', 'txt', ' hello world ' * 50) try: pl.execute() except Exception as e: pass # print e thrs = [Process(target=thrfn) for x in range(num_threads)] [th.start() for th in thrs] [th.join() for th in thrs] res = env.cmd('ft.info', 'idx') d = {res[i]: res[i + 1] for i in range(0, len(res), 2)} env.assertEqual(100, int(d['num_docs'])) def testBinaryKeys(env): env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text') # Insert a document env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match') env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match') for _ in env.reloading_iterator(): waitForIndex(env, 'idx') exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']] res = env.cmd('ft.search', 'idx', 'match') for r in res: env.assertIn(r, exp) def testNonDefaultDb(env): if env.is_cluster(): raise unittest.SkipTest() # Should be ok env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'schema', 'txt', 'text') try: env.cmd('SELECT 1') except redis.ResponseError: return # Should fail with env.assertResponseError(): env.cmd('FT.CREATE', 'idx2', 'ON', 'HASH', 'schema', 'txt', 'text') def testDuplicateNonspecFields(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text').ok() env.expect('FT.ADD', 'idx', 'doc', 1.0, 'fields', 'txt', 'foo', 'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3').ok() res = env.cmd('ft.get', 'idx', 'doc') res = {res[i]: res[i + 1] for i in range(0, len(res), 2)} env.assertTrue(res['f1'] in ('f1val', 'f1val2')) env.assertEqual('f1Val3', res['F1']) def testDuplicateFields(env): # As of RS 2.0 it is allowed. 
only latest field will be saved and indexed env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC', 'SORTABLE') env.expect('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS', 'txt', 'foo', 'txt', 'bar', 'txt', 'baz').ok() env.expect('FT.SEARCH idx *').equal([1L, 'doc', ['txt', 'baz']]) def testDuplicateSpec(env): with env.assertResponseError(): env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'text', 'n1', 'numeric', 'f1', 'text') def testSortbyMissingFieldSparse(env): # Note, the document needs to have one present sortable field in # order for the indexer to give it a sort vector env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'lastName', 'text', 'SORTABLE', 'firstName', 'text', 'SORTABLE') env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark') res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY", "firstName", "ASC", "limit", 0, 100) # commented because we don't filter out exclusive sortby fields # env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res) def testLuaAndMulti(env): env.skip() # addhash isn't supported if env.is_cluster(): raise unittest.SkipTest() # Ensure we can work in Lua and Multi environments without crashing env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'text', 'n1', 'numeric') env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4) env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5) r = env.getConnection() r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0") r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0) # Try in a pipeline: with r.pipeline(transaction=True) as pl: pl.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields', 'f1', 'v3') pl.execute_command('ft.add', 'idx', 'doc3', 1.0, 'fields', 'f1', 'v4') pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0) pl.execute() def testLanguageField(env): env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'language', 'TEXT') env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'language', 'gibberish') res = env.cmd('FT.SEARCH', 'idx', 'gibberish') env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res) # The only way I can verify that LANGUAGE is parsed twice is ensuring we # provide a wrong language. 
This is much easier to test than trying to # figure out how a given word is stemmed with env.assertResponseError(): env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE', 'blah', 'FIELDS', 'language', 'gibber') def testUninitSortvector(env): # This would previously crash env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT') for x in range(2000): env.cmd('FT.ADD', 'idx', 'doc{}'.format( x), 1.0, 'FIELDS', 'f1', 'HELLO') env.broadcast('SAVE') for x in range(10): env.broadcast('DEBUG RELOAD') def normalize_row(row): return to_dict(row) def assertAggrowsEqual(env, exp, got): env.assertEqual(exp[0], got[0]) env.assertEqual(len(exp), len(got)) # and now, it's just free form: exp = sorted(to_dict(x) for x in exp[1:]) got = sorted(to_dict(x) for x in got[1:]) env.assertEqual(exp, got) def assertResultsEqual(env, exp, got, inorder=True): from pprint import pprint # pprint(exp) # pprint(got) env.assertEqual(exp[0], got[0]) env.assertEqual(len(exp), len(got)) exp = list(grouper(exp[1:], 2)) got = list(grouper(got[1:], 2)) for x in range(len(exp)): exp_did, exp_fields = exp[x] got_did, got_fields = got[x] env.assertEqual(exp_did, got_did, message="at position {}".format(x)) got_fields = to_dict(got_fields) exp_fields = to_dict(exp_fields) env.assertEqual(exp_fields, got_fields, message="at position {}".format(x)) def testAlterIndex(env): env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT') env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world') env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT') waitForIndex(env, 'idx') env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world') # RS 2.0 reindex and after reload both documents are found # for _ in env.retry_with_reload(): res = env.cmd('FT.SEARCH', 'idx', 'world') env.assertEqual(toSortedFlatList(res), toSortedFlatList([2L, 'doc2', ['f1', 'hello', 'f2', 'world'], 'doc1', ['f1', 'hello', 'f2', 'world']])) # env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret) env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE') for x in range(10): env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0, 'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x)) for _ in env.retry_with_reload(): waitForIndex(env, 'idx') # Test that sortable works res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC') exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'], 'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'], 'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', ['f1', 'hello', 'f3', 'val4'], 'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'], 'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']] assertResultsEqual(env, exp, res) # Test that we can add a numeric field env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC') env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50) env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250) for _ in env.retry_with_reload(): waitForIndex(env, 'idx') res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]') env.assertEqual([1, 'docN1', ['n1', '50']], res) env.expect('FT.ALTER', 'idx', 'SCHEMA', 'NOT_ADD', 'f2', 'TEXT').error() env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD').error() env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2').error() env.expect('FT.ALTER', 'idx', 'ADD', 'SCHEMA', 'f2', 'TEXT').error() env.expect('FT.ALTER', 'idx', 'f2', 'TEXT').error() def testAlterValidation(env): # Test that constraints for ALTER comand 
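    # (Sketch of what the assertions below exercise, for orientation: a schema created
    # without MAXTEXTFIELDS accepts up to 32 TEXT fields, so the FT.ALTER that adds the
    # 33rd field 'tooBig' is expected to fail, while 'idx2', created with MAXTEXTFIELDS,
    # keeps accepting SCHEMA ADD calls well past that limit.)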
env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'SCHEMA', 'f0', 'TEXT') for x in range(1, 32): env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT') # OK for now. # Should be too many indexes env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT') env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'ON', 'HASH', 'SCHEMA', 'f0', 'TEXT') # print env.cmd('FT.INFO', 'idx2') for x in range(1, 50): env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT') env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello') for _ in env.retry_with_reload(): waitForIndex(env, 'idx2') ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello') env.assertEqual([1, 'doc1', ['f50', 'hello']], ret) env.cmd('FT.CREATE', 'idx3', 'ON', 'HASH', 'SCHEMA', 'f0', 'text') # Try to alter the index with garbage env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3', 'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage') ret = to_dict(env.cmd('ft.info', 'idx3')) env.assertEqual(1, len(ret['fields'])) env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT') # test with no fields! env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD') # test with no fields! env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD') def testIssue366_2(env): # FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC # FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234 # FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111 # shutdown env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC') env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}', 'FIELDS', 'textfield', 'sometext', 'numfield', 1234) env.cmd('ft.add', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world2"}', 'REPLACE', 'PARTIAL', 'FIELDS', 'textfield', 'sometext', 'numfield', 1111) for _ in env.retry_with_reload(): pass # def testIssue654(env): # Crashes during FILTER env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'id', 'numeric') env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1) env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2) res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2) def testReplaceReload(env): env.cmd('FT.CREATE', 'idx2', 'ON', 'HASH', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC') # Create a document and then replace it. env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99) env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL', 'FIELDS', 'textfield', 's100', 'numfield', 990) env.dump_and_reload() # RDB Should still be fine env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL', 'FIELDS', 'textfield', 's200', 'numfield', 1090) doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2')) env.assertEqual('s200', doc['textfield']) env.assertEqual('1090', doc['numfield']) # command = 'FT.CREATE idx SCHEMA ' # for i in range(255): # command += 't%d NUMERIC SORTABLE ' % i # command = command[:-1] # r.execute_command(command) # r.execute_command('save') # // reload from ... 
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1') def testIssue417(env): command = ['ft.create', 'idx', 'ON', 'HASH', 'schema'] for x in range(255): command += ['t{}'.format(x), 'numeric', 'sortable'] command = command[:-1] env.cmd(*command) for _ in env.reloading_iterator(): waitForIndex(env, 'idx') try: env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1') except redis.ResponseError as e: env.assertTrue('already' in e.message.lower()) # >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT # >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com" # >FT.SEARCH myIdx "no-as" # Could not connect to Redis at 127.0.0.1:6379: Connection refused # >FT.SEARCH myIdx "no-as" # (error) Unknown Index name def testIssue422(env): env.cmd('ft.create', 'myIdx', 'ON', 'HASH', 'schema', 'title', 'TEXT', 'WEIGHT', '5.0', 'body', 'TEXT', 'url', 'TEXT') env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'bod', 'lorem ipsum', 'url', 'www.google.com') rv = env.cmd('ft.search', 'myIdx', 'no-as') env.assertEqual([0], rv) def testIssue446(env): env.cmd('ft.create', 'myIdx', 'ON', 'HASH', 'schema', 'title', 'TEXT', 'SORTABLE') env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com') rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0') env.assertEqual([1], rv) # Related - issue 635 env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello') rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0') env.assertEqual([2], rv) def testTimeout(env): env.skipOnCluster() num_range = 1000 env.cmd('ft.config', 'set', 'timeout', '1') env.cmd('ft.config', 'set', 'maxprefixexpansions', num_range) env.cmd('ft.create', 'myIdx', 'schema', 't', 'TEXT') for i in range(num_range): env.expect('HSET', 'doc%d'%i, 't', 'aa' + str(i)) env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'limit', '0', '0').noEqual([num_range]) env.expect('ft.config', 'set', 'on_timeout', 'fail').ok() env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'limit', '0', '0') \ .contains('Timeout limit was reached') res = env.cmd('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'timeout', 1000) env.assertEqual(res[0], num_range) # test erroneous params env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'timeout').error() env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'timeout', -1).error() env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'timeout', 'STR').error() # test cursor res = env.cmd('FT.AGGREGATE', 'myIdx', 'aa*', 'WITHCURSOR', 'count', 50, 'timeout', 500) l = len(res[0]) - 1 # do not count the number of results (the first element in the results) cursor = res[1] time.sleep(0.01) while cursor != 0: r, cursor = env.cmd('FT.CURSOR', 'READ', 'myIdx', str(cursor)) l += (len(r) - 1) env.assertEqual(l, 1000) def testAlias(env): conn = getConnectionByEnv(env) env.cmd('ft.create', 'idx', 'ON', 'HASH', 'PREFIX', 1, 'doc1', 'schema', 't1', 'text') env.cmd('ft.create', 'idx2', 'ON', 'HASH', 'PREFIX', 1, 'doc2', 'schema', 't1', 'text') env.expect('ft.aliasAdd', 'myIndex').raiseError() env.expect('ft.aliasupdate', 'fake_alias', 'imaginary_alias', 'Too_many_args').raiseError() env.cmd('ft.aliasAdd', 'myIndex', 'idx') env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello') r = env.cmd('ft.search', 'idx', 'hello') env.assertEqual([1, 'doc1', ['t1', 'hello']], r) r2 = env.cmd('ft.search', 'myIndex', 'hello') env.assertEqual(r, r2) # try to add the same 
alias again; should be an error env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError() env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError() # now delete the index env.cmd('ft.drop', 'myIndex') # RS2 does not delete doc on ft.drop conn.execute_command('DEL', 'doc1') # index list should be cleared now. This can be tested by trying to alias # the old alias to different index env.cmd('ft.aliasAdd', 'myIndex', 'idx2') env.cmd('ft.aliasAdd', 'alias2', 'idx2') env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello') r = env.cmd('ft.search', 'alias2', 'hello') env.assertEqual([1L, 'doc2', ['t1', 'hello']], r) # check that aliasing one alias to another returns an error. This will # end up being confusing env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError() # check that deleting the alias works as expected env.expect('ft.aliasDel', 'myIndex').notRaiseError() env.expect('ft.search', 'myIndex', 'foo').raiseError() # create a new index and see if we can use the old name env.cmd('ft.create', 'idx3', 'ON', 'HASH', 'PREFIX', 1, 'doc3', 'schema', 't1', 'text') env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo') env.cmd('ft.aliasAdd', 'myIndex', 'idx3') # also, check that this works in rdb save for _ in env.retry_with_rdb_reload(): waitForIndex(env, 'myIndex') r = env.cmd('ft.search', 'myIndex', 'foo') env.assertEqual([1L, 'doc3', ['t1', 'foo']], r) # Check that we can move an alias from one index to another env.cmd('ft.aliasUpdate', 'myIndex', 'idx2') r = env.cmd('ft.search', 'myIndex', "hello") env.assertEqual([1L, 'doc2', ['t1', 'hello']], r) # Test that things like ft.get, ft.aggregate, etc. work r = env.cmd('ft.get', 'myIndex', 'doc2') env.assertEqual(['t1', 'hello'], r) r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1') env.assertEqual([1, ['t1', 'hello']], r) # Test update env.expect('ft.aliasAdd', 'updateIndex', 'idx3') env.expect('ft.aliasUpdate', 'updateIndex', 'fake_idx') r = env.cmd('ft.del', 'idx2', 'doc2') env.assertEqual(1, r) env.expect('ft.aliasdel').raiseError() env.expect('ft.aliasdel', 'myIndex', 'yourIndex').raiseError() env.expect('ft.aliasdel', 'non_existing_alias').raiseError() def testNoCreate(env): env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f1', 'text') env.expect('ft.add', 'idx', 'schema', 'f1').raiseError() env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError() env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError() env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError() env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError() def testSpellCheck(env): env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT') env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'report', 'report content') rv = env.cmd('FT.SPELLCHECK', 'idx', '111111') env.assertEqual([['TERM', '111111', []]], rv) if not env.isCluster(): rv = env.cmd('FT.SPELLCHECK', 'idx', '111111', 'FULLSCOREINFO') env.assertEqual([1L, ['TERM', '111111', []]], rv) # Standalone functionality def testIssue484(env): # Issue with split # 127.0.0.1:6379> ft.drop productSearch1 # OK # 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC" # OK # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0 # OK # 127.0.0.1:6379> "FT.ADD" 
"productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0 # OK # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0 # OK # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0 # OK # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0 # OK # 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'ON', 'HASH', 'schema', 'productid', 'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric') env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0) env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0) env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0) env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0) env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0) res = env.cmd('FT.AGGREGATE', 'productSearch1', '*', 'load', '2', '@color', '@categoryid', 'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value', 'GROUPBY', '1', '@value', 'REDUCE', 'COUNT', '0', 'as', 'value_count', 'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC') expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']] assertAggrowsEqual(env, expected, res) for var in expected: env.assertIn(var, res) def testIssue501(env): env.cmd('FT.CREATE', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT') env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content') env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield') rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq', 'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang') env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1]) env.assertEqual([], rv[0][2]) env.expect('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq', 'TERMS', 'FAKE_COMMAND', 'slang').error() def testIssue589(env): env.cmd('FT.CREATE', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT') env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content') env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset") def testIssue621(env): env.expect('ft.create', 'test', 'ON', 'HASH', 'SCHEMA', 'uuid', 'TAG', 'title', 
'TEXT').equal('OK') env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK') env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK') res = env.cmd('ft.search', 'test', '@uuid:{foo}') env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'a', ['uuid', 'foo', 'title', 'bar']])) # Server crash on doc names that conflict with index keys #666 # again this test is not relevant because the index is out of the keyspace # def testIssue666(env): # # We cannot reliably determine that any error will occur in cluster mode # # because of the key name # env.skipOnCluster() # env.cmd('ft.create', 'foo', 'schema', 'bar', 'text') # env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three') # # crashes here # with env.assertResponseError(): # env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six') # # try with replace: # with env.assertResponseError(): # env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE', # 'FIELDS', 'bar', 'four five six') # with env.assertResponseError(): # env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE', # 'FIELDS', 'bar', 'four five six') # env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six') # 127.0.0.1:6379> flushdb # OK # 127.0.0.1:6379> ft.create foo SCHEMA bar text # OK # 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three" # OK # 127.0.0.1:6379> keys * # 1) "mydoc" # 2) "ft:foo/one" # 3) "idx:foo" # 4) "ft:foo/two" # 5) "ft:foo/three" # 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six" # Could not connect to Redis at 127.0.0.1:6379: Connection refused def testPrefixDeletedExpansions(env): env.skipOnCluster() env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt1', 'text', 'tag1', 'tag') # get the number of maximum expansions maxexpansions = int(env.cmd('ft.config', 'get', 'MAXEXPANSIONS')[0][1]) for x in range(maxexpansions): env.cmd('ft.add', 'idx', 'doc{}'.format(x), 1, 'fields', 'txt1', 'term{}'.format(x), 'tag1', 'tag{}'.format(x)) for x in range(maxexpansions): env.cmd('ft.del', 'idx', 'doc{}'.format(x)) env.cmd('ft.add', 'idx', 'doc_XXX', 1, 'fields', 'txt1', 'termZZZ', 'tag1', 'tagZZZ') # r = env.cmd('ft.search', 'idx', 'term*') # print(r) # r = env.cmd('ft.search', 'idx', '@tag1:{tag*}') # print(r) tmax = time.time() + 0.5 # 500ms max iters = 0 while time.time() < tmax: iters += 1 env.cmd('ft.debug', 'gc_forceinvoke', 'idx') r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}') if r[0]: break # print 'did {} iterations'.format(iters) r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}') env.assertEqual(toSortedFlatList([1, 'doc_XXX', ['txt1', 'termZZZ', 'tag1', 'tagZZZ']]), toSortedFlatList(r)) def testOptionalFilter(env): env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text') for x in range(100): env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields', 't1', 'hello world word{}'.format(x)) env.cmd('ft.explain', 'idx', '(~@t1:word20)') # print(r) r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})') def testIssue736(env): # for new RS 2.0, ft.add does not return certain errors env.skip() # 1. create the schema, we need a tag field env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text', 'n2', 'numeric', 't2', 'tag') # 2. create a single document to initialize at least one RSAddDocumentCtx env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 't2', 'foo, bar') # 3. 
create a second document with many filler fields to force a realloc: extra_fields = [] for x in range(20): extra_fields += ['nidx_fld{}'.format(x), 'val{}'.format(x)] extra_fields += ['n2', 'not-a-number', 't2', 'random, junk'] with env.assertResponseError(): env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', *extra_fields) def testCriteriaTesterDeactivated(): env = Env(moduleArgs='_MAX_RESULTS_TO_UNSORTED_MODE 1') env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text') env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello1 hey hello2') env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', 't1', 'hello2 hey') env.cmd('ft.add', 'idx', 'doc3', 1, 'fields', 't1', 'hey') expected_res = sorted([2L, 'doc1', ['t1', 'hello1 hey hello2'], 'doc2', ['t1', 'hello2 hey']]) actual_res = sorted(env.cmd('ft.search', 'idx', '(hey hello1)|(hello2 hey)')) env.assertEqual(expected_res, actual_res) def testIssue828(env): env.cmd('ft.create', 'beers', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'PHONETIC', 'dm:en', 'style', 'TAG', 'SORTABLE', 'abv', 'NUMERIC', 'SORTABLE') rv = env.cmd("FT.ADD", "beers", "802", "1.0", "FIELDS", "index", "25", "abv", "0.049", "name", "Hell or High Watermelon Wheat (2009)", "style", "Fruit / Vegetable Beer") env.assertEqual('OK', rv) def testIssue862(env): env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE') rv = env.cmd("FT.ADD", "idx", "doc1", "1.0", "FIELDS", "test", "foo") env.assertEqual('OK', rv) env.cmd("FT.SEARCH", "idx", "foo", 'WITHSORTKEYS') env.assertTrue(env.isUp()) def testIssue_884(env): env.expect('FT.create', 'idx', 'ON', 'HASH', 'STOPWORDS', '0', 'SCHEMA', 'title', 'text', 'weight', '50', 'subtitle', 'text', 'weight', '10', 'author', 'text', 'weight', '10', 'description', 'text', 'weight', '20').equal('OK') env.expect('FT.ADD', 'idx', 'doc4', '1.0', 'FIELDS', 'title', 'mohsin conversation the conversation tahir').equal('OK') env.expect('FT.ADD', 'idx', 'doc3', '1.0', 'FIELDS', 'title', 'Fareham Civilization Church - Sermons and conversations mohsin conversation the').equal('OK') env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'title', 'conversation the conversation - a drama about conversation, the science of conversation.').equal('OK') env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'title', 'mohsin conversation with the mohsin').equal('OK') expected = [2L, 'doc2', ['title', 'conversation the conversation - a drama about conversation, the science of conversation.'], 'doc4', ['title', 'mohsin conversation the conversation tahir']] res = env.cmd('FT.SEARCH', 'idx', '@title:(conversation) (@title:(conversation the conversation))=>{$inorder: true;$slop: 0}') env.assertEquals(len(expected), len(res)) for v in expected: env.assertContains(v, res) def testIssue_848(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test1', 'TEXT', 'SORTABLE').equal('OK') env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test1', 'foo').equal('OK') env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'test2', 'TEXT', 'SORTABLE').equal('OK') env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'test1', 'foo', 'test2', 'bar').equal('OK') env.expect('FT.SEARCH', 'idx', 'foo', 'SORTBY', 'test2', 'ASC').equal([2L, 'doc1', ['test1', 'foo'], 'doc2', ['test2', 'bar', 'test1', 'foo']]) def testMod_309(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK') for i in range(100000): env.expect('FT.ADD', 'idx', 'doc%d'%i, '1.0', 'FIELDS', 'test', 'foo').equal('OK') res = env.cmd('FT.AGGREGATE', 'idx', 'foo') 
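    # FT.AGGREGATE replies with one leading count element followed by one array per
    # result row, which is why len(res) is expected to be 100000 + 1 here. The
    # WITHCURSOR variant below re-counts the same rows by draining FT.CURSOR READ in
    # batches and summing len(batch) - 1 per reply until the returned cursor id is 0.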
env.assertEqual(len(res), 100001) # test with cursor res = env.cmd('FT.AGGREGATE', 'idx', 'foo', 'WITHCURSOR') l = len(res[0]) - 1 # do not count the number of results (the first element in the results) cursor = res[1] while cursor != 0: r, cursor = env.cmd('FT.CURSOR', 'READ', 'idx', str(cursor)) l += (len(r) - 1) env.assertEqual(l, 100000) def testIssue_865(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', '1', 'TEXT', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', '1', 'foo1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', '1', 'foo2').equal('OK') env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'ASC').equal([2, 'doc1', ['1', 'foo1'], 'doc2', ['1', 'foo2']]) env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'DESC').equal([2, 'doc2', ['1', 'foo2'], 'doc1', ['1', 'foo1']]) env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'bad').error() env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad', 'bad').error() env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad').error() env.expect('ft.search', 'idx', 'foo*', 'SORTBY').error() def testIssue_779(env): # FT.ADD should return NOADD and not change the doc if value < same_value, but it returns OK and makes the change. # Note that "greater than" ">" does not have the same bug. env.cmd('FT.CREATE idx2 ON HASH SCHEMA ot1 TAG') env.cmd('FT.ADD idx2 doc2 1.0 FIELDS newf CAT ot1 4001') res = env.cmd('FT.GET idx2 doc2') env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "CAT", "ot1", "4001"])) # NOADD is expected since 4001 is not < 4000, and no updates to the doc2 is expected as a result env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4000 FIELDS newf DOG ot1 4000', 'NOADD') res = env.cmd('FT.GET idx2 doc2') env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "CAT", "ot1", "4001"])) # OK is expected since 4001 < 4002 and the doc2 is updated env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf DOG ot1 4002').equal('OK') res = env.cmd('FT.GET idx2 doc2') env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "DOG", "ot1", "4002"])) # OK is NOT expected since 4002 is not < 4002 # We expect NOADD and doc2 update; however, we get OK and doc2 updated # After fix, @ot1 implicitly converted to a number, thus we expect NOADD env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf FISH ot1 4002').equal('NOADD') env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if to_number(@ot1)<4002 FIELDS newf FISH ot1 4002').equal('NOADD') env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_str(4002) FIELDS newf FISH ot1 4002').equal('NOADD') res = env.cmd('FT.GET idx2 doc2') env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "DOG", "ot1", "4002"])) # OK and doc2 update is expected since 4002 < 4003 env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4003 FIELDS newf HORSE ot1 4003').equal('OK') res = env.cmd('FT.GET idx2 doc2') env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "HORSE", "ot1", "4003"])) # Expect NOADD since 4003 is not > 4003 env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4003 FIELDS newf COW ot1 4003').equal('NOADD') env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if 4003<@ot1 FIELDS newf COW ot1 4003').equal('NOADD') # Expect OK and doc2 updated since 4003 > 4002 env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4002 FIELDS newf PIG ot1 4002').equal('OK') res = env.cmd('FT.GET idx2 doc2') env.assertEqual(toSortedFlatList(res), 
toSortedFlatList(["newf", "PIG", "ot1", "4002"])) # Syntax errors env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4-002 FIELDS newf DOG ot1 4002').contains('Syntax error') env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_number(4-002) FIELDS newf DOG ot1 4002').contains('Syntax error') def testUnknownSymbolErrorOnConditionalAdd(env): env.expect('FT.CREATE idx ON HASH SCHEMA f1 TAG f2 NUMERIC NOINDEX f3 TAG NOINDEX').ok() env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').ok() env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').error() def testWrongResultsReturnedBySkipOptimization(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT', 'f2', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'f1', 'foo', 'f2', 'bar').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'f1', 'moo', 'f2', 'foo').equal('OK') env.expect('ft.search', 'idx', 'foo @f2:moo').equal([0L]) def testErrorWithApply(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK') env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo bar').equal('OK') err = env.cmd('FT.AGGREGATE', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'split()')[1] env.assertEqual(str(err[0]), 'Invalid number of arguments for split') def testSummerizeWithAggregateRaiseError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK') env.expect('ft.aggregate', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', '1', 'test', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0').error() def testSummerizeHighlightParseError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK') env.expect('ft.search', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', 'WITHSCORES').error() env.expect('ft.search', 'idx', 'foo2', 'HIGHLIGHT', 'FIELDS', 'WITHSCORES').error() def testCursorBadArgument(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'WITHCURSOR', 'COUNT', 'BAD').error() def testLimitBadArgument(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK') env.expect('ft.search', 'idx', '*', 'LIMIT', '1').error() def testOnTimeoutBadArgument(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK') env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'bad').error() def testAggregateSortByWrongArgument(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK') env.expect('ft.add', 
'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'SORTBY', 'bad').error() def testAggregateSortByMaxNumberOfFields(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test1', 'TEXT', 'SORTABLE', 'test2', 'TEXT', 'SORTABLE', 'test3', 'TEXT', 'SORTABLE', 'test4', 'TEXT', 'SORTABLE', 'test5', 'TEXT', 'SORTABLE', 'test6', 'TEXT', 'SORTABLE', 'test7', 'TEXT', 'SORTABLE', 'test8', 'TEXT', 'SORTABLE', 'test9', 'TEXT', 'SORTABLE' ).equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *['@test%d' % (i + 1) for i in range(9)]).error() args = ['@test%d' % (i + 1) for i in range(8)] + ['bad'] env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error() args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX', 'bad'] env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error() args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX'] env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error() def testNumericFilterError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK') env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad', '2').error() env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', 'bad').error() env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0').error() env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad').error() env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', '2', 'FILTER', 'test', '0', 'bla').error() def testGeoFilterError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK') env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1').error() env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', 'bad' , '2', '3', 'km').error() env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , 'bad', '3', 'km').error() env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', 'bad', 'km').error() env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', '3', 'bad').error() def testReducerError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', 'bad').error() env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as').error() def testGroupbyError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE').error() if not env.isCluster(): # todo: remove once fix on coordinator env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test1').error() env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'bad', '0').error() if not env.isCluster(): # todo: remove once fix on coordinator env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'SUM', '1', 
'@test1').error() def testGroupbyWithSort(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK') env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', '1').equal('OK') env.expect('ft.add', 'idx', 'doc3', '1.0', 'FIELDS', 'test', '2').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '2', '@test', 'ASC', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as', 'count').equal([2L, ['test', '2', 'count', '1'], ['test', '1', 'count', '2']]) def testApplyError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'APPLY', 'split(@test)', 'as').error() def testLoadError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad').error() env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad', 'test').error() env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', 'test').error() env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', '@test').error() def testMissingArgsError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.aggregate', 'idx').error() def testUnexistsScorer(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.search', 'idx', '*', 'SCORER', 'bad').error() def testHighlightWithUnknowsProperty(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'HIGHLIGHT', 'FIELDS', '1', 'test1').error() def testBadFilterExpression(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', 'blabla').error() if not env.isCluster(): # todo: remove once fix on coordinator env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', '@test1 > 1').error() def testWithSortKeysOnNoneSortableValue(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.search', 'idx', '*', 'WITHSORTKEYS', 'SORTBY', 'test').equal([1L, 'doc1', '$foo', ['test', 'foo']]) def testWithWithRawIds(env): env.skipOnCluster() # todo: remove once fix on coordinator env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') waitForIndex(env, 'idx') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.search', 'idx', '*', 'WITHRAWIDS').equal([1L, 'doc1', 1L, ['test', 'foo']]) def testUnkownIndex(env): env.skipOnCluster() # todo: remove once fix on coordinator env.expect('ft.aggregate').error() env.expect('ft.aggregate', 'idx', '*').error() env.expect('ft.aggregate', 'idx', '*', 'WITHCURSOR').error() def testExplainError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 
'test', 'TEXT').equal('OK') env.expect('FT.EXPLAIN', 'idx', '(').error() def testBadCursor(env): env.expect('FT.CURSOR', 'READ', 'idx').error() env.expect('FT.CURSOR', 'READ', 'idx', '1111').error() env.expect('FT.CURSOR', 'READ', 'idx', 'bad').error() env.expect('FT.CURSOR', 'DROP', 'idx', '1111').error() env.expect('FT.CURSOR', 'bad', 'idx', '1111').error() def testGroupByWithApplyError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') err = env.cmd('FT.AGGREGATE', 'idx', '*', 'APPLY', 'split()', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'AS', 'count')[1] assertEqualIgnoreCluster(env, str(err[0]), 'Invalid number of arguments for split') def testSubStrErrors(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a', 'APPLY', 'substr(@a,0,4)')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,-2)', 'as', 'a') env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,1000)', 'as', 'a') env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",-1,2)', 'as', 'a') env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test")', 'as', 'a') env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr(1)', 'as', 'a') env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test")', 'as', 'a') env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test", "test")', 'as', 'a') env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "-1", "-1")', 'as', 'a') env.assertTrue(env.isUp()) def testToUpperLower(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']]) env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower("FOO")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']]) env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']]) env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper("foo")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']]) err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper()', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower()', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]]) env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]]) assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1,2)', 'as', 
'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1,2)', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) def testMatchedTerms(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]]) env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]]) env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]]) env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(-100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]]) env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms("test")', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]]) def testStrFormatError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK') err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format()', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%s")', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%", "test")', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%b", "test")', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format(5)', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'b', 'APPLY', 'format("%s", @b)', 'as', 'a').equal([1L, ['test', 'foo', 'b', None, 'a', '(null)']]) # working example env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%%s-test", "test")', 'as', 'a').equal([1L, ['a', '%s-test']]) env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%s-test", "test")', 'as', 'a').equal([1L, ['a', 'test-test']]) def testTimeFormatError(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK') err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt()', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) if not env.isCluster(): # todo: remove once fix on coordinator env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test1)', 'as', 'a').error() env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test)', 'as', 'a') env.assertTrue(env.isUp()) err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, 
4)', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt("awfawf")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(235325153152356426246246246254)', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'hour("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'minute("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'day("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'month("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofweek("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofmonth("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'year("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) def testMonthOfYear(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK') env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '4']]) err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test, 112)', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear()', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("bad")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]]) def testParseTime(env): conn = getConnectionByEnv(env) conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TAG') conn.execute_command('HSET', 'doc1', 'test', '20210401') # check for errors err = conn.execute_command('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'parsetime()', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = conn.execute_command('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'parsetime(11)', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), 
redis.exceptions.ResponseError) err = conn.execute_command('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'parsetime(11,22)', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) # valid test res = conn.execute_command('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'parsetime(@test, "%Y%m%d")', 'as', 'a') assertEqualIgnoreCluster(env, res, [1L, ['test', '20210401', 'a', '1617235200']]) def testMathFunctions(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK') env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'exp(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', 'inf']]) env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'ceil(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '12234556']]) def testErrorOnOpperation(env): env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK') env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK') err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '1 + split()', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split() + 1', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '"bad" + "bad"', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split("bad" + "bad")', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '!(split("bad" + "bad"))', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'APPLY', '!@test', 'as', 'a')[1] assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError) def testSortkeyUnsortable(env): env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'test', 'text') env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'test', 'foo') rv = env.cmd('ft.aggregate', 'idx', 'foo', 'withsortkeys', 'load', '1', '@test', 'sortby', '1', '@test') env.assertEqual([1, '$foo', ['test', 'foo']], rv) def testIssue919(env): # This only works if the missing field has a lower sortable index # than the present field.. 
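    # (In the schema below, 't1' is declared before 'n1', and doc1 is only given 'n1'.)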
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text', 'sortable', 'n1', 'numeric', 'sortable') env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'n1', 42) rv = env.cmd('ft.search', 'idx', '*', 'sortby', 't1', 'desc') env.assertEqual([1L, 'doc1', ['n1', '42']], rv) def testIssue1074(env): # Ensure that sortable fields are returned in their string form from the # document env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text', 'n1', 'numeric', 'sortable') env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 'n1', 1581011976800) rv = env.cmd('ft.search', 'idx', '*', 'sortby', 'n1') env.assertEqual([1L, 'doc1', ['n1', '1581011976800', 't1', 'hello']], rv) def testIssue1085(env): env.skipOnCluster() env.cmd('FT.CREATE issue1085 ON HASH SCHEMA foo TEXT SORTABLE bar NUMERIC SORTABLE') for i in range(1, 10): env.cmd('FT.ADD issue1085 document_%d 1 REPLACE FIELDS foo foo%d bar %d' % (i, i, i)) res = env.cmd('FT.SEARCH', 'issue1085', '@bar:[8 8]') env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'document_8', ['foo', 'foo8', 'bar', '8']])) for i in range(1, 10): env.cmd('FT.ADD issue1085 document_8 1 REPLACE FIELDS foo foo8 bar 8') env.expect('ft.debug GC_FORCEINVOKE issue1085').equal('DONE') res = env.cmd('FT.SEARCH', 'issue1085', '@bar:[8 8]') env.assertEqual(toSortedFlatList(res), toSortedFlatList([1, 'document_8', ['foo', 'foo8', 'bar', '8']])) def grouper(iterable, n, fillvalue=None): "Collect data into fixed-length chunks or blocks" from itertools import izip_longest # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx args = [iter(iterable)] * n return izip_longest(fillvalue=fillvalue, *args) def to_dict(r): return {r[i]: r[i + 1] for i in range(0, len(r), 2)} def testInfoError(env): env.expect('ft.info', 'no_idx').error() def testIndexNotRemovedFromCursorListAfterRecreated(env): env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT').ok() env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0]) env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT').error() env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0]) def testHindiStemmer(env): env.cmd('FT.CREATE', 'idxTest', 'LANGUAGE_FIELD', '__language', 'SCHEMA', 'body', 'TEXT') env.cmd('FT.ADD', 'idxTest', 'doc1', 1.0, 'LANGUAGE', 'hindi', 'FIELDS', 'body', u'अँगरेजी अँगरेजों अँगरेज़') res = env.cmd('FT.SEARCH', 'idxTest', u'अँगरेज़') res1 = {res[2][i]:res[2][i + 1] for i in range(0, len(res[2]), 2)} env.assertEqual(u'अँगरेजी अँगरेजों अँगरेज़', unicode(res1['body'], 'utf-8')) def testMOD507(env): env.skipOnCluster() env.expect('ft.create idx ON HASH SCHEMA t1 TEXT').ok() for i in range(50): env.expect('ft.add idx doc-%d 1.0 FIELDS t1 foo' % i).ok() for i in range(50): env.expect('del doc-%d' % i).equal(1) res = env.cmd('FT.SEARCH', 'idx', '*', 'WITHSCORES', 'SUMMARIZE', 'FRAGS', '1', 'LEN', '25', 'HIGHLIGHT', 'TAGS', "<span style='background-color:yellow'>", "</span>") # from redisearch 2.0, docs are removed from index when `DEL` is called env.assertEqual(len(res), 1) def testUnseportedSortableTypeErrorOnTags(env): env.skipOnCluster() env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT SORTABLE f2 NUMERIC SORTABLE NOINDEX f3 TAG SORTABLE NOINDEX f4 TEXT SORTABLE NOINDEX').ok() env.expect('FT.ADD idx doc1 1.0 FIELDS f1 foo1 f2 1 f3 foo1 f4 foo1').ok() env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL FIELDS f2 2 f3 foo2 f4 foo2').ok() res = env.cmd('HGETALL doc1') env.assertEqual(toSortedFlatList(res), toSortedFlatList(['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2', '__score', '1.0'])) res = env.cmd('FT.SEARCH idx 
*') env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'doc1', ['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2']])) def testIssue1158(env): env.cmd('FT.CREATE idx ON HASH SCHEMA txt1 TEXT txt2 TEXT txt3 TEXT') env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 10 txt2 num1') res = env.cmd('FT.GET idx doc1') env.assertEqual(toSortedFlatList(res), toSortedFlatList(['txt1', '10', 'txt2', 'num1'])) # only 1st checked (2nd returns an error) env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt1||to_number(@txt2)<5 FIELDS txt1 5').equal('OK') env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt3&&to_number(@txt2)<5 FIELDS txt1 5').equal('NOADD') # both are checked env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD') env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)<42 FIELDS txt2 num2').equal('OK') env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD') env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)<42 FIELDS txt2 num2').equal('NOADD') res = env.cmd('FT.GET idx doc1') env.assertEqual(toSortedFlatList(res), toSortedFlatList(['txt1', '5', 'txt2', 'num2'])) def testIssue1159(env): env.cmd('FT.CREATE idx ON HASH SCHEMA f1 TAG') for i in range(1000): env.cmd('FT.add idx doc%d 1.0 FIELDS f1 foo' % i) def testIssue1169(env): env.cmd('FT.CREATE idx ON HASH SCHEMA txt1 TEXT txt2 TEXT') env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 foo') env.expect('FT.AGGREGATE idx foo GROUPBY 1 @txt1 REDUCE FIRST_VALUE 1 @txt2 as test').equal([1L, ['txt1', 'foo', 'test', None]]) def testIssue1184(env): env.skipOnCluster() field_types = ['TEXT', 'NUMERIC', 'TAG'] env.assertOk(env.execute_command('ft.config', 'set', 'FORK_GC_CLEAN_THRESHOLD', 0)) for ft in field_types: env.assertOk(env.execute_command('FT.CREATE idx ON HASH SCHEMA field ' + ft)) res = env.execute_command('ft.info', 'idx') d = {res[i]: res[i + 1] for i in range(0, len(res), 2)} env.assertEqual(d['inverted_sz_mb'], '0') env.assertEqual(d['num_records'], '0') value = '42' env.assertOk(env.execute_command('FT.ADD idx doc0 1 FIELD field ' + value)) doc = env.cmd('FT.SEARCH idx *') env.assertEqual(doc, [1L, 'doc0', ['field', value]]) res = env.execute_command('ft.info', 'idx') d = {res[i]: res[i + 1] for i in range(0, len(res), 2)} env.assertGreater(d['inverted_sz_mb'], '0') env.assertEqual(d['num_records'], '1') env.assertEqual(env.execute_command('FT.DEL idx doc0'), 1) env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx') res = env.execute_command('ft.info', 'idx') d = {res[i]: res[i + 1] for i in range(0, len(res), 2)} env.assertEqual(d['inverted_sz_mb'], '0') env.assertEqual(d['num_records'], '0') env.cmd('FT.DROP idx') env.cmd('DEL doc0') def testIndexListCommand(env): env.expect('FT.CREATE idx1 ON HASH SCHEMA n NUMERIC').ok() env.expect('FT.CREATE idx2 ON HASH SCHEMA n NUMERIC').ok() res = env.cmd('FT._LIST') env.assertEqual(set(res), set(['idx1', 'idx2'])) env.expect('FT.DROP idx1').ok() env.expect('FT._LIST').equal(['idx2']) env.expect('FT.CREATE idx3 ON HASH SCHEMA n NUMERIC').ok() res = env.cmd('FT._LIST') env.assertEqual(set(res), set(['idx2', 'idx3'])) def testIssue1208(env): env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC') env.cmd('FT.ADD idx doc1 1 FIELDS n 1.0321e5') env.cmd('FT.ADD idx doc2 1 FIELDS n 101.11') env.cmd('FT.ADD idx doc3 1 FIELDS n 0.0011') env.expect('FT.SEARCH', 'idx', '@n:[1.1432E3 inf]').equal([1L, 'doc1', ['n', 
'1.0321e5']]) env.expect('FT.SEARCH', 'idx', '@n:[-1.12E-3 1.12E-1]').equal([1L, 'doc3', ['n', '0.0011']]) res = [3L, 'doc1', ['n', '1.0321e5'], 'doc2', ['n', '101.11'], 'doc3', ['n', '0.0011']] env.expect('FT.SEARCH', 'idx', '@n:[-inf inf]').equal(res) env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n>42e3 FIELDS n 100').equal('NOADD') env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n<42e3 FIELDS n 100').ok() # print env.cmd('FT.SEARCH', 'idx', '@n:[-inf inf]') def testFieldsCaseSensetive(env): # this test will not pass on coordinator coorently as if one shard return empty results coordinator # will not reflect the errors env.skipOnCluster() conn = getConnectionByEnv(env) env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC f TEXT t TAG g GEO') # make sure text fields are case sesitive conn.execute_command('hset', 'doc1', 'F', 'test') conn.execute_command('hset', 'doc2', 'f', 'test') env.expect('ft.search idx @f:test').equal([1L, 'doc2', ['f', 'test']]) env.expect('ft.search idx @F:test').equal([0]) # make sure numeric fields are case sesitive conn.execute_command('hset', 'doc3', 'N', '1.0') conn.execute_command('hset', 'doc4', 'n', '1.0') env.expect('ft.search', 'idx', '@n:[0 2]').equal([1L, 'doc4', ['n', '1.0']]) env.expect('ft.search', 'idx', '@N:[0 2]').equal([0]) # make sure tag fields are case sesitive conn.execute_command('hset', 'doc5', 'T', 'tag') conn.execute_command('hset', 'doc6', 't', 'tag') env.expect('ft.search', 'idx', '@t:{tag}').equal([1L, 'doc6', ['t', 'tag']]) env.expect('ft.search', 'idx', '@T:{tag}').equal([0]) # make sure geo fields are case sesitive conn.execute_command('hset', 'doc8', 'G', '-113.524,53.5244') conn.execute_command('hset', 'doc9', 'g', '-113.524,53.5244') env.expect('ft.search', 'idx', '@g:[-113.52 53.52 20 mi]').equal([1L, 'doc9', ['g', '-113.524,53.5244']]) env.expect('ft.search', 'idx', '@G:[-113.52 53.52 20 mi]').equal([0]) # make sure search filter are case sensitive env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'n', 0, 2).equal([1L, 'doc4', ['n', '1.0']]) env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'N', 0, 2).equal([0]) # make sure RETURN are case sensitive env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'n').equal([1L, 'doc4', ['n', '1']]) env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'N').equal([1L, 'doc4', []]) # make sure SORTBY are case sensitive conn.execute_command('hset', 'doc7', 'n', '1.1') env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'n').equal([2L, 'doc4', ['n', '1.0'], 'doc7', ['n', '1.1']]) env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'N').error().contains('not loaded nor in schema') # make sure aggregation load are case sensitive env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n').equal([1L, ['n', '1'], ['n', '1.1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@N').equal([1L, [], []]) # make sure aggregation apply are case sensitive env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'apply', '@n', 'as', 'r').equal([1L, ['n', '1', 'r', '1'], ['n', '1.1', 'r', '1.1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'apply', '@N', 'as', 'r').error().contains('not loaded in pipeline') # make sure aggregation filter are case sensitive env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'filter', '@n==1.0').equal([1L, ['n', '1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'filter', '@N==1.0').error().contains('not loaded in pipeline') # make sure aggregation groupby are case sensitive 
env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'groupby', '1', '@n', 'reduce', 'count', 0, 'as', 'count').equal([2L, ['n', '1', 'count', '1'], ['n', '1.1', 'count', '1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'groupby', '1', '@N', 'reduce', 'count', 0, 'as', 'count').error().contains('No such property') # make sure aggregation sortby are case sensitive env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'sortby', '1', '@n').equal([2L, ['n', '1'], ['n', '1.1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'sortby', '1', '@N').error().contains('not loaded') def testSortedFieldsCaseSensetive(env): # this test will not pass on coordinator coorently as if one shard return empty results coordinator # will not reflect the errors env.skipOnCluster() conn = getConnectionByEnv(env) env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE f TEXT SORTABLE t TAG SORTABLE g GEO SORTABLE') # make sure text fields are case sesitive conn.execute_command('hset', 'doc1', 'F', 'test') conn.execute_command('hset', 'doc2', 'f', 'test') env.expect('ft.search idx @f:test').equal([1L, 'doc2', ['f', 'test']]) env.expect('ft.search idx @F:test').equal([0]) # make sure numeric fields are case sesitive conn.execute_command('hset', 'doc3', 'N', '1.0') conn.execute_command('hset', 'doc4', 'n', '1.0') env.expect('ft.search', 'idx', '@n:[0 2]').equal([1L, 'doc4', ['n', '1.0']]) env.expect('ft.search', 'idx', '@N:[0 2]').equal([0]) # make sure tag fields are case sesitive conn.execute_command('hset', 'doc5', 'T', 'tag') conn.execute_command('hset', 'doc6', 't', 'tag') env.expect('ft.search', 'idx', '@t:{tag}').equal([1L, 'doc6', ['t', 'tag']]) env.expect('ft.search', 'idx', '@T:{tag}').equal([0]) # make sure geo fields are case sesitive conn.execute_command('hset', 'doc8', 'G', '-113.524,53.5244') conn.execute_command('hset', 'doc9', 'g', '-113.524,53.5244') env.expect('ft.search', 'idx', '@g:[-113.52 53.52 20 mi]').equal([1L, 'doc9', ['g', '-113.524,53.5244']]) env.expect('ft.search', 'idx', '@G:[-113.52 53.52 20 mi]').equal([0]) # make sure search filter are case sensitive env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'n', 0, 2).equal([1L, 'doc4', ['n', '1.0']]) env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'N', 0, 2).equal([0]) # make sure RETURN are case sensitive env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'n').equal([1L, 'doc4', ['n', '1']]) env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'N').equal([1L, 'doc4', []]) # make sure SORTBY are case sensitive conn.execute_command('hset', 'doc7', 'n', '1.1') env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'n').equal([2L, 'doc4', ['n', '1.0'], 'doc7', ['n', '1.1']]) env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'N').error().contains('not loaded nor in schema') # make sure aggregation apply are case sensitive env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'apply', '@n', 'as', 'r').equal([1L, ['n', '1', 'r', '1'], ['n', '1.1', 'r', '1.1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'apply', '@N', 'as', 'r').error().contains('not loaded in pipeline') # make sure aggregation filter are case sensitive env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'filter', '@n==1.0').equal([1L, ['n', '1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'filter', '@N==1.0').error().contains('not loaded in pipeline') # make sure aggregation groupby are case sensitive env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'groupby', '1', '@n', 'reduce', 'count', 0, 'as', 
'count').equal([2L, ['n', '1', 'count', '1'], ['n', '1.1', 'count', '1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'groupby', '1', '@N', 'reduce', 'count', 0, 'as', 'count').error().contains('No such property') # make sure aggregation sortby are case sensitive env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'sortby', '1', '@n').equal([2L, ['n', '1'], ['n', '1.1']]) env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'sortby', '1', '@N').error().contains('not loaded') def testScoreLangPayloadAreReturnedIfCaseNotMatchToSpecialFields(env): conn = getConnectionByEnv(env) env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE') conn.execute_command('hset', 'doc1', 'n', '1.0', '__Language', 'eng', '__Score', '1', '__Payload', '10') res = env.cmd('ft.search', 'idx', '@n:[0 2]') env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'doc1', ['n', '1.0', '__Language', 'eng', '__Score', '1', '__Payload', '10']])) def testReturnSameFieldDifferentCase(env): conn = getConnectionByEnv(env) env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE') conn.execute_command('hset', 'doc1', 'n', '1.0', 'N', '2.0') env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '2', 'n', 'N').equal([1L, 'doc1', ['n', '1', 'N', '2']]) def testCreateIfNX(env): env.expect('FT._CREATEIFNX idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE').ok() env.expect('FT._CREATEIFNX idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE').ok() def testDropIfX(env): env.expect('FT._DROPIFX idx').ok() def testDeleteIfX(env): env.expect('FT._DROPINDEXIFX idx').ok() def testAlterIfNX(env): env.expect('FT.CREATE idx ON HASH SCHEMA n NUMERIC').ok() env.expect('FT._ALTERIFNX idx SCHEMA ADD n1 NUMERIC').ok() env.expect('FT._ALTERIFNX idx SCHEMA ADD n1 NUMERIC').ok() res = env.cmd('ft.info idx') res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}['fields'] env.assertEqual(res, [['n', 'type', 'NUMERIC'], ['n1', 'type', 'NUMERIC']]) def testAliasAddIfNX(env): env.expect('FT.CREATE idx ON HASH SCHEMA n NUMERIC').ok() env.expect('FT._ALIASADDIFNX a1 idx').ok() env.expect('FT._ALIASADDIFNX a1 idx').ok() def testAliasDelIfX(env): env.expect('FT._ALIASDELIFX a1').ok() def testEmptyDoc(env): conn = getConnectionByEnv(env) env.expect('FT.CREATE idx SCHEMA t TEXT').ok() env.expect('FT.ADD idx doc1 1 FIELDS t foo').ok() env.expect('FT.ADD idx doc2 1 FIELDS t foo').ok() env.expect('FT.ADD idx doc3 1 FIELDS t foo').ok() env.expect('FT.ADD idx doc4 1 FIELDS t foo').ok() env.expect('FT.SEARCH idx * limit 0 0').equal([4]) conn.execute_command('DEL', 'doc1') conn.execute_command('DEL', 'doc3') env.expect('FT.SEARCH idx *').equal([2L, 'doc2', ['t', 'foo'], 'doc4', ['t', 'foo']]) def testRED47209(env): conn = getConnectionByEnv(env) env.expect('FT.CREATE idx SCHEMA t TEXT').ok() conn.execute_command('hset', 'doc1', 't', 'foo') if env.isCluster(): # on cluster we have WITHSCORES set unconditionally for FT.SEARCH res = [1L, 'doc1', ['t', 'foo']] else: res = [1L, 'doc1', None, ['t', 'foo']] env.expect('FT.SEARCH idx foo WITHSORTKEYS LIMIT 0 1').equal(res) def testInvertedIndexWasEntirelyDeletedDuringCursor(): env = Env(moduleArgs='GC_POLICY FORK FORK_GC_CLEAN_THRESHOLD 1') env.skipOnCluster() env.expect('FT.CREATE idx SCHEMA t TEXT').ok() env.expect('HSET doc1 t foo').equal(1) env.expect('HSET doc2 t foo').equal(1) res, cursor = env.cmd('FT.AGGREGATE idx foo WITHCURSOR COUNT 1') env.assertEqual(res, [1L, []]) # delete both documents and run the GC to clean 'foo' inverted index env.expect('DEL doc1').equal(1) env.expect('DEL 
doc2').equal(1) env.cmd('FT.DEBUG GC_FORCEINVOKE idx') # make sure the inverted index was cleaned env.expect('FT.DEBUG DUMP_INVIDX idx foo').error().contains('not find the inverted index') # read from the cursor res, cursor = env.cmd('FT.CURSOR READ idx %d' % cursor) env.assertEqual(res, [0L]) env.assertEqual(cursor, 0) def testNegativeOnly(env): conn = getConnectionByEnv(env) env.expect('FT.CREATE idx SCHEMA t TEXT').ok() conn.execute_command('HSET', 'doc1', 'not', 'foo') env.expect('FT.SEARCH idx *').equal([1L, 'doc1', ['not', 'foo']]) env.expect('FT.SEARCH', 'idx', '-bar').equal([1L, 'doc1', ['not', 'foo']]) def testNotOnly(env): conn = getConnectionByEnv(env) conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'txt1', 'TEXT') conn.execute_command('HSET', 'a', 'txt1', 'hello', 'txt2', 'world') conn.execute_command('HSET', 'b', 'txt1', 'world', 'txt2', 'hello') env.expect('ft.search idx !world').equal([1L, 'b', ['txt1', 'world', 'txt2', 'hello']]) def testServerVer(env): env.assertTrue(check_server_version(env, "0.0.0")) env.assertTrue(not check_server_version(env, "500.0.0")) env.assertTrue(check_module_version(env, "20005")) env.assertTrue(not check_module_version(env, "10000000")) def testSchemaWithAs(env): conn = getConnectionByEnv(env) # sanity conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'txt', 'AS', 'foo', 'TEXT') conn.execute_command('HSET', 'a', 'txt', 'hello') conn.execute_command('HSET', 'b', 'foo', 'world') for _ in env.retry_with_rdb_reload(): env.expect('ft.search idx @txt:hello').equal([0L]) env.expect('ft.search idx @txt:world').equal([0L]) env.expect('ft.search idx @foo:hello').equal([1L, 'a', ['txt', 'hello']]) env.expect('ft.search idx @foo:world').equal([0L]) # RETURN from schema env.expect('ft.search idx hello RETURN 1 txt').equal([1L, 'a', ['txt', 'hello']]) env.expect('ft.search idx hello RETURN 1 foo').equal([1L, 'a', ['foo', 'hello']]) env.expect('ft.search idx hello RETURN 3 txt AS baz').equal([1L, 'a', ['baz', 'hello']]) env.expect('ft.search idx hello RETURN 3 foo AS baz').equal([1L, 'a', []]) env.expect('ft.search idx hello RETURN 6 txt AS baz txt AS bar').equal([1L, 'a', ['baz', 'hello', 'bar', 'hello']]) env.expect('ft.search idx hello RETURN 6 txt AS baz txt AS baz').equal([1L, 'a', ['baz', 'hello']]) # RETURN outside of schema conn.execute_command('HSET', 'a', 'not_in_schema', '42') res = conn.execute_command('HGETALL', 'a') env.assertEqual(res, {'txt': 'hello', 'not_in_schema': '42'}) env.expect('ft.search idx hello RETURN 3 not_in_schema AS txt2').equal([1L, 'a', ['txt2', '42']]) env.expect('ft.search idx hello RETURN 1 not_in_schema').equal([1L, 'a', ['not_in_schema', '42']]) env.expect('ft.search idx hello').equal([1L, 'a', ['txt', 'hello', 'not_in_schema', '42']]) env.expect('ft.search idx hello RETURN 3 not_exist as txt2').equal([1L, 'a', []]) env.expect('ft.search idx hello RETURN 1 not_exist').equal([1L, 'a', []]) env.expect('ft.search idx hello RETURN 3 txt as as').error().contains('Alias for RETURN cannot be `AS`') # LOAD for FT.AGGREGATE # for path - can rename env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '1', '@txt').equal([1L, ['txt', 'hello']]) env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '3', '@txt', 'AS', 'txt1').equal([1L, ['txt1', 'hello']]) # for name - cannot rename env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '1', '@foo').equal([1L, ['foo', 'hello']]) env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '3', '@foo', 'AS', 'foo1').equal([1L, []]) # for for not in schema - can rename env.expect('ft.aggregate', 
'idx', 'hello', 'LOAD', '1', '@not_in_schema').equal([1L, ['not_in_schema', '42']]) env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '3', '@not_in_schema', 'AS', 'NIS').equal([1L, ['NIS', '42']]) conn.execute_command('HDEL', 'a', 'not_in_schema') def testSchemaWithAs_Alter(env): conn = getConnectionByEnv(env) # sanity conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'txt', 'AS', 'foo', 'TEXT') conn.execute_command('HSET', 'a', 'txt', 'hello') conn.execute_command('HSET', 'b', 'foo', 'world') # FT.ALTER conn.execute_command('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'foo', 'AS', 'bar', 'TEXT') waitForIndex(env, 'idx') env.expect('ft.search idx @bar:hello').equal([0L]) env.expect('ft.search idx @bar:world').equal([1L, 'b', ['foo', 'world']]) env.expect('ft.search idx @foo:world').equal([0L]) def testSchemaWithAs_Duplicates(env): conn = getConnectionByEnv(env) conn.execute_command('HSET', 'a', 'txt', 'hello') # Error if field name is duplicated res = env.expect('FT.CREATE', 'conflict1', 'SCHEMA', 'txt1', 'AS', 'foo', 'TEXT', 'txt2', 'AS', 'foo', 'TAG') \ .error().contains('Duplicate field in schema - foo') # Success if field path is duplicated res = env.expect('FT.CREATE', 'conflict2', 'SCHEMA', 'txt', 'AS', 'foo1', 'TEXT', 'txt', 'AS', 'foo2', 'TEXT').ok() waitForIndex(env, 'conflict2') env.expect('ft.search conflict2 @foo1:hello').equal([1L, 'a', ['txt', 'hello']]) env.expect('ft.search conflict2 @foo2:hello').equal([1L, 'a', ['txt', 'hello']]) env.expect('ft.search conflict2 @foo1:world').equal([0L]) env.expect('ft.search conflict2 @foo2:world').equal([0L]) def testMod1407(env): conn = getConnectionByEnv(env) env.expect('FT.CREATE', 'idx', 'SCHEMA', 'limit', 'TEXT', 'LimitationTypeID', 'TAG', 'LimitationTypeDesc', 'TEXT').ok() conn.execute_command('HSET', 'doc1', 'limit', 'foo1', 'LimitationTypeID', 'boo1', 'LimitationTypeDesc', 'doo1') conn.execute_command('HSET', 'doc2', 'limit', 'foo2', 'LimitationTypeID', 'boo2', 'LimitationTypeDesc', 'doo2') env.expect('FT.AGGREGATE', 'idx', '*', 'SORTBY', '3', '@limit', '@LimitationTypeID', 'ASC').equal([2L, ['limit', 'foo1', 'LimitationTypeID', 'boo1'], ['limit', 'foo2', 'LimitationTypeID', 'boo2']]) # make sure the crashed query is not crashing anymore env.expect('FT.AGGREGATE', 'idx', '*', 'GROUPBY', '2', 'LLimitationTypeID', 'LLimitationTypeDesc', 'REDUCE', 'COUNT', '0') # make sure correct query not crashing and return the right results env.expect('FT.AGGREGATE', 'idx', '*', 'GROUPBY', '2', '@LimitationTypeID', '@LimitationTypeDesc', 'REDUCE', 'COUNT', '0').equal([2L, ['LimitationTypeID', 'boo2', 'LimitationTypeDesc', 'doo2', '__generated_aliascount', '1'], ['LimitationTypeID', 'boo1', 'LimitationTypeDesc', 'doo1', '__generated_aliascount', '1']]) def testMod1452(env): if not env.isCluster(): # this test is only relevant on cluster env.skip() conn = getConnectionByEnv(env) env.expect('FT.CREATE', 'idx', 'SCHEMA', 't', 'TEXT').ok() conn.execute_command('HSET', 'doc1', 't', 'foo') # here we only check that its not crashing env.expect('FT.AGGREGATE', 'idx', '*', 'GROUPBY', '1', 'foo', 'REDUCE', 'FIRST_VALUE', 3, '@not_exists', 'BY', '@foo')
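
# The helpers used throughout these tests (toSortedFlatList, getConnectionByEnv,
# waitForIndex, check_server_version, check_module_version, assertEqualIgnoreCluster)
# appear to be imported from the shared RediSearch test utilities; they are not defined here.
# A minimal sketch, assuming toSortedFlatList simply flattens a nested Redis reply and
# sorts it for order-insensitive comparison (illustrative only, not the real helper):
def _to_sorted_flat_list_sketch(reply):
    flat = []
    def _flatten(item):
        # Recurse into nested lists/tuples and collect leaf values.
        if isinstance(item, (list, tuple)):
            for sub in item:
                _flatten(sub)
        else:
            flat.append(item)
    _flatten(reply)
    # Sort by string representation so mixed int/str replies compare deterministically.
    return sorted(flat, key=str)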
test_processfamily.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import *
from future.utils import text_to_native_str

__author__ = 'matth'

import unittest
import sys
from processfamily.test import ParentProcess, Config
import os
import subprocess
import requests
import time
import socket
import logging
import glob
from processfamily.processes import process_exists, kill_process, AccessDeniedError
from processfamily import _traceback_str
import signal
import threading
from processfamily.futurecompat import get_env_dict, list_to_native_str

if sys.platform.startswith('win'):
    from processfamily._winprocess_ctypes import CAN_USE_EXTENDED_STARTUPINFO, CREATE_BREAKAWAY_FROM_JOB


class _BaseProcessFamilyFunkyWebServerTestSuite(unittest.TestCase):

    skip_crash_test = None

    def setUp(self):
        self.pid_dir = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'pid')
        if not os.path.exists(self.pid_dir):
            os.makedirs(self.pid_dir)
        for pid_file in self.get_pid_files():
            with open(pid_file, "r") as f:
                pid = f.read().strip()
            if pid and self.process_exists_or_access_denied(int(pid)):
                logging.warning(
                    ("Process with pid %s is still running. This could be a problem " + \
                     "(but it might be a new process with a recycled pid so I'm not killing it).") % pid
                )
            else:
                os.remove(pid_file)
        self.check_server_ports_unbound()

    def process_exists_or_access_denied(self, pid):
        try:
            return process_exists(pid)
        except AccessDeniedError as e:
            # It is most likely that this process does exist!
            return True

    def kill_process_ignore_access_denied(self, pid):
        try:
            return kill_process(pid)
        except AccessDeniedError as e:
            # Can't do anything about this
            pass

    def try_and_stop_everything_for_tear_down(self):
        # Override this if you can do something about stopping everything
        pass

    def tearDown(self):
        command_file = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'command.txt')
        if os.path.exists(command_file):
            os.remove(command_file)
        self.wait_for_parent_to_stop(5)
        # Now check that no processes are left over:
        start_time = time.time()
        processes_left_running = []
        for pid_file in self.get_pid_files():
            with open(pid_file, "r") as f:
                pid = f.read().strip()
            if pid:
                while self.process_exists_or_access_denied(int(pid)) and time.time() - start_time < 5:
                    time.sleep(0.3)
                if self.process_exists_or_access_denied(int(pid)):
                    processes_left_running.append(int(pid))
            os.remove(pid_file)
        if processes_left_running:
            for pid in processes_left_running:
                try:
                    self.kill_process_ignore_access_denied(pid)
                except Exception as e:
                    logging.warning("Error killing process with pid %d: %s", pid, _traceback_str())
            self.try_and_stop_everything_for_tear_down()
            start_time = time.time()
            for pid in processes_left_running:
                while self.process_exists_or_access_denied(int(pid)) and time.time() - start_time < 40:
                    time.sleep(0.3)
        self.check_server_ports_unbound()
        self.assertFalse(processes_left_running,
                         msg="There should have been no PIDs left running but there were: %s" % (
                             ', '.join([str(p) for p in processes_left_running])))

    def start_up(self, test_command=None, wait_for_middle_child=True, wait_for_children=True, parent_timeout=None):
        command_file = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'command.txt')
        if test_command:
            with open(command_file, "w") as f:
                f.write(test_command)
        elif os.path.exists(command_file):
            os.remove(command_file)
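        # The command file, when present, appears to be how the spawned ParentProcess
        # family is told which scenario (e.g. 'child_exit_on_start') to simulate.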
self.start_parent_process(timeout=parent_timeout) #Wait up to 15 secs for the all ports to be available (the parent might wait 10 for a middle child): start_time = time.time() still_waiting = True ports_to_wait = list(range(4)) if wait_for_children else [0] if not wait_for_middle_child: ports_to_wait.remove(2) while still_waiting and time.time() - start_time < 15: still_waiting = False for i in ports_to_wait: try: s = socket.socket() try: s.connect(("localhost", Config.get_starting_port_nr()+i)) except socket.error as e: still_waiting = True break finally: s.close() if still_waiting: time.sleep(0.3) self.assertFalse(still_waiting, "Waited 10 seconds and some http ports are still not accessible") def assert_middle_child_port_unbound(self): port = Config.get_starting_port_nr()+2 logging.info("Checking for ability to bind to port %d", port) serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: if not sys.platform.startswith('win'): #On linux I need this setting cos we are starting and stopping things #so frequently that they are still in a STOP_WAIT state when I get here serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) serversocket.bind(("", port)) except Exception as e: self.fail("Middle child port is not unbound as expected") finally: serversocket.close() def get_pid_files(self): return glob.glob(os.path.join(self.pid_dir, "*.pid")) def kill_parent(self): for pid_file in self.get_pid_files(): if os.path.basename(pid_file).startswith('c'): continue with open(pid_file, "r") as f: pid = f.read().strip() kill_process(int(pid)) def check_stop(self, force_kills=0, timeout=None): """Checks that a stop succeeds, and that the number of child processes that had to be terminated is as expected""" params = {"timeout": str(timeout)} if timeout else {} child_processes_terminated = self.send_parent_http_command("stop", params=params) if child_processes_terminated != str(force_kills): raise ValueError("Stop received, but parent reports %r instead of %r child processes terminated", child_processes_terminated, force_kills) def test_parent_stop(self): self.start_up() self.check_stop() def test_parent_exit(self): self.start_up() self.send_parent_http_command("exit") def test_parent_crash(self): if self.skip_crash_test: self.skipTest(self.skip_crash_test) self.start_up() self.send_parent_http_command("crash") def test_parent_interrupt_main(self): self.start_up() self.send_parent_http_command("interrupt_main") def test_parent_kill(self): self.start_up() self.kill_parent() def test_parent_stop_child_locked_up(self): self.start_up() self.freeze_up_middle_child() self.check_stop(1, timeout=5) #This needs time to wait for the child for 10 seconds: self.wait_for_parent_to_stop(11) def test_parent_exit_child_locked_up(self): self.start_up() self.freeze_up_middle_child() self.send_parent_http_command("exit") def test_parent_crash_child_locked_up(self): if self.skip_crash_test: self.skipTest(self.skip_crash_test) self.start_up() self.freeze_up_middle_child() self.send_parent_http_command("crash") def test_parent_interrupt_main_child_locked_up(self): self.start_up() self.freeze_up_middle_child() self.send_parent_http_command("interrupt_main") #This needs time to wait for the child for 10 seconds: self.wait_for_parent_to_stop(11) def test_parent_kill_child_locked_up(self): self.start_up() self.freeze_up_middle_child() self.kill_parent() def test_parent_exit_child_locked_up(self): self.start_up() self.freeze_up_middle_child() self.send_parent_http_command("exit") def 
test_child_exit_on_start(self): self.start_up(test_command='child_exit_on_start', wait_for_middle_child=False) self.assert_middle_child_port_unbound() self.check_stop() def test_child_error_during_run(self): self.start_up(test_command='child_error_during_run', wait_for_middle_child=False) self.check_stop() def test_child_freeze_on_start(self): self.start_up(test_command='child_freeze_on_start', wait_for_middle_child=False, parent_timeout=2) self.assert_middle_child_port_unbound() self.check_stop(1, timeout=5) def test_child_error_on_start(self): self.start_up(test_command='child_error_on_start', wait_for_middle_child=False) self.assert_middle_child_port_unbound() self.check_stop() def test_child_error_during_init(self): self.start_up(test_command='child_error_during_init', wait_for_middle_child=False) self.assert_middle_child_port_unbound() self.check_stop() def test_child_freeze_during_init(self): self.start_up(test_command='child_freeze_during_init', wait_for_middle_child=False, parent_timeout=2) self.assert_middle_child_port_unbound() self.check_stop(1, timeout=5) self.wait_for_parent_to_stop(11) def test_child_crash_on_start(self): if self.skip_crash_test: self.skipTest(self.skip_crash_test) self.start_up(test_command='child_crash_on_start', wait_for_middle_child=False) self.assert_middle_child_port_unbound() self.check_stop() if not sys.platform.startswith('win'): def test_sigint(self): self.start_up() os.kill(self.parent_process.pid, signal.SIGINT) def test_sigint_child_locked_up(self): self.start_up() self.freeze_up_middle_child() os.kill(self.parent_process.pid, signal.SIGINT) #This needs time to wait for the child for 10 seconds: self.wait_for_parent_to_stop(11) def test_file_open_by_parent_before_fork_can_be_closed_and_deleted(self): self.start_up() result = self.send_parent_http_command("close_file_and_delete_it") self.assertEqual("OK", result, "Command to close file and delete it failed (got response: %s)" % result) self.check_stop() def test_echo_std_err_on(self): self.start_up(test_command='echo_std_err') self.check_stop() def test_handles_over_commandline_off(self): if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO: self.skipTest("This test is not supported on this platform") self.start_up(test_command='handles_over_commandline_off') self.check_stop() def test_handles_over_commandline_off_close_fds_off(self): if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO: self.skipTest("This test is not supported on this platform") self.start_up(test_command='handles_over_commandline_off_close_fds_off') result = self.send_parent_http_command("close_file_and_delete_it") self.assertEqual("FAIL", result, "Command to close file and delete it did not fail (got response: %s)" % result) self.check_stop() def test_close_fds_off(self): self.start_up(test_command='close_fds_off') result = self.send_parent_http_command("close_file_and_delete_it") if sys.platform.startswith('win'): #On linux this works fine self.assertEqual("FAIL", result, "Command to close file and delete it did not fail (got response: %s)" % result) else: #TODO: a relevant test on linux? 
pass self.check_stop() def test_child_comms_strategy_stdin_close(self): self.start_up(test_command='use_cat', wait_for_children=False) self.check_stop() def test_child_comms_strategy_none(self): self.start_up(test_command='use_cat_comms_none', wait_for_children=False) # we don't actually have the ability to tell these children to stop self.check_stop(3) def test_child_comms_strategy_signal(self): self.start_up(test_command='use_signal', wait_for_children=False) # since we're not waiting for the children to start up, give them a chance to register signal handlers time.sleep(0.5) self.check_stop() def test_use_job_object_off(self): self.start_up(test_command= 'use_job_object_off') self.check_stop() def test_cpu_affinity_off(self): self.start_up(test_command='cpu_affinity_off') self.check_stop() def test_handles_over_commandline_off_file_open_by_parent(self): if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO: self.skipTest("This test is not supported on this platform") self.start_up(test_command='handles_over_commandline_off') result = self.send_parent_http_command("close_file_and_delete_it") self.assertEqual("OK", result, "Command to close file and delete it failed (got response: %s)" % result) self.check_stop() def freeze_up_middle_child(self): #First check that we can do this fast (i.e. things aren't stuttering because of environment): for i in range(5): self.send_middle_child_http_command("", timeout=1) self.send_middle_child_http_command("hold_gil?t=%d" % (60*10)) #Freeze up for 10 minutes while True: #Try and do this request until it takes longer than 1 sec - this would mean that we have successfully got stuck try: self.send_middle_child_http_command("", timeout=1) except requests.exceptions.Timeout as t: break def check_server_ports_unbound(self): bound_ports = [] for pnumber in range(4): port = Config.get_starting_port_nr() + pnumber #I just try and bind to the server port and see if I have a problem: logging.info("Checking for ability to bind to port %d", port) serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: if not sys.platform.startswith('win'): #On linux I need this setting cos we are starting and stopping things #so frequently that they are still in a STOP_WAIT state when I get here serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) serversocket.bind(("", port)) except Exception as e: bound_ports.append(port) finally: serversocket.close() self.assertFalse(bound_ports, "The following ports are still bound: %s" % ', '.join([str(p) for p in bound_ports])) def get_path_to_ParentProcessPy(self): return os.path.join(os.path.dirname(__file__), 'test', 'ParentProcess.py') def send_parent_http_command(self, command, params=None, **kwargs): return self.send_http_command(Config.get_starting_port_nr(), command, params=params, **kwargs) def send_middle_child_http_command(self, command, params=None, **kwargs): return self.send_http_command(Config.get_starting_port_nr()+2, command, params=params, **kwargs) def send_http_command(self, port, command, params=None, **kwargs): r = requests.get('http://localhost:%d/%s' % (port, command), params=params, **kwargs) j = r.json if callable(j): return j() else: #This is the old requests api: return j def wait_for_process_to_stop(self, process, timeout): if process is None: logging.info("No process to wait for") return logging.info("Waiting for process (%d) to finish", process.pid) start_time = time.time() while time.time()-start_time < timeout: if process.poll() is None: time.sleep(0.3) else: 
return class NormalSubprocessTests(_BaseProcessFamilyFunkyWebServerTestSuite): skip_crash_test = "The crash test throws up a dialog in this context" if sys.platform.startswith('win') else None def start_parent_process(self, timeout=None): kwargs={} if sys.platform.startswith('win'): kwargs['creationflags'] = CREATE_BREAKAWAY_FROM_JOB environ = get_env_dict() if timeout: environ[text_to_native_str("STARTUP_TIMEOUT")] = text_to_native_str(timeout) self.parent_process = subprocess.Popen( list_to_native_str([sys.executable, self.get_path_to_ParentProcessPy()]), close_fds=True, env=environ, **kwargs) threading.Thread(target=self.parent_process.communicate).start() def wait_for_parent_to_stop(self, timeout): self.wait_for_process_to_stop(getattr(self, 'parent_process', None), timeout) if sys.platform.startswith('win'): import win32service import win32serviceutil from processfamily.test.ExeBuilder import build_service_exe from processfamily.processes import USE_PROCESS_QUERY_LIMITED_INFORMATION class PythonWTests(_BaseProcessFamilyFunkyWebServerTestSuite): skip_crash_test = "The crash test throws up a dialog in this context" if sys.platform.startswith('win') else None def start_parent_process(self, timeout=None): self.parent_process = subprocess.Popen( [Config.pythonw_exe, self.get_path_to_ParentProcessPy()], close_fds=True, creationflags=CREATE_BREAKAWAY_FROM_JOB) threading.Thread(target=self.parent_process.communicate).start() def wait_for_parent_to_stop(self, timeout): self.wait_for_process_to_stop(getattr(self, 'parent_process', None), timeout) class WindowsServiceTests(_BaseProcessFamilyFunkyWebServerTestSuite): @classmethod def setUpClass(cls, service_username=None): cls.send_stop_and_then_wait_for_service_to_stop_ignore_errors() cls.service_exe = build_service_exe() cmd = [cls.service_exe] + (["--username", service_username] if service_username else []) + ["install"] subprocess.check_call(list_to_native_str(cmd)) @classmethod def tearDownClass(cls): if hasattr(cls, 'service_exe'): subprocess.check_call([cls.service_exe, "remove"]) def try_and_stop_everything_for_tear_down(self): self.send_stop_and_then_wait_for_service_to_stop_ignore_errors() def start_parent_process(self, timeout=None): win32serviceutil.StartService(Config.svc_name) def wait_for_parent_to_stop(self, timeout): self.wait_for_service_to_stop(timeout) @classmethod def wait_for_service_to_stop(cls, timeout): start_time = time.time() while time.time()-start_time < timeout: if win32serviceutil.QueryServiceStatus(Config.svc_name)[1] != win32service.SERVICE_STOPPED: time.sleep(0.3) def test_parent_interrupt_main(self): self.skipTest("Interrupt main doesn't do anything useful in a windows service") def test_parent_interrupt_main_child_locked_up(self): self.skipTest("Interrupt main doesn't do anything useful in a windows service") def test_service_stop(self): self.start_up() win32serviceutil.StopService(Config.svc_name) def test_service_stop_child_locked_up(self): self.start_up() self.freeze_up_middle_child() win32serviceutil.StopService(Config.svc_name) #This needs time to wait for the child for 10 seconds: self.wait_for_parent_to_stop(11) def test_service_stop_child_freeze_on_start(self): self.start_up(test_command='child_freeze_on_start', wait_for_middle_child=False) self.assert_middle_child_port_unbound() win32serviceutil.StopService(Config.svc_name) #This still needs time to wait for the child to stop for 10 seconds: self.wait_for_parent_to_stop(11) @classmethod def 
send_stop_and_then_wait_for_service_to_stop_ignore_errors(cls):
        try:
            win32serviceutil.StopService(Config.svc_name)
            cls.wait_for_service_to_stop(20)
        except Exception as e:
            pass

    if not USE_PROCESS_QUERY_LIMITED_INFORMATION:

        def test_parent_kill(self):
            self.skipTest("I cannot kill a network service service from here - I get an access denied error")

        def test_parent_kill_child_locked_up(self):
            self.skipTest("I cannot kill a network service service from here - I get an access denied error")


class WindowsServiceNetworkServiceUserTests(WindowsServiceTests):

    @staticmethod
    def grant_network_service_rights(folder, rights):
        try:
            subprocess.check_call(["cmd.exe", "/C", "icacls", folder, "/grant", "NETWORK SERVICE:(OI)(CI)%s" % rights])
        except Exception as e:
            logging.warning("icacls command returned a non-zero response for folder/file '%s'", folder)

    @classmethod
    def setUpClass(cls):
        #I do this just in case we left the service running by interrupting the tests
        cls.send_stop_and_then_wait_for_service_to_stop_ignore_errors()
        tmp_dir = os.path.join(os.path.dirname(__file__), 'test', 'tmp')
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)
        #Make sure network service has full access to the tmp folder (and these are inheritable)
        cls.grant_network_service_rights(tmp_dir, "F")
        #And read / execute access to Python, and other folders on the python path:
        cls.grant_network_service_rights(os.path.abspath(sys.prefix), "RX")
        done_paths = [os.path.abspath(sys.prefix)]
        for path_item in sorted(sys.path, key=lambda p: len(os.path.abspath(p))):
            abspath_item = os.path.abspath(path_item)
            already_done = False
            for p in done_paths:
                if abspath_item.startswith(p):
                    already_done = True
                    break
            if not already_done:
                cls.grant_network_service_rights(abspath_item, "RX")
                done_paths.append(abspath_item)
        super(WindowsServiceNetworkServiceUserTests, cls).setUpClass(service_username="NT AUTHORITY\\NetworkService")

    def test_parent_kill(self):
        self.skipTest("I cannot kill a network service service from here - I get an access denied error")

    def test_parent_kill_child_locked_up(self):
        self.skipTest("I cannot kill a network service service from here - I get an access denied error")

#Remove the base class from the module dict so it isn't sniffed out by nose:
del(_BaseProcessFamilyFunkyWebServerTestSuite)
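The test module above ends by deleting its shared base class so that nose/unittest discovery only collects the concrete subclasses (NormalSubprocessTests and the Windows service variants). A minimal self-contained sketch of that pattern follows; the class and attribute names are illustrative only and are not part of processfamily.

# Sketch of the "shared base TestCase removed from the module namespace" pattern
# used above. _SharedChecks holds the common test logic; only the concrete
# subclasses are left for the test runner to discover.
import unittest

class _SharedChecks(unittest.TestCase):
    started_with = None  # set by concrete subclasses

    def test_value_is_set(self):
        # Would fail if the runner tried to execute the base class directly.
        self.assertIsNotNone(self.started_with)

class VariantA(_SharedChecks):
    started_with = "a"

class VariantB(_SharedChecks):
    started_with = "b"

# Remove the base class so test discovery does not try to run it on its own.
del _SharedChecks

if __name__ == '__main__':
    unittest.main()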
Server.py
#!/usr/bin/env python # coding: utf-8 # In[ ]: from pymouse import PyMouse from pykeyboard import PyKeyboard import socket import time import os os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide" import pygame import pyscreenshot as ImageGrab import sys import select import threading from tkinter import * from tkinter import filedialog # In[ ]: keyDict = { 8 : 22, 9 : 23, 12 : 0, 13 : 36, 19 : 127, 27 : 9, 32 : 65, 33 : 10, 34 : 48, 35 : 12, 36 : 13, 38 : 16, 39 : 48, 40 : 187, 41 : 188, 42 : 17, 43 : 21, 44 : 59, 45 : 20, 46 : 60, 47 : 61, 48 : 19, 49 : 10, 50 : 11, 51 : 12, 52 : 13, 53 : 14, 54 : 15, 55 : 16, 56 : 17, 57 : 18, 58 : 47, 59 : 47, 60 : 94, 61 : 21, 62 : 60, 63 : 61, 64 : 11, 91 : 34, 92 : 51, 93 : 35, 94 : 15, 95 : 20, 96 : 49, 97 : 38, 98 : 56, 99 : 54, 100 : 40, 101 : 26, 102 : 41, 103 : 42, 104 : 43, 105 : 31, 106 : 44, 107 : 45, 108 : 46, 109 : 58, 110 : 57, 111 : 32, 112 : 33, 113 : 24, 114 : 27, 115 : 39, 116 : 28, 117 : 30, 118 : 55, 119 : 25, 120 : 53, 121 : 29, 122 : 52, 127 : 119, 256 : 19, 257 : 10, 258 : 11, 259 : 12, 260 : 13, 261 : 14, 262 : 15, 263 : 16, 264 : 17, 265 : 18, 266 : 60, 267 : 61, 268 : 17, 269 : 20, 270 : 21, 271 : 36, 272 : 21, 273 : 111, 274 : 116, 275 : 114, 276 : 113, 277 : 118, 278 : 110, 279 : 115, 280 : 112, 281 : 117, 282 : 67, 283 : 68, 284 : 69, 285 : 70, 286 : 71, 287 : 72, 288 : 73, 289 : 74, 290 : 75, 291 : 76, 292 : 95, 293 : 96, 300 : 77, 301 : 66, 302 : 78, 303 : 62, 304 : 50, 305 : 105, 306 : 37, 307 : 108, 308 : 64, 311 : 133, 312 : 134, 313 : 92, 315 : 146, 316 : 107, 317 : 107, 318 : 127 } # In[ ]: SELFIP = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0] IP = "192.168.1.110" PORT = 12345 RES = (PyMouse().screen_size()[0] - 1, PyMouse().screen_size()[1] - 1, 0) CRES = (1920, 1080, 0) os.environ["SDL_VIDEO_WINDOW_POS"] = "0,0" lastSS = 0 Broadcasters = [] LOC = 3 condition = [PyMouse().position, pygame.mouse.get_pos] cI = [0,-1,0] # In[ ]: def cls(): os.system('cls' if os.name=='nt' else 'clear') # In[ ]: def broadcastListener(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.settimeout(5) s.bind(('', PORT)) s.setblocking(0) result = select.select([s],[],[],10) if result[0]: packet = result[0][0].recv(1024) s.close() return packet.decode() else: s.close() return None # In[ ]: while True: print("Waiting for new client...") newClient = broadcastListener() if newClient and newClient not in Broadcasters: Broadcasters.append(newClient) print('[0] Keep looking') for i in range(len(Broadcasters)): print('[' + str(i+1) + ']', Broadcasters[i]) dest = int(input("Select Client: ")) if dest == 0: cls() continue else: cls() print(Broadcasters[dest-1].split("|")[1], "has been selected as client.") IP = Broadcasters[dest-1].split("|")[0] CRES = Broadcasters[dest-1].split("|")[2] CRES = (int(CRES.split(",")[0]), int(CRES.split(",")[1]), 0) break # In[ ]: prompt = """ [1]UP ---------------- | | | Server | [2]LEFT | | [3] RIGHT | Screen | | | ---------------- [4]DOWN """ # In[ ]: while True: try: print(prompt) LOC = int(input("\nSelect the location of the client: ")) if LOC not in [1,2,3,4]: cls() continue elif LOC == 1: cI = [-1,1,1] break elif LOC == 2: cI = [-1,0,0] break elif LOC == 3: cI = [0,-1,0] break elif LOC == 4: cI = [1,-1,1] break except ValueError: continue # In[ ]: def share(): try: s = socket.socket(socket.AF_INET, 
socket.SOCK_DGRAM) udpTuple = (IP, PORT) while True: # ON SELF while not condition[0]()[cI[2]] == RES[cI[0]]: continue posbeforetoggle = (abs(CRES[cI[1]] - 5), int(condition[0]()[1] / (RES[1]+1) * CRES[1])) if LOC in [2,3] else (int(condition[0]()[0] / (RES[0]+1) * CRES[0]) ,abs(CRES[cI[1]] - 5)) pygame.init() os.system("import -window root screen.png") lastSS = time.time() screen = pygame.display.set_mode((RES[0]+1, RES[1]+1), pygame.NOFRAME) screen = pygame.display.set_mode((RES[0]+1, RES[1]+1), pygame.NOFRAME) background_image = pygame.image.load('screen.png').convert() screen.blit(background_image, (0,0)) pygame.display.flip() pygame.mouse.set_pos(posbeforetoggle) pygame.mouse.set_cursor((8,8),(0,0),(0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0)) pygame.event.set_grab(True) # ON CLIENT # SS = threading.Thread(name='takeSS', target=takeSS) # SS.start() while True: if condition[1]()[cI[2]] == RES[cI[1]]: pygame.mouse.set_pos((abs(RES[cI[0]]-1), int(condition[1]()[1] / CRES[1] * (RES[1]+1)))) if LOC in [2,3] else pygame.mouse.set_pos((int(condition[1]()[0] / CRES[0] * (RES[0]+1)), abs(RES[cI[0]]-1))) break for event in pygame.event.get(): if event.type == pygame.MOUSEMOTION: posbeforetoggle = pygame.mouse.get_pos() s.sendto(("M," + str(posbeforetoggle[0]) + "," + str(posbeforetoggle[1])).encode(), udpTuple) elif event.type == pygame.MOUSEBUTTONDOWN and event.button in [1,2,3]: s.sendto(("P," + str(event.pos[0]) + "," + str(event.pos[1]) + "," + str(int(-3* (event.button**2)/2 + (13*event.button)/2 - 4))).encode(), udpTuple) elif event.type == pygame.MOUSEBUTTONUP and event.button in [1,2,3]: s.sendto(("R," + str(event.pos[0]) + "," + str(event.pos[1]) + "," + str(int(-3* (event.button**2)/2 + (13*event.button)/2 - 4))).encode(), udpTuple) elif event.type == pygame.KEYDOWN: s.sendto(("P," + str(keyDict[event.key])).encode(), udpTuple) elif event.type == pygame.KEYUP: s.sendto(("R," + str(keyDict[event.key])).encode(), udpTuple) pygame.quit() # SS.do_run = False # SS.join() except: print("Exited!") pygame.quit() # In[ ]: def receivefile(file): count = 1 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((SELFIP, PORT)) s.listen() while True: conn, addr = s.accept() f = open(file + str(count),'wb') while True: bytepart = conn.recv(1024) if not bytepart: break f.write(bytepart) f.flush() f.close() conn.close() count += 1 print("\n A file has been received") s.close() # In[ ]: def sendfile(file, ClientIP): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((ClientIP,PORT)) f = open(file, "rb") part = f.read(1024) while part: s.sendall(part) part = f.read(1024) f.close() s.close() cls() print(file.split("/")[-1], "has been sent!") # In[ ]: def selectFile(): root = Tk() root.withdraw() file_path = filedialog.askopenfilename() return file_path # In[ ]: shareThread = threading.Thread(name='share', target=share) shareThread.start() recvFile = threading.Thread(name='receivefile', target=receivefile, args=("receivedFile",)) recvFile.start() # In[ ]: while True: input("Press Enter to Share File") path = selectFile() if not path == '': sendfile(path, IP) # In[ ]:
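Server.py above implements only the sending side: share() forwards mouse and keyboard events as small comma-separated UDP packets ("M,x,y" for motion, "P,x,y,button"/"R,x,y,button" for mouse press/release, and "P,key"/"R,key" for keyboard). The matching client is not part of this file; the following is a rough sketch, under that assumption, of how such packets could be decoded on the receiving machine. It only prints the decoded events, where a real client would feed them to PyMouse/PyKeyboard.

# Hypothetical receiver sketch for the UDP packets emitted by share() above.
import socket

PORT = 12345  # same port Server.py uses

def decode_packet(data):
    parts = data.decode().split(",")
    kind, args = parts[0], [int(p) for p in parts[1:]]
    if kind == "M":
        return ("move", args[0], args[1])
    if kind in ("P", "R") and len(args) == 3:
        action = "press" if kind == "P" else "release"
        return ("mouse_" + action, args[0], args[1], args[2])
    if kind in ("P", "R") and len(args) == 1:
        action = "press" if kind == "P" else "release"
        return ("key_" + action, args[0])
    return ("unknown", data)

if __name__ == "__main__":
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(("", PORT))
    while True:
        data, addr = s.recvfrom(1024)
        # A real client would replay the event with PyMouse/PyKeyboard here.
        print(addr, decode_packet(data))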
run_on_bots.py
#!/usr/bin/env python # Copyright 2014 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. """Automated maintenance tool to run a script on bots. To use this script, write a self-contained python script (use a .zip if necessary), specify it on the command line and it will be packaged and triggered on all the swarming bots corresponding to the --dimension filters specified, or all the bots if no filter is specified. """ __version__ = '0.2' import json import os import shutil import string import subprocess import sys import tempfile import threading CLIENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath( __file__.decode(sys.getfilesystemencoding())))) sys.path.insert(0, CLIENT_DIR) from utils import tools tools.force_local_third_party() # third_party/ import colorama from chromium import natsort from depot_tools import fix_encoding # pylint: disable=ungrouped-imports import parallel_execution from utils import file_path from utils import tools def get_bot_list(swarming_server, dimensions): """Returns a list of swarming bots: health, quarantined, dead.""" q = '&'.join( 'dimensions=%s:%s' % (k, v) for k, v in sorted(dimensions.iteritems())) cmd = [ sys.executable, 'swarming.py', 'query', '--swarming', swarming_server, '--limit', '0', 'bots/list?' + q, ] healthy = [] quarantined = [] dead = [] results = json.loads(subprocess.check_output(cmd, cwd=CLIENT_DIR)) if not results.get('items'): return (), (), () for b in results['items']: if b['is_dead']: dead.append(b['bot_id']) elif b['quarantined']: quarantined.append(b['bot_id']) else: healthy.append(b['bot_id']) return natsort.natsorted(healthy), quarantined, dead def archive(isolate_server, script): """Archives the tool and return the sha-1.""" base_script = os.path.basename(script) isolate = { 'variables': { 'command': ['python', base_script], 'files': [base_script], }, } tempdir = tempfile.mkdtemp(prefix=u'run_on_bots') try: isolate_file = os.path.join(tempdir, 'tool.isolate') isolated_file = os.path.join(tempdir, 'tool.isolated') with open(isolate_file, 'wb') as f: f.write(str(isolate)) shutil.copyfile(script, os.path.join(tempdir, base_script)) cmd = [ sys.executable, 'isolate.py', 'archive', '--isolate-server', isolate_server, '-i', isolate_file, '-s', isolated_file, ] return subprocess.check_output(cmd, cwd=CLIENT_DIR).split()[0] finally: file_path.rmtree(tempdir) def batched_subprocess(cmd, sem): def run(cmd, sem): subprocess.call(cmd, cwd=CLIENT_DIR) sem.release() sem.acquire() thread = threading.Thread(target=run, args=(cmd, sem)) thread.start() return thread def run_batches( swarming_server, isolate_server, dimensions, tags, env, priority, deadline, batches, repeat, isolated_hash, name, bots, args): """Runs the task |batches| at a time. This will be mainly bound by task scheduling latency, especially if the bots are busy and the priority is low. 
""" sem = threading.Semaphore(batches) threads = [] for i in xrange(repeat): for bot in bots: suffix = '/%d' % i if repeat > 1 else '' task_name = parallel_execution.task_to_name( name, {'id': bot}, isolated_hash) + suffix cmd = [ sys.executable, 'swarming.py', 'run', '--swarming', swarming_server, '--isolate-server', isolate_server, '--priority', priority, '--deadline', deadline, '--dimension', 'id', bot, '--task-name', task_name, '-s', isolated_hash, ] for k, v in sorted(dimensions.iteritems()): cmd.extend(('-d', k, v)) for t in sorted(tags): cmd.extend(('--tags', t)) for k, v in env: cmd.extend(('--env', k, v)) if args: cmd.append('--') cmd.extend(args) threads.append(batched_subprocess(cmd, sem)) for t in threads: t.join() def run_serial( swarming_server, isolate_server, dimensions, tags, env, priority, deadline, repeat, isolated_hash, name, bots, args): """Runs the task one at a time. This will be mainly bound by task scheduling latency, especially if the bots are busy and the priority is low. """ result = 0 for i in xrange(repeat): for bot in bots: suffix = '/%d' % i if repeat > 1 else '' task_name = parallel_execution.task_to_name( name, {'id': bot}, isolated_hash) + suffix cmd = [ sys.executable, 'swarming.py', 'run', '--swarming', swarming_server, '--isolate-server', isolate_server, '--priority', priority, '--deadline', deadline, '--dimension', 'id', bot, '--task-name', task_name, '-s', isolated_hash, ] for k, v in sorted(dimensions.iteritems()): cmd.extend(('-d', k, v)) for t in sorted(tags): cmd.extend(('--tags', t)) for k, v in env: cmd.extend(('--env', k, v)) if args: cmd.append('--') cmd.extend(args) r = subprocess.call(cmd, cwd=CLIENT_DIR) result = max(r, result) return result def run_parallel( swarming_server, isolate_server, dimensions, env, priority, deadline, repeat, isolated_hash, name, bots, args): tasks = [] for i in xrange(repeat): suffix = '/%d' % i if repeat > 1 else '' for bot in bots: d = {'id': bot} tname = parallel_execution.task_to_name(name, d, isolated_hash) + suffix d.update(dimensions) tasks.append((tname, isolated_hash, d, env)) extra_args = ['--priority', priority, '--deadline', deadline] extra_args.extend(args) print('Using priority %s' % priority) for failed_task in parallel_execution.run_swarming_tasks_parallel( swarming_server, isolate_server, extra_args, tasks): _name, dimensions, stdout = failed_task print('%sFailure: %s%s\n%s' % ( colorama.Fore.RED, dimensions, colorama.Fore.RESET, stdout)) def main(): parser = parallel_execution.OptionParser( usage='%prog [options] (script.py|isolated hash) ' '-- [script.py arguments]', version=__version__) parser.add_option( '--serial', action='store_true', help='Runs the task serially, to be used when debugging problems since ' 'it\'s slow') parser.add_option( '--batches', type='int', default=0, help='Runs a task in parallel |batches| at a time.') parser.add_option( '--tags', action='append', default=[], metavar='FOO:BAR', help='Tags to assign to the task.') parser.add_option( '--repeat', type='int', default=1, help='Runs the task multiple time on each bot, meant to be used as a ' 'load test') parser.add_option( '--name', help='Name to use when providing an isolated hash') options, args = parser.parse_args() if len(args) < 1: parser.error( 'Must pass one python script to run. Use --help for more details') if not options.priority: parser.error( 'Please provide the --priority option. 
Either use a very low number\n'
        'so the task completes as fast as possible, or a high number so the\n'
        'task only runs when the bot is idle.')

  # 1. Archive the script to run.
  if not os.path.exists(args[0]):
    if not options.name:
      parser.error(
          'Please provide --name when using an isolated hash.')
    if len(args[0]) not in (40, 64):
      parser.error(
          'Hash wrong length %d (%r)' % (len(args[0]), args[0]))
    for i, c in enumerate(args[0]):
      if c not in string.hexdigits:
        parser.error(
            'Hash character invalid\n'
            ' %s\n' % args[0] +
            ' '+'-'*i+'^\n'
            )
    isolated_hash = args[0]
    name = options.name
  else:
    isolated_hash = archive(options.isolate_server, args[0])
    name = os.path.basename(args[0])
  print('Running %s' % isolated_hash)

  # 2. Query the bots list.
  bots, quarantined_bots, dead_bots = get_bot_list(
      options.swarming, options.dimensions)
  print('Found %d bots to process' % len(bots))
  if quarantined_bots:
    print('Warning: found %d quarantined bots' % len(quarantined_bots))
  if dead_bots:
    print('Warning: found %d dead bots' % len(dead_bots))
  if not bots:
    return 1

  # 3. Trigger the tasks.
  if options.batches > 0:
    return run_batches(
        options.swarming,
        options.isolate_server,
        options.dimensions,
        options.tags,
        options.env,
        str(options.priority),
        str(options.deadline),
        options.batches,
        options.repeat,
        isolated_hash,
        name,
        bots,
        args[1:])
  if options.serial:
    return run_serial(
        options.swarming,
        options.isolate_server,
        options.dimensions,
        options.tags,
        options.env,
        str(options.priority),
        str(options.deadline),
        options.repeat,
        isolated_hash,
        name,
        bots,
        args[1:])
  return run_parallel(
      options.swarming,
      options.isolate_server,
      options.dimensions,
      options.env,
      str(options.priority),
      str(options.deadline),
      options.repeat,
      isolated_hash,
      name,
      bots,
      args[1:])


if __name__ == '__main__':
  fix_encoding.fix_encoding()
  tools.disable_buffering()
  colorama.init()
  sys.exit(main())
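run_on_bots.py throttles its --batches mode with a semaphore: batched_subprocess() acquires a slot before launching each swarming.py call on its own thread and releases the slot when the call returns. The sketch below shows that throttling pattern in isolation; run_throttled and the placeholder command list are illustrative names, not part of the script.

# Self-contained sketch of the semaphore-based throttling used by run_batches().
import subprocess
import sys
import threading

def run_throttled(commands, max_parallel):
    sem = threading.Semaphore(max_parallel)
    threads = []
    for cmd in commands:
        sem.acquire()  # blocks until one of the running commands finishes
        def run(cmd=cmd):
            try:
                subprocess.call(cmd)
            finally:
                sem.release()
        t = threading.Thread(target=run)
        t.start()
        threads.append(t)
    for t in threads:
        t.join()

if __name__ == '__main__':
    # Example: run 10 short python commands, at most 3 at a time.
    cmds = [[sys.executable, '-c', 'print(%d)' % i] for i in range(10)]
    run_throttled(cmds, 3)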
hub.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import copy import os import select import socket import threading import time import uuid import warnings from ..extern.six.moves import queue, range from ..extern.six.moves import xmlrpc_client as xmlrpc from ..extern.six.moves.urllib.parse import urlunparse from .. import log from .constants import SAMP_STATUS_OK from .constants import __profile_version__ from .errors import SAMPWarning, SAMPHubError, SAMPProxyError from .utils import internet_on, ServerProxyPool, _HubAsClient from .lockfile_helpers import read_lockfile, create_lock_file from .standard_profile import ThreadingXMLRPCServer from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog __all__ = ['SAMPHubServer', 'WebProfileDialog'] __doctest_skip__ = ['.', 'SAMPHubServer.*'] class SAMPHubServer(object): """ SAMP Hub Server. Parameters ---------- secret : str, optional The secret code to use for the SAMP lockfile. If none is is specified, the :func:`uuid.uuid1` function is used to generate one. addr : str, optional Listening address (or IP). This defaults to 127.0.0.1 if the internet is not reachable, otherwise it defaults to the host name. port : int, optional Listening XML-RPC server socket port. If left set to 0 (the default), the operating system will select a free port. lockfile : str, optional Custom lockfile name. timeout : int, optional Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically stops after an inactivity period longer than ``timeout`` seconds. By default ``timeout`` is set to 0 (Hub never expires). client_timeout : int, optional Client inactivity timeout. If ``client_timeout > 0`` then the Hub automatically unregisters the clients which result inactive for a period longer than ``client_timeout`` seconds. By default ``client_timeout`` is set to 0 (clients never expire). mode : str, optional Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub runs using the standard ``.samp`` lock-file, having a single instance for user desktop session. Otherwise, if ``mode`` is ``'multiple'``, then the Hub runs using a non-standard lock-file, placed in ``.samp-1`` directory, of the form ``samp-hub-<UUID>``, where ``<UUID>`` is a unique UUID assigned to the hub. label : str, optional A string used to label the Hub with a human readable name. This string is written in the lock-file assigned to the ``hub.label`` token. web_profile : bool, optional Enables or disables the Web Profile support. web_profile_dialog : class, optional Allows a class instance to be specified using ``web_profile_dialog`` to replace the terminal-based message with e.g. a GUI pop-up. Two `queue.Queue` instances will be added to the instance as attributes ``queue_request`` and ``queue_result``. When a request is received via the ``queue_request`` queue, the pop-up should be displayed, and a value of `True` or `False` should be added to ``queue_result`` depending on whether the user accepted or refused the connection. web_port : int, optional The port to use for web SAMP. This should not be changed except for testing purposes, since web SAMP should always use port 21012. pool_size : int, optional The number of socket connections opened to communicate with the clients. 
""" def __init__(self, secret=None, addr=None, port=0, lockfile=None, timeout=0, client_timeout=0, mode='single', label="", web_profile=True, web_profile_dialog=None, web_port=21012, pool_size=20): # Generate random ID for the hub self._id = str(uuid.uuid1()) # General settings self._is_running = False self._customlockfilename = lockfile self._lockfile = None self._addr = addr self._port = port self._mode = mode self._label = label self._timeout = timeout self._client_timeout = client_timeout self._pool_size = pool_size # Web profile specific attributes self._web_profile = web_profile self._web_profile_dialog = web_profile_dialog self._web_port = web_port self._web_profile_server = None self._web_profile_callbacks = {} self._web_profile_requests_queue = None self._web_profile_requests_result = None self._web_profile_requests_semaphore = None self._host_name = "127.0.0.1" if internet_on(): try: self._host_name = socket.getfqdn() socket.getaddrinfo(self._addr or self._host_name, self._port or 0) except socket.error: self._host_name = "127.0.0.1" # Threading stuff self._thread_lock = threading.Lock() self._thread_run = None self._thread_hub_timeout = None self._thread_client_timeout = None self._launched_threads = [] # Variables for timeout testing: self._last_activity_time = None self._client_activity_time = {} # Hub message id counter, used to create hub msg ids self._hub_msg_id_counter = 0 # Hub secret code self._hub_secret_code_customized = secret self._hub_secret = self._create_secret_code() # Hub public id (as SAMP client) self._hub_public_id = "" # Client ids # {private_key: (public_id, timestamp)} self._private_keys = {} # Metadata per client # {private_key: metadata} self._metadata = {} # List of subscribed clients per MType # {mtype: private_key list} self._mtype2ids = {} # List of subscribed MTypes per client # {private_key: mtype list} self._id2mtypes = {} # List of XML-RPC addresses per client # {public_id: (XML-RPC address, ServerProxyPool instance)} self._xmlrpc_endpoints = {} # Synchronous message id heap self._sync_msg_ids_heap = {} # Public ids counter self._client_id_counter = -1 @property def id(self): """ The unique hub ID. 
""" return self._id def _register_standard_api(self, server): # Standard Profile only operations server.register_function(self._ping, 'samp.hub.ping') server.register_function(self._set_xmlrpc_callback, 'samp.hub.setXmlrpcCallback') # Standard API operations server.register_function(self._register, 'samp.hub.register') server.register_function(self._unregister, 'samp.hub.unregister') server.register_function(self._declare_metadata, 'samp.hub.declareMetadata') server.register_function(self._get_metadata, 'samp.hub.getMetadata') server.register_function(self._declare_subscriptions, 'samp.hub.declareSubscriptions') server.register_function(self._get_subscriptions, 'samp.hub.getSubscriptions') server.register_function(self._get_registered_clients, 'samp.hub.getRegisteredClients') server.register_function(self._get_subscribed_clients, 'samp.hub.getSubscribedClients') server.register_function(self._notify, 'samp.hub.notify') server.register_function(self._notify_all, 'samp.hub.notifyAll') server.register_function(self._call, 'samp.hub.call') server.register_function(self._call_all, 'samp.hub.callAll') server.register_function(self._call_and_wait, 'samp.hub.callAndWait') server.register_function(self._reply, 'samp.hub.reply') def _register_web_profile_api(self, server): # Web Profile methods like Standard Profile server.register_function(self._ping, 'samp.webhub.ping') server.register_function(self._unregister, 'samp.webhub.unregister') server.register_function(self._declare_metadata, 'samp.webhub.declareMetadata') server.register_function(self._get_metadata, 'samp.webhub.getMetadata') server.register_function(self._declare_subscriptions, 'samp.webhub.declareSubscriptions') server.register_function(self._get_subscriptions, 'samp.webhub.getSubscriptions') server.register_function(self._get_registered_clients, 'samp.webhub.getRegisteredClients') server.register_function(self._get_subscribed_clients, 'samp.webhub.getSubscribedClients') server.register_function(self._notify, 'samp.webhub.notify') server.register_function(self._notify_all, 'samp.webhub.notifyAll') server.register_function(self._call, 'samp.webhub.call') server.register_function(self._call_all, 'samp.webhub.callAll') server.register_function(self._call_and_wait, 'samp.webhub.callAndWait') server.register_function(self._reply, 'samp.webhub.reply') # Methods particularly for Web Profile server.register_function(self._web_profile_register, 'samp.webhub.register') server.register_function(self._web_profile_allowReverseCallbacks, 'samp.webhub.allowReverseCallbacks') server.register_function(self._web_profile_pullCallbacks, 'samp.webhub.pullCallbacks') def _start_standard_server(self): self._server = ThreadingXMLRPCServer( (self._addr or self._host_name, self._port or 0), log, logRequests=False, allow_none=True) prot = 'http' self._port = self._server.socket.getsockname()[1] addr = "{0}:{1}".format(self._addr or self._host_name, self._port) self._url = urlunparse((prot, addr, '', '', '', '')) self._server.register_introspection_functions() self._register_standard_api(self._server) def _start_web_profile_server(self): self._web_profile_requests_queue = queue.Queue(1) self._web_profile_requests_result = queue.Queue(1) self._web_profile_requests_semaphore = queue.Queue(1) if self._web_profile_dialog is not None: # TODO: Some sort of duck-typing on the web_profile_dialog object self._web_profile_dialog.queue_request = \ self._web_profile_requests_queue self._web_profile_dialog.queue_result = \ self._web_profile_requests_result try: 
self._web_profile_server = WebProfileXMLRPCServer( ('localhost', self._web_port), log, logRequests=False, allow_none=True) self._web_port = self._web_profile_server.socket.getsockname()[1] self._web_profile_server.register_introspection_functions() self._register_web_profile_api(self._web_profile_server) log.info("Hub set to run with Web Profile support enabled.") except socket.error: log.warning("Port {0} already in use. Impossible to run the " "Hub with Web Profile support.".format(self._web_port), SAMPWarning) self._web_profile = False # Cleanup self._web_profile_requests_queue = None self._web_profile_requests_result = None self._web_profile_requests_semaphore = None def _launch_thread(self, group=None, target=None, name=None, args=None): # Remove inactive threads remove = [] for t in self._launched_threads: if not t.is_alive(): remove.append(t) for t in remove: self._launched_threads.remove(t) # Start new thread t = threading.Thread(group=group, target=target, name=name, args=args) t.start() # Add to list of launched threads self._launched_threads.append(t) def _join_launched_threads(self, timeout=None): for t in self._launched_threads: t.join(timeout=timeout) def _timeout_test_hub(self): if self._timeout == 0: return last = time.time() while self._is_running: time.sleep(0.05) # keep this small to check _is_running often now = time.time() if now - last > 1.: with self._thread_lock: if self._last_activity_time is not None: if now - self._last_activity_time >= self._timeout: warnings.warn("Timeout expired, Hub is shutting down!", SAMPWarning) self.stop() return last = now def _timeout_test_client(self): if self._client_timeout == 0: return last = time.time() while self._is_running: time.sleep(0.05) # keep this small to check _is_running often now = time.time() if now - last > 1.: for private_key in self._client_activity_time.keys(): if (now - self._client_activity_time[private_key] > self._client_timeout and private_key != self._hub_private_key): warnings.warn( "Client {} timeout expired!".format(private_key), SAMPWarning) self._notify_disconnection(private_key) self._unregister(private_key) last = now def _hub_as_client_request_handler(self, method, args): if method == 'samp.client.receiveCall': return self._receive_call(*args) elif method == 'samp.client.receiveNotification': return self._receive_notification(*args) elif method == 'samp.client.receiveResponse': return self._receive_response(*args) elif method == 'samp.app.ping': return self._ping(*args) def _setup_hub_as_client(self): hub_metadata = {"samp.name": "Astropy SAMP Hub", "samp.description.text": self._label, "author.name": "The Astropy Collaboration", "samp.documentation.url": "http://docs.astropy.org/en/stable/vo/samp", "samp.icon.url": self._url + "/samp/icon"} result = self._register(self._hub_secret) self._hub_public_id = result["samp.self-id"] self._hub_private_key = result["samp.private-key"] self._set_xmlrpc_callback(self._hub_private_key, self._url) self._declare_metadata(self._hub_private_key, hub_metadata) self._declare_subscriptions(self._hub_private_key, {"samp.app.ping": {}, "x-samp.query.by-meta": {}}) def start(self, wait=False): """ Start the current SAMP Hub instance and create the lock file. Hub start-up can be blocking or non blocking depending on the ``wait`` parameter. Parameters ---------- wait : bool If `True` then the Hub process is joined with the caller, blocking the code flow. Usually `True` option is used to run a stand-alone Hub in an executable script. 
If `False` (default), then the Hub process runs in a separated thread. `False` is usually used in a Python shell. """ if self._is_running: raise SAMPHubError("Hub is already running") if self._lockfile is not None: raise SAMPHubError("Hub is not running but lockfile is set") if self._web_profile: self._start_web_profile_server() self._start_standard_server() self._lockfile = create_lock_file(lockfilename=self._customlockfilename, mode=self._mode, hub_id=self.id, hub_params=self.params) self._update_last_activity_time() self._setup_hub_as_client() self._start_threads() log.info("Hub started") if wait and self._is_running: self._thread_run.join() self._thread_run = None @property def params(self): """ The hub parameters (which are written to the logfile) """ params = {} # Keys required by standard profile params['samp.secret'] = self._hub_secret params['samp.hub.xmlrpc.url'] = self._url params['samp.profile.version'] = __profile_version__ # Custom keys params['hub.id'] = self.id params['hub.label'] = self._label or "Hub {0}".format(self.id) return params def _start_threads(self): self._thread_run = threading.Thread(target=self._serve_forever) self._thread_run.daemon = True if self._timeout > 0: self._thread_hub_timeout = threading.Thread( target=self._timeout_test_hub, name="Hub timeout test") self._thread_hub_timeout.daemon = True else: self._thread_hub_timeout = None if self._client_timeout > 0: self._thread_client_timeout = threading.Thread( target=self._timeout_test_client, name="Client timeout test") self._thread_client_timeout.daemon = True else: self._thread_client_timeout = None self._is_running = True self._thread_run.start() if self._thread_hub_timeout is not None: self._thread_hub_timeout.start() if self._thread_client_timeout is not None: self._thread_client_timeout.start() def _create_secret_code(self): if self._hub_secret_code_customized is not None: return self._hub_secret_code_customized else: return str(uuid.uuid1()) def stop(self): """ Stop the current SAMP Hub instance and delete the lock file. """ if not self._is_running: return log.info("Hub is stopping...") self._notify_shutdown() self._is_running = False if self._lockfile and os.path.isfile(self._lockfile): lockfiledict = read_lockfile(self._lockfile) if lockfiledict['samp.secret'] == self._hub_secret: os.remove(self._lockfile) self._lockfile = None # Reset variables # TODO: What happens if not all threads are stopped after timeout? self._join_all_threads(timeout=10.) self._hub_msg_id_counter = 0 self._hub_secret = self._create_secret_code() self._hub_public_id = "" self._metadata = {} self._private_keys = {} self._mtype2ids = {} self._id2mtypes = {} self._xmlrpc_endpoints = {} self._last_activity_time = None log.info("Hub stopped.") def _join_all_threads(self, timeout=None): # In some cases, ``stop`` may be called from some of the sub-threads, # so we just need to make sure that we don't try and shut down the # calling thread. 
current_thread = threading.current_thread() if self._thread_run is not current_thread: self._thread_run.join(timeout=timeout) if not self._thread_run.is_alive(): self._thread_run = None if self._thread_hub_timeout is not None and self._thread_hub_timeout is not current_thread: self._thread_hub_timeout.join(timeout=timeout) if not self._thread_hub_timeout.is_alive(): self._thread_hub_timeout = None if self._thread_client_timeout is not None and self._thread_client_timeout is not current_thread: self._thread_client_timeout.join(timeout=timeout) if not self._thread_client_timeout.is_alive(): self._thread_client_timeout = None self._join_launched_threads(timeout=timeout) @property def is_running(self): """Return an information concerning the Hub running status. Returns ------- running : bool Is the hub running? """ return self._is_running def _serve_forever(self): while self._is_running: try: read_ready = select.select([self._server.socket], [], [], 0.01)[0] except select.error as exc: warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc), SAMPWarning) else: if read_ready: self._server.handle_request() if self._web_profile: # We now check if there are any connection requests from the # web profile, and if so, we initialize the pop-up. if self._web_profile_dialog is None: try: request = self._web_profile_requests_queue.get_nowait() except queue.Empty: pass else: web_profile_text_dialog(request, self._web_profile_requests_result) # We now check for requests over the web profile socket, and we # also update the pop-up in case there are any changes. try: read_ready = select.select([self._web_profile_server.socket], [], [], 0.01)[0] except select.error as exc: warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc), SAMPWarning) else: if read_ready: self._web_profile_server.handle_request() self._server.server_close() if self._web_profile_server is not None: self._web_profile_server.server_close() def _notify_shutdown(self): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown") for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: self._notify_(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.shutdown", "samp.params": {}}) def _notify_register(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: # if key != private_key: self._notify(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.register", "samp.params": {"id": public_id}}) def _notify_unregister(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: if key != private_key: self._notify(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.unregister", "samp.params": {"id": public_id}}) def _notify_metadata(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: # if key != private_key: self._notify(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.metadata", "samp.params": {"id": public_id, "metadata": self._metadata[private_key]} }) def 
_notify_subscriptions(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: self._notify(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.subscriptions", "samp.params": {"id": public_id, "subscriptions": self._id2mtypes[private_key]} }) def _notify_disconnection(self, private_key): def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message): endpoint.samp.client.receiveNotification(private_key, hub_public_id, message) msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect") public_id = self._private_keys[private_key][0] endpoint = self._xmlrpc_endpoints[public_id][1] for mtype in msubs: if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]: log.debug("notify disconnection to {}".format(public_id)) self._launch_thread(target=_xmlrpc_call_disconnect, args=(endpoint, private_key, self._hub_public_id, {"samp.mtype": "samp.hub.disconnect", "samp.params": {"reason": "Timeout expired!"}})) def _ping(self): self._update_last_activity_time() log.debug("ping") return "1" def _query_by_metadata(self, key, value): public_id_list = [] for private_id in self._metadata: if key in self._metadata[private_id]: if self._metadata[private_id][key] == value: public_id_list.append(self._private_keys[private_id][0]) return public_id_list def _set_xmlrpc_callback(self, private_key, xmlrpc_addr): self._update_last_activity_time(private_key) if private_key in self._private_keys: if private_key == self._hub_private_key: public_id = self._private_keys[private_key][0] self._xmlrpc_endpoints[public_id] = \ (xmlrpc_addr, _HubAsClient(self._hub_as_client_request_handler)) return "" # Dictionary stored with the public id log.debug("set_xmlrpc_callback: {} {}".format(private_key, xmlrpc_addr)) server_proxy_pool = None server_proxy_pool = ServerProxyPool(self._pool_size, xmlrpc.ServerProxy, xmlrpc_addr, allow_none=1) public_id = self._private_keys[private_key][0] self._xmlrpc_endpoints[public_id] = (xmlrpc_addr, server_proxy_pool) else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) return "" def _perform_standard_register(self): with self._thread_lock: private_key, public_id = self._get_new_ids() self._private_keys[private_key] = (public_id, time.time()) self._update_last_activity_time(private_key) self._notify_register(private_key) log.debug("register: private-key = {} and self-id = {}" .format(private_key, public_id)) return {"samp.self-id": public_id, "samp.private-key": private_key, "samp.hub-id": self._hub_public_id} def _register(self, secret): self._update_last_activity_time() if secret == self._hub_secret: return self._perform_standard_register() else: # return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""} raise SAMPProxyError(7, "Bad secret code") def _get_new_ids(self): private_key = str(uuid.uuid1()) self._client_id_counter += 1 public_id = 'cli#hub' if self._client_id_counter > 0: public_id = "cli#{}".format(self._client_id_counter) return private_key, public_id def _unregister(self, private_key): self._update_last_activity_time() public_key = "" self._notify_unregister(private_key) with self._thread_lock: if private_key in self._private_keys: public_key = self._private_keys[private_key][0] del self._private_keys[private_key] else: return "" if private_key in self._metadata: del self._metadata[private_key] if private_key in self._id2mtypes: del self._id2mtypes[private_key] for mtype in self._mtype2ids.keys(): if private_key in self._mtype2ids[mtype]: self._mtype2ids[mtype].remove(private_key) if public_key in self._xmlrpc_endpoints: del self._xmlrpc_endpoints[public_key] if private_key in self._client_activity_time: del self._client_activity_time[private_key] if self._web_profile: if private_key in self._web_profile_callbacks: del self._web_profile_callbacks[private_key] self._web_profile_server.remove_client(private_key) log.debug("unregister {} ({})".format(public_key, private_key)) return "" def _declare_metadata(self, private_key, metadata): self._update_last_activity_time(private_key) if private_key in self._private_keys: log.debug("declare_metadata: private-key = {} metadata = {}" .format(private_key, str(metadata))) self._metadata[private_key] = metadata self._notify_metadata(private_key) else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) return "" def _get_metadata(self, private_key, client_id): self._update_last_activity_time(private_key) if private_key in self._private_keys: client_private_key = self._public_id_to_private_key(client_id) log.debug("get_metadata: private-key = {} client-id = {}" .format(private_key, client_id)) if client_private_key is not None: if client_private_key in self._metadata: log.debug("--> metadata = {}" .format(self._metadata[client_private_key])) return self._metadata[client_private_key] else: return {} else: raise SAMPProxyError(6, "Invalid client ID") else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) def _declare_subscriptions(self, private_key, mtypes): self._update_last_activity_time(private_key) if private_key in self._private_keys: log.debug("declare_subscriptions: private-key = {} mtypes = {}" .format(private_key, str(mtypes))) # remove subscription to previous mtypes if private_key in self._id2mtypes: prev_mtypes = self._id2mtypes[private_key] for mtype in prev_mtypes: try: self._mtype2ids[mtype].remove(private_key) except ValueError: # private_key is not in list pass self._id2mtypes[private_key] = copy.deepcopy(mtypes) # remove duplicated MType for wildcard overwriting original_mtypes = copy.deepcopy(mtypes) for mtype in original_mtypes: if mtype.endswith("*"): for mtype2 in original_mtypes: if mtype2.startswith(mtype[:-1]) and \ mtype2 != mtype: if mtype2 in mtypes: del(mtypes[mtype2]) log.debug("declare_subscriptions: subscriptions accepted from " "{} => {}".format(private_key, str(mtypes))) for mtype in mtypes: if mtype in self._mtype2ids: if private_key not in self._mtype2ids[mtype]: self._mtype2ids[mtype].append(private_key) else: self._mtype2ids[mtype] = [private_key] self._notify_subscriptions(private_key) else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) return "" def _get_subscriptions(self, private_key, client_id): self._update_last_activity_time(private_key) if private_key in self._private_keys: client_private_key = self._public_id_to_private_key(client_id) if client_private_key is not None: if client_private_key in self._id2mtypes: log.debug("get_subscriptions: client-id = {} mtypes = {}" .format(client_id, str(self._id2mtypes[client_private_key]))) return self._id2mtypes[client_private_key] else: log.debug("get_subscriptions: client-id = {} mtypes = " "missing".format(client_id)) return {} else: raise SAMPProxyError(6, "Invalid client ID") else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) def _get_registered_clients(self, private_key): self._update_last_activity_time(private_key) if private_key in self._private_keys: reg_clients = [] for pkey in self._private_keys.keys(): if pkey != private_key: reg_clients.append(self._private_keys[pkey][0]) log.debug("get_registered_clients: private_key = {} clients = {}" .format(private_key, reg_clients)) return reg_clients else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) def _get_subscribed_clients(self, private_key, mtype): self._update_last_activity_time(private_key) if private_key in self._private_keys: sub_clients = {} for pkey in self._private_keys.keys(): if pkey != private_key and self._is_subscribed(pkey, mtype): sub_clients[self._private_keys[pkey][0]] = {} log.debug("get_subscribed_clients: private_key = {} mtype = {} " "clients = {}".format(private_key, mtype, sub_clients)) return sub_clients else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) @staticmethod def get_mtype_subtypes(mtype): """ Return a list containing all the possible wildcarded subtypes of MType. Parameters ---------- mtype : str MType to be parsed. 
Returns ------- types : list List of subtypes Examples -------- >>> from astropy.samp import SAMPHubServer >>> SAMPHubServer.get_mtype_subtypes("samp.app.ping") ['samp.app.ping', 'samp.app.*', 'samp.*', '*'] """ subtypes = [] msubs = mtype.split(".") indexes = list(range(len(msubs))) indexes.reverse() indexes.append(-1) for i in indexes: tmp_mtype = ".".join(msubs[:i + 1]) if tmp_mtype != mtype: if tmp_mtype != "": tmp_mtype = tmp_mtype + ".*" else: tmp_mtype = "*" subtypes.append(tmp_mtype) return subtypes def _is_subscribed(self, private_key, mtype): subscribed = False msubs = SAMPHubServer.get_mtype_subtypes(mtype) for msub in msubs: if msub in self._mtype2ids: if private_key in self._mtype2ids[msub]: subscribed = True return subscribed def _notify(self, private_key, recipient_id, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if self._is_subscribed(self._public_id_to_private_key(recipient_id), message["samp.mtype"]) is False: raise SAMPProxyError(2, "Client {} not subscribed to MType {}" .format(recipient_id, message["samp.mtype"])) self._launch_thread(target=self._notify_, args=(private_key, recipient_id, message)) return {} else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) def _notify_(self, sender_private_key, recipient_public_id, message): if sender_private_key not in self._private_keys: return sender_public_id = self._private_keys[sender_private_key][0] try: log.debug("notify {} from {} to {}".format( message["samp.mtype"], sender_public_id, recipient_public_id)) recipient_private_key = self._public_id_to_private_key(recipient_public_id) arg_params = (sender_public_id, message) samp_method_name = "receiveNotification" self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params) except Exception as exc: warnings.warn("{} notification from client {} to client {} " "failed [{}]".format(message["samp.mtype"], sender_public_id, recipient_public_id, exc), SAMPWarning) def _notify_all(self, private_key, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if "samp.mtype" not in message: raise SAMPProxyError(3, "samp.mtype keyword is missing") recipient_ids = self._notify_all_(private_key, message) return recipient_ids else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) def _notify_all_(self, sender_private_key, message): recipient_ids = [] msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"]) for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: if key != sender_private_key: _recipient_id = self._private_keys[key][0] recipient_ids.append(_recipient_id) self._launch_thread(target=self._notify, args=(sender_private_key, _recipient_id, message) ) return recipient_ids def _call(self, private_key, recipient_id, msg_tag, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if self._is_subscribed(self._public_id_to_private_key(recipient_id), message["samp.mtype"]) is False: raise SAMPProxyError(2, "Client {} not subscribed to MType {}" .format(recipient_id, message["samp.mtype"])) public_id = self._private_keys[private_key][0] msg_id = self._get_new_hub_msg_id(public_id, msg_tag) self._launch_thread(target=self._call_, args=(private_key, public_id, recipient_id, msg_id, message)) return msg_id else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) def _call_(self, sender_private_key, sender_public_id, recipient_public_id, msg_id, message): if sender_private_key not in self._private_keys: return try: log.debug("call {} from {} to {} ({})".format( msg_id.split(";;")[0], sender_public_id, recipient_public_id, message["samp.mtype"])) recipient_private_key = self._public_id_to_private_key(recipient_public_id) arg_params = (sender_public_id, msg_id, message) samp_methodName = "receiveCall" self._retry_method(recipient_private_key, recipient_public_id, samp_methodName, arg_params) except Exception as exc: warnings.warn("{} call {} from client {} to client {} failed " "[{},{}]".format(message["samp.mtype"], msg_id.split(";;")[0], sender_public_id, recipient_public_id, type(exc), exc), SAMPWarning) def _call_all(self, private_key, msg_tag, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if "samp.mtype" not in message: raise SAMPProxyError(3, "samp.mtype keyword is missing in " "message tagged as {}".format(msg_tag)) public_id = self._private_keys[private_key][0] msg_id = self._call_all_(private_key, public_id, msg_tag, message) return msg_id else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) def _call_all_(self, sender_private_key, sender_public_id, msg_tag, message): msg_id = {} msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"]) for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: if key != sender_private_key: _msg_id = self._get_new_hub_msg_id(sender_public_id, msg_tag) receiver_public_id = self._private_keys[key][0] msg_id[receiver_public_id] = _msg_id self._launch_thread(target=self._call_, args=(sender_private_key, sender_public_id, receiver_public_id, _msg_id, message)) return msg_id def _call_and_wait(self, private_key, recipient_id, message, timeout): self._update_last_activity_time(private_key) if private_key in self._private_keys: timeout = int(timeout) now = time.time() response = {} msg_id = self._call(private_key, recipient_id, "samp::sync::call", message) self._sync_msg_ids_heap[msg_id] = None while self._is_running: if timeout > 0 and time.time() - now >= timeout: del(self._sync_msg_ids_heap[msg_id]) raise SAMPProxyError(1, "Timeout expired!") if self._sync_msg_ids_heap[msg_id] is not None: response = copy.deepcopy(self._sync_msg_ids_heap[msg_id]) del(self._sync_msg_ids_heap[msg_id]) break time.sleep(0.01) return response else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) def _reply(self, private_key, msg_id, response): """ The main method that gets called for replying. This starts up an asynchronous reply thread and returns. """ self._update_last_activity_time(private_key) if private_key in self._private_keys: self._launch_thread(target=self._reply_, args=(private_key, msg_id, response)) else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) return {} def _reply_(self, responder_private_key, msg_id, response): if responder_private_key not in self._private_keys or not msg_id: return responder_public_id = self._private_keys[responder_private_key][0] counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(";;", 3) try: log.debug("reply {} from {} to {}".format( counter, responder_public_id, recipient_public_id)) if recipient_msg_tag == "samp::sync::call": if msg_id in self._sync_msg_ids_heap.keys(): self._sync_msg_ids_heap[msg_id] = response else: recipient_private_key = self._public_id_to_private_key(recipient_public_id) arg_params = (responder_public_id, recipient_msg_tag, response) samp_method_name = "receiveResponse" self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params) except Exception as exc: warnings.warn("{} reply from client {} to client {} failed [{}]" .format(recipient_msg_tag, responder_public_id, recipient_public_id, exc), SAMPWarning) def _retry_method(self, recipient_private_key, recipient_public_id, samp_method_name, arg_params): """ This method is used to retry a SAMP call several times. Parameters ---------- recipient_private_key The private key of the receiver of the call recipient_public_key The public key of the receiver of the call samp_method_name : str The name of the SAMP method to call arg_params : tuple Any additional arguments to be passed to the SAMP method """ if recipient_private_key is None: raise SAMPHubError("Invalid client ID") from . import conf for attempt in range(conf.n_retries): if not self._is_running: time.sleep(0.01) continue try: if (self._web_profile and recipient_private_key in self._web_profile_callbacks): # Web Profile callback = {"samp.methodName": samp_method_name, "samp.params": arg_params} self._web_profile_callbacks[recipient_private_key].put(callback) else: # Standard Profile hub = self._xmlrpc_endpoints[recipient_public_id][1] getattr(hub.samp.client, samp_method_name)(recipient_private_key, *arg_params) except xmlrpc.Fault as exc: log.debug("{} XML-RPC endpoint error (attempt {}): {}" .format(recipient_public_id, attempt + 1, exc.faultString)) time.sleep(0.01) else: return # If we are here, then the above attempts failed error_message = samp_method_name + " failed after " + str(conf.n_retries) + " attempts" raise SAMPHubError(error_message) def _public_id_to_private_key(self, public_id): for private_key in self._private_keys.keys(): if self._private_keys[private_key][0] == public_id: return private_key return None def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id): with self._thread_lock: self._hub_msg_id_counter += 1 return "msg#{};;{};;{};;{}".format(self._hub_msg_id_counter, self._hub_public_id, sender_public_id, sender_msg_id) def _update_last_activity_time(self, private_key=None): with self._thread_lock: self._last_activity_time = time.time() if private_key is not None: self._client_activity_time[private_key] = time.time() def _receive_notification(self, private_key, sender_id, message): return "" def _receive_call(self, private_key, sender_id, msg_id, message): if private_key == self._hub_private_key: if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping": self._reply(self._hub_private_key, msg_id, {"samp.status": SAMP_STATUS_OK, "samp.result": {}}) elif ("samp.mtype" in message and (message["samp.mtype"] == "x-samp.query.by-meta" or message["samp.mtype"] == "samp.query.by-meta")): ids_list = self._query_by_metadata(message["samp.params"]["key"], 
message["samp.params"]["value"]) self._reply(self._hub_private_key, msg_id, {"samp.status": SAMP_STATUS_OK, "samp.result": {"ids": ids_list}}) return "" else: return "" def _receive_response(self, private_key, responder_id, msg_tag, response): return "" def _web_profile_register(self, identity_info, client_address=("unknown", 0), origin="unknown"): self._update_last_activity_time() if not client_address[0] in ["localhost", "127.0.0.1"]: raise SAMPProxyError(403, "Request of registration rejected " "by the Hub.") if not origin: origin = "unknown" if isinstance(identity_info, dict): # an old version of the protocol provided just a string with the app name if "samp.name" not in identity_info: raise SAMPProxyError(403, "Request of registration rejected " "by the Hub (application name not " "provided).") # Red semaphore for the other threads self._web_profile_requests_semaphore.put("wait") # Set the request to be displayed for the current thread self._web_profile_requests_queue.put((identity_info, client_address, origin)) # Get the popup dialogue response response = self._web_profile_requests_result.get() # OK, semaphore green self._web_profile_requests_semaphore.get() if response: register_map = self._perform_standard_register() translator_url = ("http://localhost:{}/translator/{}?ref=" .format(self._web_port, register_map["samp.private-key"])) register_map["samp.url-translator"] = translator_url self._web_profile_server.add_client(register_map["samp.private-key"]) return register_map else: raise SAMPProxyError(403, "Request of registration rejected by " "the user.") def _web_profile_allowReverseCallbacks(self, private_key, allow): self._update_last_activity_time() if private_key in self._private_keys: if allow == "0": if private_key in self._web_profile_callbacks: del self._web_profile_callbacks[private_key] else: self._web_profile_callbacks[private_key] = queue.Queue() else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) return "" def _web_profile_pullCallbacks(self, private_key, timeout_secs): self._update_last_activity_time() if private_key in self._private_keys: callback = [] callback_queue = self._web_profile_callbacks[private_key] try: while self._is_running: item_queued = callback_queue.get_nowait() callback.append(item_queued) except queue.Empty: pass return callback else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) class WebProfileDialog(object): """ A base class to make writing Web Profile GUI consent dialogs easier. The concrete class must: 1) Poll ``handle_queue`` periodically, using the timer services of the GUI's event loop. This function will call ``self.show_dialog`` when a request requires authorization. ``self.show_dialog`` will be given the arguments: - ``samp_name``: The name of the application making the request. - ``details``: A dictionary of details about the client making the request. - ``client``: A hostname, port pair containing the client address. - ``origin``: A string containing the origin of the request. 2) Call ``consent`` or ``reject`` based on the user's response to the dialog. 
""" def handle_queue(self): try: request = self.queue_request.get_nowait() except queue.Empty: # queue is set but empty pass except AttributeError: # queue has not been set yet pass else: if isinstance(request[0], str): # To support the old protocol version samp_name = request[0] else: samp_name = request[0]["samp.name"] self.show_dialog(samp_name, request[0], request[1], request[2]) def consent(self): self.queue_result.put(True) def reject(self): self.queue_result.put(False)
utils.py
# Copyright 2012-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for testing pymongo """ import contextlib import copy import functools import os import re import shutil import sys import threading import time import unittest import warnings from collections import abc, defaultdict from functools import partial from test import client_context, db_pwd, db_user from bson import json_util from bson.objectid import ObjectId from bson.son import SON from pymongo import MongoClient, monitoring, operations, read_preferences from pymongo.collection import ReturnDocument from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.monitoring import _SENSITIVE_COMMANDS from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.uri_parser import parse_uri from pymongo.write_concern import WriteConcern IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) class BaseListener(object): def __init__(self): self.events = [] def reset(self): self.events = [] def add_event(self, event): self.events.append(event) def event_count(self, event_type): return len(self.events_by_type(event_type)) def events_by_type(self, event_type): """Return the matching events by event class. event_type can be a single class or a tuple of classes. 
""" return self.matching(lambda e: isinstance(e, event_type)) def matching(self, matcher): """Return the matching events.""" return [event for event in self.events[:] if matcher(event)] def wait_for_event(self, event, count): """Wait for a number of events to be published, or fail.""" wait_until(lambda: self.event_count(event) >= count, "find %s %s event(s)" % (count, event)) class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): def connection_created(self, event): self.add_event(event) def connection_ready(self, event): self.add_event(event) def connection_closed(self, event): self.add_event(event) def connection_check_out_started(self, event): self.add_event(event) def connection_check_out_failed(self, event): self.add_event(event) def connection_checked_out(self, event): self.add_event(event) def connection_checked_in(self, event): self.add_event(event) def pool_created(self, event): self.add_event(event) def pool_ready(self, event): self.add_event(event) def pool_cleared(self, event): self.add_event(event) def pool_closed(self, event): self.add_event(event) class EventListener(monitoring.CommandListener): def __init__(self): self.results = defaultdict(list) def started(self, event): self.results["started"].append(event) def succeeded(self, event): self.results["succeeded"].append(event) def failed(self, event): self.results["failed"].append(event) def started_command_names(self): """Return list of command names started.""" return [event.command_name for event in self.results["started"]] def reset(self): """Reset the state of this listener.""" self.results.clear() class TopologyEventListener(monitoring.TopologyListener): def __init__(self): self.results = defaultdict(list) def closed(self, event): self.results["closed"].append(event) def description_changed(self, event): self.results["description_changed"].append(event) def opened(self, event): self.results["opened"].append(event) def reset(self): """Reset the state of this listener.""" self.results.clear() class AllowListEventListener(EventListener): def __init__(self, *commands): self.commands = set(commands) super(AllowListEventListener, self).__init__() def started(self, event): if event.command_name in self.commands: super(AllowListEventListener, self).started(event) def succeeded(self, event): if event.command_name in self.commands: super(AllowListEventListener, self).succeeded(event) def failed(self, event): if event.command_name in self.commands: super(AllowListEventListener, self).failed(event) class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" def started(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) def succeeded(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).succeeded(event) def failed(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).failed(event) class _ServerEventListener(object): """Listens to all events.""" def __init__(self): self.results = [] def opened(self, event): self.results.append(event) def description_changed(self, event): self.results.append(event) def closed(self, event): self.results.append(event) def matching(self, matcher): """Return the matching events.""" results = self.results[:] return [event for event in results if matcher(event)] def reset(self): self.results = [] class ServerEventListener(_ServerEventListener, monitoring.ServerListener): """Listens 
to Server events.""" class ServerAndTopologyEventListener( # type: ignore[misc] ServerEventListener, monitoring.TopologyListener ): """Listens to Server and Topology events.""" class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener): """Listens to only server heartbeat events.""" def started(self, event): self.add_event(event) def succeeded(self, event): self.add_event(event) def failed(self, event): self.add_event(event) class MockSocketInfo(object): def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False def close_socket(self, reason): pass def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class MockPool(object): def __init__(self, address, options, handshake=True): self.gen = _PoolGeneration() self._lock = threading.Lock() self.opts = options self.operation_count = 0 def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) def get_socket(self, handler=None): return MockSocketInfo() def return_socket(self, *args, **kwargs): pass def _reset(self, service_id=None): with self._lock: self.gen.inc(service_id) def ready(self): pass def reset(self, service_id=None): self._reset() def reset_without_pause(self): self._reset() def close(self): self._reset() def update_is_writable(self, is_writable): pass def remove_stale_sockets(self, *args, **kwargs): pass class ScenarioDict(dict): """Dict that returns {} for any unknown key, recursively.""" def __init__(self, data): def convert(v): if isinstance(v, abc.Mapping): return ScenarioDict(v) if isinstance(v, (str, bytes)): return v if isinstance(v, abc.Sequence): return [convert(item) for item in v] return v dict.__init__(self, [(k, convert(v)) for k, v in data.items()]) def __getitem__(self, item): try: return dict.__getitem__(self, item) except KeyError: # Unlike a defaultdict, don't set the key, just return a dict. return ScenarioDict({}) class CompareType(object): """Class that compares equal to any object of the given type.""" def __init__(self, type): self.type = type def __eq__(self, other): return isinstance(other, self.type) def __ne__(self, other): """Needed for Python 2.""" return not self.__eq__(other) class FunctionCallRecorder(object): """Utility class to wrap a callable and record its invocations.""" def __init__(self, function): self._function = function self._call_list = [] def __call__(self, *args, **kwargs): self._call_list.append((args, kwargs)) return self._function(*args, **kwargs) def reset(self): """Wipes the call list.""" self._call_list = [] def call_list(self): """Returns a copy of the call list.""" return self._call_list[:] @property def call_count(self): """Returns the number of times the function has been called.""" return len(self._call_list) class TestCreator(object): """Class to create test cases from specifications.""" def __init__(self, create_test, test_class, test_path): """Create a TestCreator object. :Parameters: - `create_test`: callback that returns a test case. The callback must accept the following arguments - a dictionary containing the entire test specification (the `scenario_def`), a dictionary containing the specification for which the test case will be generated (the `test_def`). - `test_class`: the unittest.TestCase class in which to create the test case. - `test_path`: path to the directory containing the JSON files with the test specifications. 
""" self._create_test = create_test self._test_class = test_class self.test_path = test_path def _ensure_min_max_server_version(self, scenario_def, method): """Test modifier that enforces a version range for the server on a test case.""" if "minServerVersion" in scenario_def: min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) if min_ver is not None: method = client_context.require_version_min(*min_ver)(method) if "maxServerVersion" in scenario_def: max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) if max_ver is not None: method = client_context.require_version_max(*max_ver)(method) if "serverless" in scenario_def: serverless = scenario_def["serverless"] if serverless == "require": serverless_satisfied = client_context.serverless elif serverless == "forbid": serverless_satisfied = not client_context.serverless else: # unset or "allow" serverless_satisfied = True method = unittest.skipUnless( serverless_satisfied, "Serverless requirement not satisfied" )(method) return method @staticmethod def valid_topology(run_on_req): return client_context.is_topology_type( run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) ) @staticmethod def min_server_version(run_on_req): version = run_on_req.get("minServerVersion") if version: min_ver = tuple(int(elt) for elt in version.split(".")) return client_context.version >= min_ver return True @staticmethod def max_server_version(run_on_req): version = run_on_req.get("maxServerVersion") if version: max_ver = tuple(int(elt) for elt in version.split(".")) return client_context.version <= max_ver return True @staticmethod def valid_auth_enabled(run_on_req): if "authEnabled" in run_on_req: if run_on_req["authEnabled"]: return client_context.auth_enabled return not client_context.auth_enabled return True @staticmethod def serverless_ok(run_on_req): serverless = run_on_req["serverless"] if serverless == "require": return client_context.serverless elif serverless == "forbid": return not client_context.serverless else: # unset or "allow" return True def should_run_on(self, scenario_def): run_on = scenario_def.get("runOn", []) if not run_on: # Always run these tests. return True for req in run_on: if ( self.valid_topology(req) and self.min_server_version(req) and self.max_server_version(req) and self.valid_auth_enabled(req) and self.serverless_ok(req) ): return True return False def ensure_run_on(self, scenario_def, method): """Test modifier that enforces a 'runOn' on a test case.""" return client_context._require( lambda: self.should_run_on(scenario_def), "runOn not satisfied", method ) def tests(self, scenario_def): """Allow CMAP spec test to override the location of test.""" return scenario_def["tests"] def create_tests(self): for dirpath, _, filenames in os.walk(self.test_path): dirname = os.path.split(dirpath)[-1] for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: # Use tz_aware=False to match how CodecOptions decodes # dates. opts = json_util.JSONOptions(tz_aware=False) scenario_def = ScenarioDict( json_util.loads(scenario_stream.read(), json_options=opts) ) test_type = os.path.splitext(filename)[0] # Construct test from scenario. 
for test_def in self.tests(scenario_def): test_name = "test_%s_%s_%s" % ( dirname, test_type.replace("-", "_").replace(".", "_"), str(test_def["description"].replace(" ", "_").replace(".", "_")), ) new_test = self._create_test(scenario_def, test_def, test_name) new_test = self._ensure_min_max_server_version(scenario_def, new_test) new_test = self.ensure_run_on(scenario_def, new_test) new_test.__name__ = test_name setattr(self._test_class, new_test.__name__, new_test) def _connection_string(h): if h.startswith("mongodb://") or h.startswith("mongodb+srv://"): return h return "mongodb://%s" % (str(h),) def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): """Create a new client over SSL/TLS if necessary.""" host = host or client_context.host port = port or client_context.port client_options: dict = client_context.default_client_options.copy() if client_context.replica_set_name and not directConnection: client_options["replicaSet"] = client_context.replica_set_name if directConnection is not None: client_options["directConnection"] = directConnection client_options.update(kwargs) uri = _connection_string(host) if client_context.auth_enabled and authenticate: # Only add the default username or password if one is not provided. res = parse_uri(uri) if ( not res["username"] and not res["password"] and "username" not in client_options and "password" not in client_options ): client_options["username"] = db_user client_options["password"] = db_pwd return MongoClient(uri, port, **client_options) def single_client_noauth(h=None, p=None, **kwargs): """Make a direct connection. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) def single_client(h=None, p=None, **kwargs): """Make a direct connection, and authenticate if necessary.""" return _mongo_client(h, p, directConnection=True, **kwargs) def rs_client_noauth(h=None, p=None, **kwargs): """Connect to the replica set. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, **kwargs) def rs_client(h=None, p=None, **kwargs): """Connect to the replica set and authenticate if necessary.""" return _mongo_client(h, p, **kwargs) def rs_or_single_client_noauth(h=None, p=None, **kwargs): """Connect to the replica set if there is one, otherwise the standalone. Like rs_or_single_client, but does not authenticate. """ return _mongo_client(h, p, authenticate=False, **kwargs) def rs_or_single_client(h=None, p=None, **kwargs): """Connect to the replica set if there is one, otherwise the standalone. Authenticates if necessary. """ return _mongo_client(h, p, **kwargs) def ensure_all_connected(client): """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a non-replica set client. Depending on the use-case, the caller may need to clear any event listeners that are configured on the client. """ hello = client.admin.command(HelloCompat.LEGACY_CMD) if "setName" not in hello: raise ConfigurationError("cluster is not a replica set") target_host_list = set(hello["hosts"]) connected_host_list = set([hello["me"]]) admindb = client.get_database("admin") # Run hello until we have connected to each host at least once. 
while connected_host_list != target_host_list: hello = admindb.command(HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY) connected_host_list.update([hello["me"]]) def one(s): """Get one element of a set""" return next(iter(s)) def oid_generated_on_process(oid): """Makes a determination as to whether the given ObjectId was generated by the current process, based on the 5-byte random number in the ObjectId. """ return ObjectId._random() == oid.binary[4:9] def delay(sec): return """function() { sleep(%f * 1000); return true; }""" % sec def get_command_line(client): command_line = client.admin.command("getCmdLineOpts") assert command_line["ok"] == 1, "getCmdLineOpts() failed" return command_line def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() def camel_to_upper_camel(camel): return camel[0].upper() + camel[1:] def camel_to_snake_args(arguments): for arg_name in list(arguments): c2s = camel_to_snake(arg_name) arguments[c2s] = arguments.pop(arg_name) return arguments def snake_to_camel(snake): # Regex to convert snake_case to lowerCamelCase. return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake) def parse_collection_options(opts): if "readPreference" in opts: opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) if "writeConcern" in opts: opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) if "readConcern" in opts: opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) return opts def server_started_with_option(client, cmdline_opt, config_opt): """Check if the server was started with a particular option. :Parameters: - `cmdline_opt`: The command line option (i.e. --nojournal) - `config_opt`: The config file option (i.e. nojournal) """ command_line = get_command_line(client) if "parsed" in command_line: parsed = command_line["parsed"] if config_opt in parsed: return parsed[config_opt] argv = command_line["argv"] return cmdline_opt in argv def server_started_with_auth(client): try: command_line = get_command_line(client) except OperationFailure as e: msg = e.details.get("errmsg", "") # type: ignore if e.code == 13 or "unauthorized" in msg or "login" in msg: # Unauthorized. return True raise # MongoDB >= 2.0 if "parsed" in command_line: parsed = command_line["parsed"] # MongoDB >= 2.6 if "security" in parsed: security = parsed["security"] # >= rc3 if "authorization" in security: return security["authorization"] == "enabled" # < rc3 return security.get("auth", False) or bool(security.get("keyFile")) return parsed.get("auth", False) or bool(parsed.get("keyFile")) # Legacy argv = command_line["argv"] return "--auth" in argv or "--keyFile" in argv def drop_collections(db): # Drop all non-system collections in this database. for coll in db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): db.drop_collection(coll) def remove_all_users(db): db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) def joinall(threads): """Join threads with a 5-minute timeout, assert joins succeeded""" for t in threads: t.join(300) assert not t.is_alive(), "Thread %s hung" % t def connected(client): """Convenience to wait for a newly-constructed client to connect.""" with warnings.catch_warnings(): # Ignore warning that ping is always routed to primary even # if client's read preference isn't PRIMARY. 
warnings.simplefilter("ignore", UserWarning) client.admin.command("ping") # Force connection. return client def wait_until(predicate, success_description, timeout=10): """Wait up to 10 seconds (by default) for predicate to be true. E.g.: wait_until(lambda: client.primary == ('a', 1), 'connect to the primary') If the lambda-expression isn't true after 10 seconds, we raise AssertionError("Didn't ever connect to the primary"). Returns the predicate's first true value. """ start = time.time() interval = min(float(timeout) / 100, 0.1) while True: retval = predicate() if retval: return retval if time.time() - start > timeout: raise AssertionError("Didn't ever %s" % success_description) time.sleep(interval) def repl_set_step_down(client, **kwargs): """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" cmd = SON([("replSetStepDown", 1)]) cmd.update(kwargs) # Unfreeze a secondary to ensure a speedy election. client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) client.admin.command(cmd) def is_mongos(client): res = client.admin.command(HelloCompat.LEGACY_CMD) return res.get("msg", "") == "isdbgrid" def assertRaisesExactly(cls, fn, *args, **kwargs): """ Unlike the standard assertRaises, this checks that a function raises a specific class of exception, and not a subclass. E.g., check that MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. """ try: fn(*args, **kwargs) except Exception as e: assert e.__class__ == cls, "got %s, expected %s" % (e.__class__.__name__, cls.__name__) else: raise AssertionError("%s not raised" % cls) @contextlib.contextmanager def _ignore_deprecations(): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) yield def ignore_deprecations(wrapped=None): """A context manager or a decorator.""" if wrapped: @functools.wraps(wrapped) def wrapper(*args, **kwargs): with _ignore_deprecations(): return wrapped(*args, **kwargs) return wrapper else: return _ignore_deprecations() class DeprecationFilter(object): def __init__(self, action="ignore"): """Start filtering deprecations.""" self.warn_context = warnings.catch_warnings() self.warn_context.__enter__() warnings.simplefilter(action, DeprecationWarning) def stop(self): """Stop filtering deprecations.""" self.warn_context.__exit__() # type: ignore self.warn_context = None # type: ignore def get_pool(client): """Get the standalone, primary, or mongos pool.""" topology = client._get_topology() server = topology.select_server(writable_server_selector) return server.pool def get_pools(client): """Get all pools.""" return [server.pool for server in client._get_topology().select_servers(any_server_selector)] # Constants for run_threads and lazy_client_trial. NTRIALS = 5 NTHREADS = 10 def run_threads(collection, target): """Run a target function in many threads. target is a function taking a Collection and an integer. """ threads = [] for i in range(NTHREADS): bound_target = partial(target, collection, i) threads.append(threading.Thread(target=bound_target)) for t in threads: t.start() for t in threads: t.join(60) assert not t.is_alive() @contextlib.contextmanager def frequent_thread_switches(): """Make concurrency bugs more likely to manifest.""" interval = sys.getswitchinterval() sys.setswitchinterval(1e-6) try: yield finally: sys.setswitchinterval(interval) def lazy_client_trial(reset, target, test, get_client): """Test concurrent operations on a lazily-connecting client. `reset` takes a collection and resets it for the next trial. 
`target` takes a lazily-connecting collection and an index from 0 to NTHREADS, and performs some operation, e.g. an insert. `test` takes the lazily-connecting collection and asserts a post-condition to prove `target` succeeded. """ collection = client_context.client.pymongo_test.test with frequent_thread_switches(): for i in range(NTRIALS): reset(collection) lazy_client = get_client() lazy_collection = lazy_client.pymongo_test.test run_threads(lazy_collection, target) test(lazy_collection) def gevent_monkey_patched(): """Check if gevent's monkey patching is active.""" # In Python 3.6 importing gevent.socket raises an ImportWarning. with warnings.catch_warnings(): warnings.simplefilter("ignore", ImportWarning) try: import socket import gevent.socket return socket.socket is gevent.socket.socket except ImportError: return False def eventlet_monkey_patched(): """Check if eventlet's monkey patching is active.""" import threading return threading.current_thread.__module__ == "eventlet.green.threading" def is_greenthread_patched(): return gevent_monkey_patched() or eventlet_monkey_patched() def disable_replication(client): """Disable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") def enable_replication(client): """Enable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") class ExceptionCatchingThread(threading.Thread): """A thread that stores any exception encountered from run().""" def __init__(self, *args, **kwargs): self.exc = None super(ExceptionCatchingThread, self).__init__(*args, **kwargs) def run(self): try: super(ExceptionCatchingThread, self).run() except BaseException as exc: self.exc = exc raise def parse_read_preference(pref): # Make first letter lowercase to match read_pref's modes. mode_string = pref.get("mode", "primary") mode_string = mode_string[:1].lower() + mode_string[1:] mode = read_preferences.read_pref_mode_from_name(mode_string) max_staleness = pref.get("maxStalenessSeconds", -1) tag_sets = pref.get("tag_sets") return read_preferences.make_read_preference( mode, tag_sets=tag_sets, max_staleness=max_staleness ) def server_name_to_type(name): """Convert a ServerType name to the corresponding value. For SDAM tests.""" # Special case, some tests in the spec include the PossiblePrimary # type, but only single-threaded drivers need that type. We call # possible primaries Unknown. 
if name == "PossiblePrimary": return SERVER_TYPE.Unknown return getattr(SERVER_TYPE, name) def cat_files(dest, *sources): """Cat multiple files into dest.""" with open(dest, "wb") as fdst: for src in sources: with open(src, "rb") as fsrc: shutil.copyfileobj(fsrc, fdst) @contextlib.contextmanager def assertion_context(msg): """A context manager that adds info to an assertion failure.""" try: yield except AssertionError as exc: msg = "%s (%s)" % (exc, msg) exc_type, exc_val, exc_tb = sys.exc_info() assert exc_type is not None raise exc_type(exc_val).with_traceback(exc_tb) def parse_spec_options(opts): if "readPreference" in opts: opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) if "writeConcern" in opts: opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) if "readConcern" in opts: opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) if "maxTimeMS" in opts: opts["max_time_ms"] = opts.pop("maxTimeMS") if "maxCommitTimeMS" in opts: opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") if "hint" in opts: hint = opts.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) opts["hint"] = hint # Properly format 'hint' arguments for the Bulk API tests. if "requests" in opts: reqs = opts.pop("requests") for req in reqs: if "name" in req: # CRUD v2 format args = req.pop("arguments", {}) if "hint" in args: hint = args.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) args["hint"] = hint req["arguments"] = args else: # Unified test format bulk_model, spec = next(iter(req.items())) if "hint" in spec: hint = spec.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) spec["hint"] = hint opts["requests"] = reqs return dict(opts) def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): for arg_name in list(arguments): c2s = camel_to_snake(arg_name) # PyMongo accepts sort as list of tuples. if arg_name == "sort": sort_dict = arguments[arg_name] arguments[arg_name] = list(sort_dict.items()) # Named "key" instead not fieldName. if arg_name == "fieldName": arguments["key"] = arguments.pop(arg_name) # Aggregate uses "batchSize", while find uses batch_size. elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": continue # Requires boolean returnDocument. elif arg_name == "returnDocument": arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) elif c2s == "requests": # Parse each request into a bulk write model. requests = [] for request in arguments["requests"]: if "name" in request: # CRUD v2 format bulk_model = camel_to_upper_camel(request["name"]) bulk_class = getattr(operations, bulk_model) bulk_arguments = camel_to_snake_args(request["arguments"]) else: # Unified test format bulk_model, spec = next(iter(request.items())) bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) bulk_arguments = camel_to_snake_args(spec) requests.append(bulk_class(**dict(bulk_arguments))) arguments["requests"] = requests elif arg_name == "session": arguments["session"] = entity_map[arguments["session"]] elif opname in ("command", "run_admin_command") and arg_name == "command": # Ensure the first key is the command name. 
ordered_command = SON([(spec["command_name"], 1)]) ordered_command.update(arguments["command"]) arguments["command"] = ordered_command elif opname == "open_download_stream" and arg_name == "id": arguments["file_id"] = arguments.pop(arg_name) elif opname != "find" and c2s == "max_time_ms": # find is the only method that accepts snake_case max_time_ms. # All other methods take kwargs which must use the server's # camelCase maxTimeMS. See PYTHON-1855. arguments["maxTimeMS"] = arguments.pop("max_time_ms") elif opname == "with_transaction" and arg_name == "callback": if "operations" in arguments[arg_name]: # CRUD v2 format callback_ops = arguments[arg_name]["operations"] else: # Unified test format callback_ops = arguments[arg_name] arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops)) elif opname == "drop_collection" and arg_name == "collection": arguments["name_or_collection"] = arguments.pop(arg_name) elif opname == "create_collection": if arg_name == "collection": arguments["name"] = arguments.pop(arg_name) # Any other arguments to create_collection are passed through # **kwargs. elif opname == "create_index" and arg_name == "keys": arguments["keys"] = list(arguments.pop(arg_name).items()) elif opname == "drop_index" and arg_name == "name": arguments["index_or_name"] = arguments.pop(arg_name) else: arguments[c2s] = arguments.pop(arg_name)
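# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original helpers): how a spec-test
# module typically wires up TestCreator. The names _EXAMPLE_TEST_PATH,
# ExampleSpecTests and _create_example_test are hypothetical and exist purely
# for this example; real test modules define their own equivalents.

_EXAMPLE_TEST_PATH = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "example_spec")


class ExampleSpecTests(unittest.TestCase):
    """Empty container class; generated test_* methods are attached to it."""


def _create_example_test(scenario_def, test_def, name):
    # The callback receives the full scenario and one test definition and
    # returns the test method that TestCreator attaches to the test class.
    def run_scenario(self):
        # A real implementation would build fixtures from scenario_def and
        # execute the operations listed in test_def.
        self.assertIn("description", test_def)

    return run_scenario


# Uncommenting the two lines below would generate one test method per
# scenario/test pair found under _EXAMPLE_TEST_PATH, with the version,
# topology and serverless requirements applied by TestCreator.
# _creator = TestCreator(_create_example_test, ExampleSpecTests, _EXAMPLE_TEST_PATH)
# _creator.create_tests()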
conn_qpid.py
#coding:utf-8 __author__=u'sam ' """ conn_qpid.py 2017/07/07 sam 1. 消息接收处理增加异常保护,防止异常抛出导致接收终止 消息处理之后进行确认,保证消息不丢失,异常产生重新接收 """ from threading import Condition from threading import Lock from threading import Thread import traceback import gevent from qpid.messaging import Connection from qpid.messaging import Message # from qpid.util import URL from camel.fundamental.amqp.base import MessageQueueType,AccessMode from camel.fundamental.utils.importutils import import_function from camel.biz.application.camelsrv import CamelApplication,instance class MQConnectionQpid(object): def __init__(self,cfg): self.type = MessageQueueType.QPID self.name = cfg.get('name') self.host = cfg.get('host') self.port = cfg.get('port') self.address = cfg.get('address') self.execthread_nr = cfg.get('exec_thread_nr',1) self.entry = cfg.get('entry') self.conn = None self.ssn = None self.producer = None self.consumer = None self.cond_readable = Condition() self.thread = None self.isclosed = True self.execthreads = [] self.message_pool = [] self.lock = Lock() self.func_list ={} #导入的函数列表 self.rw = 0 def open(self,access=AccessMode.READ): if access == 0: return self broker = "%s:%s" % (self.host, self.port) # if self.conn is not None: # return self if not self.conn: self.conn = Connection(broker, reconnect=True, tcp_nodelay=True) self.conn.open() self.ssn = self.conn.session() if access & AccessMode.READ: if not self.consumer: self.consumer = self.ssn.receiver(self.address) self.consumer.capacity = 4000 func = import_function(self.entry) # importing functions dynamically self.func_list[self.entry] = func self.thread = Thread(target=self._messageRecieving) self.thread.start() if access & AccessMode.WRITE: if not self.producer: self.producer = self.ssn.sender(self.address) return self def close(self): self.isclosed = True with self.cond_readable: self.cond_readable.notify_all() if self.conn : self.conn.close() self.conn = None self.thread.join() def _executeThread(self): """多线程""" while not self.isclosed: with self.cond_readable: self.cond_readable.wait() if self.isclosed: break message = None self.lock.acquire() if len(self.message_pool): message = self.message_pool[0] del self.message_pool[0] self.lock.release() if message is not None: func = self.func_list[self.entry] func(message) # pass into user space print 'topic read thread is exiting..' def produce(self,message): message = Message(message) self.producer.send(message, False) def _messageRecieving(self): """消息接收线程,保证一个线程接收""" self.isclosed = False for nr in range(self.execthread_nr): thread = Thread(target=self._executeThread) self.execthreads.append(thread) thread.start() while not self.isclosed: try: message = self.consumer.fetch() message = message.content # self.ssn.acknowledge(sync=False) if message is not None: func = self.func_list[self.entry] try: func(message) self.ssn.acknowledge(sync=False) except: instance.getLogger().error(u'amqp::qpid message process error. detail:{}'.format(traceback.format_exc())) # self.lock.acquire() # self.message_pool.append(message) # self.lock.release() # with self.cond_readable: # # self.cond_readable.notify() # self.cond_readable.notify_all() except: instance.getLogger().warn(u'amqp::qpid fetch message failed. detail:{}'.format(traceback.format_exc())) gevent.sleep(5) # for thread in self.execthreads: # thread.join() instance.getLogger().info( 'amqp::qpid recieve-thread is exiting...') __all__ = (MQConnectionQpid)
crawlers.py
import requests import random import chardet import os from time import sleep from queue import Queue from threading import Thread, Lock from .bloom_filter import BloomFilter from .parsers import BaseParser class BaseCrawler(object): def __init__(self, headers=None): self.new_session() self.headers = headers def get_html(self, url, headers=None): assert self.session != None headers = headers or self.headers # if headers are not specified for this single request, use the global headers content = self.session.get(url, headers=headers).content decoded_content = content.decode("utf8") # maybe not utf-8 for some sites? return decoded_content def new_session(self): self.session = requests.Session() class MultiThreadingCrawler(BaseCrawler): def __init__(self, thread_num=4, headers=None, session_num=10, index_file=None, data_folder=None): super(MultiThreadingCrawler, self).__init__(headers) self.thread_num = thread_num self.index_file = index_file or "index.txt" self.data_folder = data_folder or "html_data" self.my_parser = BaseParser() self.sessions = list() for i in range(session_num): self.sessions.append(requests.Session()) def get_html(self, url, headers=None): assert len(self.sessions) != 0, "There's no Session available!" headers = headers or self.headers current_session = random.choice(self.sessions) raw_html = current_session.get(url, headers=headers).content decoded_html = self.__decode_html(raw_html) return decoded_html def __decode_html(self, raw_html): encoding_info = chardet.detect(raw_html) try: return raw_html.decode(encoding_info["encoding"] or "utf8") except Exception as e: print("Failed to decode:", e) return raw_html.decode("utf8") def __get_links(self, html_content, current_url): return self.my_parser.parse_url(html_content, current_url) def __wirte_data(self, url, content): filename = os.path.join(self.data_folder, self.__valid_filename(url)) with open(self.index_file, mode="a", encoding="utf8") as index: index.write("{url}\t{filename}\n".format(url=url, filename=filename)) if not os.path.exists(self.data_folder): os.mkdir(self.data_folder) with open(filename, mode="w", encoding="utf8") as file: file.write(content) def __valid_filename(self, s): import string valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) s = ''.join(c for c in s if c in valid_chars) s += ".html" return s def crawl_from(self, seed, max_page, thread_num=None, sleeping=None): def crawl_single_page(): print("I'm working!") while count[0] < max_page: url = url_queue.get(timeout=10) if not crawled.check(url): print("#{0:<4} {1}".format(count[0]+1, url)) try: content = self.get_html(url) outlinks = self.__get_links(content, url) self.__wirte_data(url, content) except Exception as e: print(e) continue with lock: if count[0] >= max_page: print("Drop!") break graph[url] = outlinks for l in outlinks: url_queue.put(l) crawled.add(url) count[0] += 1 sleep(sleeping) url_queue.task_done() print("Done!") url_queue = Queue() lock = Lock() crawled = BloomFilter(max_page) graph = {} count = [0, ] url_queue.put(seed) threads = [] sleeping = sleeping or 5 print("Thread:", thread_num or self.thread_num) for i in range(thread_num or self.thread_num): t = Thread(target=crawl_single_page) threads.append(t) t.setDaemon(True) t.start() for t in threads: t.join() return graph, crawled def unit_test(): print("Unit Test Begins.") test_crawler = BaseCrawler() print("Try to get the content of https://keithnull.top/") html = test_crawler.get_html("https://keithnull.top/") assert html.find("Keith") != -1 print("Success") 
def debugging(): crawler = MultiThreadingCrawler() seed = "https://www.baidu.com" max_page = 10 thread_num = 4 crawler.crawl_from(seed, max_page, thread_num) if __name__ == '__main__': # unit_test() debugging()
mdrepo.py
""" This is the implementation of the active repository of SAML metadata. The 'local' and 'remote' pipes operate on this. """ from StringIO import StringIO from datetime import datetime import hashlib import urllib from UserDict import DictMixin, UserDict from lxml import etree from lxml.builder import ElementMaker from lxml.etree import DocumentInvalid import os import re from copy import deepcopy from pyff import merge_strategies import pyff.index from pyff.logs import log from pyff.utils import schema, URLFetch, filter_lang, root, duration2timedelta, template import xmlsec from pyff.constants import NS, NF_URI, DIGESTS, EVENT_DROP_ENTITY, EVENT_IMPORTED_METADATA, EVENT_IMPORT_FAIL import traceback import threading from Queue import Queue __author__ = 'leifj' def _is_self_signed_err(ebuf): for e in ebuf: if e['func'] == 'xmlSecOpenSSLX509StoreVerify' and re.match('err=18', e['message']): return True return False etree.set_default_parser(etree.XMLParser(resolve_entities=False)) def _e(error_log, m=None): def _f(x): if ":WARNING:" in x: return False if m is not None and not m in x: return False return True return "\n".join(filter(_f, ["%s" % e for e in error_log])) class MetadataException(Exception): pass class Event(UserDict): pass class Observable(object): def __init__(self): self.callbacks = [] def subscribe(self, callback): self.callbacks.append(callback) def fire(self, **attrs): e = Event(attrs) e['time'] = datetime.now() for fn in self.callbacks: fn(e) class MDRepository(DictMixin, Observable): """A class representing a set of SAML Metadata. Instances present as dict-like objects where the keys are URIs and values are EntitiesDescriptor elements containing sets of metadata. """ def __init__(self, index=pyff.index.MemoryIndex(), metadata_cache_enabled=False, min_cache_ttl="PT5M"): self.md = {} self.index = index self.metadata_cache_enabled = metadata_cache_enabled self.min_cache_ttl = min_cache_ttl self.respect_cache_duration = True self.default_cache_duration = "PT10M" self.retry_limit = 5 super(MDRepository, self).__init__() def is_idp(self, entity): """Returns True if the supplied EntityDescriptor has an IDPSSODescriptor Role :param entity: An EntityDescriptor element """ return bool(entity.find(".//{%s}IDPSSODescriptor" % NS['md']) is not None) def is_sp(self, entity): """Returns True if the supplied EntityDescriptor has an SPSSODescriptor Role :param entity: An EntityDescriptor element """ return bool(entity.find(".//{%s}SPSSODescriptor" % NS['md']) is not None) def display(self, entity): """Utility-method for computing a displayable string for a given entity. :param entity: An EntityDescriptor element """ for displayName in filter_lang(entity.findall(".//{%s}DisplayName" % NS['mdui'])): return displayName.text for serviceName in filter_lang(entity.findall(".//{%s}ServiceName" % NS['md'])): return serviceName.text for organizationDisplayName in filter_lang(entity.findall(".//{%s}OrganizationDisplayName" % NS['md'])): return organizationDisplayName.text for organizationName in filter_lang(entity.findall(".//{%s}OrganizationName" % NS['md'])): return organizationName.text return entity.get('entityID') def __iter__(self): for t in [self.md[url] for url in self.md.keys()]: for entity in t.findall(".//{%s}EntityDescriptor" % NS['md']): yield entity def sha1_id(self, e): return pyff.index.hash_id(e, 'sha1') def search(self, query, path=None, page=None, page_limit=10, entity_filter=None): """ :param query: A string to search for. 
:param path: The repository collection (@Name) to search in - None for search in all collections :param page: When using paged search, the page index :param page_limit: When using paged search, the maximum entry per page :param entity_filter: A lookup expression used to filter the entries before search is done. Returns a list of dict's for each EntityDescriptor present in the metadata store such that any of the DisplayName, ServiceName, OrganizationName or OrganizationDisplayName elements match the query (as in contains the query as a substring). The dict in the list contains three items: :param label: A displayable string, useful as a UI label :param value: The entityID of the EntityDescriptor :param id: A sha1-ID of the entityID - on the form {sha1}<sha1-hash-of-entityID> """ def _strings(e): lst = [e.get('entityID')] for attr in ['.//{%s}DisplayName' % NS['mdui'], './/{%s}ServiceName' % NS['md'], './/{%s}OrganizationDisplayName' % NS['md'], './/{%s}OrganizationName' % NS['md']]: lst.extend([x.text.lower() for x in e.findall(attr)]) return filter(lambda s: s is not None, lst) def _match(query, e): #log.debug("looking for %s in %s" % (query,",".join(_strings(e)))) for qstr in _strings(e): if query in qstr: return True return False f = [] if path is not None: f.append(path) if entity_filter is not None: f.append(entity_filter) mexpr = None if f: mexpr = "+".join(f) log.debug("mexpr: %s" % mexpr) res = [{'label': self.display(e), 'value': e.get('entityID'), 'id': pyff.index.hash_id(e, 'sha1')} for e in pyff.index.EntitySet(filter(lambda ent: _match(query, ent), self.lookup(mexpr)))] res.sort(key=lambda i: i['label']) log.debug(res) if page is not None: total = len(res) begin = (page - 1) * page_limit end = begin + page_limit more = (end < total) return res[begin:end], more, total else: return res def sane(self): """A very basic test for sanity. An empty metadata set is probably not a sane output of any process. :return: True iff there is at least one EntityDescriptor in the active set. """ return len(self.md) > 0 def extensions(self, e): """Return a list of the Extensions elements in the EntityDescriptor :param e: an EntityDescriptor :return: a list """ ext = e.find(".//{%s}Extensions" % NS['md']) if ext is None: ext = etree.Element("{%s}Extensions" % NS['md']) e.insert(0, ext) return ext def annotate(self, e, category, title, message, source=None): """Add an ATOM annotation to an EntityDescriptor or an EntitiesDescriptor. This is a simple way to add non-normative text annotations to metadata, eg for the purpuse of generating reports. :param e: An EntityDescriptor or an EntitiesDescriptor element :param category: The ATOM category :param title: The ATOM title :param message: The ATOM content :param source: An optional source URL. 
It is added as a <link> element with @rel='saml-metadata-source' """ if e.tag != "{%s}EntityDescriptor" % NS['md'] and e.tag != "{%s}EntitiesDescriptor" % NS['md']: raise MetadataException( "I can only annotate EntityDescriptor or EntitiesDescriptor elements") subject = e.get('Name', e.get('entityID', None)) atom = ElementMaker(nsmap={ 'atom': 'http://www.w3.org/2005/Atom'}, namespace='http://www.w3.org/2005/Atom') args = [atom.published("%s" % datetime.now().isoformat()), atom.link(href=subject, rel="saml-metadata-subject")] if source is not None: args.append(atom.link(href=source, rel="saml-metadata-source")) args.extend([atom.title(title), atom.category(term=category), atom.content(message, type="text/plain")]) self.extensions(e).append(atom.entry(*args)) def _entity_attributes(self, e): ext = self.extensions(e) # log.debug(ext) ea = ext.find(".//{%s}EntityAttributes" % NS['mdattr']) if ea is None: ea = etree.Element("{%s}EntityAttributes" % NS['mdattr']) ext.append(ea) return ea def _eattribute(self, e, attr, nf): ea = self._entity_attributes(e) # log.debug(ea) a = ea.xpath( ".//saml:Attribute[@NameFormat='%s' and @Name='%s']" % (nf, attr), namespaces=NS) if a is None or len(a) == 0: a = etree.Element("{%s}Attribute" % NS['saml']) a.set('NameFormat', nf) a.set('Name', attr) ea.append(a) else: a = a[0] # log.debug(etree.tostring(self.extensions(e))) return a def set_entity_attributes(self, e, d, nf=NF_URI): """Set an entity attribute on an EntityDescriptor :param e: The EntityDescriptor element :param d: A dict of attribute-value pairs that should be added as entity attributes :param nf: The nameFormat (by default "urn:oasis:names:tc:SAML:2.0:attrname-format:uri") to use. :raise: MetadataException unless e is an EntityDescriptor element """ if e.tag != "{%s}EntityDescriptor" % NS['md']: raise MetadataException( "I can only add EntityAttribute(s) to EntityDescriptor elements") #log.debug("set %s" % d) for attr, value in d.iteritems(): #log.debug("set %s to %s" % (attr,value)) a = self._eattribute(e, attr, nf) # log.debug(etree.tostring(a)) velt = etree.Element("{%s}AttributeValue" % NS['saml']) velt.text = value a.append(velt) # log.debug(etree.tostring(a)) def fetch_metadata(self, resources, qsize=5, timeout=120, stats=None, xrd=None): """Fetch a series of metadata URLs and optionally verify signatures. :param resources: A list of triples (url,cert-or-fingerprint,id) :param qsize: The number of parallell downloads to run :param timeout: The number of seconds to wait (120 by default) for each download :param stats: A dictionary used for storing statistics. Useful for cherrypy cpstats The list of triples is processed by first downloading the URL. If a cert-or-fingerprint is supplied it is used to validate the signature on the received XML. Two forms of XML is supported: SAML Metadata and XRD. SAML metadata is (if valid and contains a valid signature) stored under the 'id' identifier (which defaults to the URL unless provided in the triple. XRD elements are processed thus: for all <Link> elements that contain a ds;KeyInfo elements with a X509Certificate and where the <Rel> element contains the string 'urn:oasis:names:tc:SAML:2.0:metadata', the corresponding <URL> element is download and verified. 
""" if stats is None: stats = {} def producer(q, resources, cache=self.metadata_cache_enabled): print resources for url, verify, id, tries in resources: log.debug("starting fetcher for '%s'" % url) thread = URLFetch( url, verify, id, enable_cache=cache, tries=tries) thread.start() q.put(thread, True) def consumer(q, njobs, stats, next_jobs=None, resolved=None): if next_jobs is None: next_jobs = [] if resolved is None: resolved = set() nfinished = 0 while nfinished < njobs: info = None try: log.debug("waiting for next thread to finish...") thread = q.get(True) thread.join(timeout) if thread.isAlive(): raise MetadataException( "thread timeout fetching '%s'" % thread.url) info = { 'Time Spent': thread.time() } if thread.ex is not None: raise thread.ex else: if thread.result is not None: info['Bytes'] = len(thread.result) else: raise MetadataException( "empty response fetching '%s'" % thread.url) info['Cached'] = thread.cached info['Date'] = str(thread.date) info['Last-Modified'] = str(thread.last_modified) info['Tries'] = thread.tries xml = thread.result.strip() if thread.status is not None: info['Status'] = thread.resp.status_code t = self.parse_metadata( StringIO(xml), key=thread.verify, base_url=thread.url) if t is None: self.fire(type=EVENT_IMPORT_FAIL, url=thread.url) raise MetadataException( "no valid metadata found at '%s'" % thread.url) relt = root(t) if relt.tag in ('{%s}XRD' % NS['xrd'], '{%s}XRDS' % NS['xrd']): log.debug("%s looks like an xrd document" % thread.url) for xrd in t.xpath("//xrd:XRD", namespaces=NS): log.debug("xrd: %s" % xrd) for link in xrd.findall(".//{%s}Link[@rel='%s']" % (NS['xrd'], NS['md'])): url = link.get("href") certs = xmlsec.CertDict(link) fingerprints = certs.keys() fp = None if len(fingerprints) > 0: fp = fingerprints[0] log.debug("fingerprint: %s" % fp) next_jobs.append((url, fp, url, 0)) elif relt.tag in ('{%s}EntityDescriptor' % NS['md'], '{%s}EntitiesDescriptor' % NS['md']): cacheDuration = self.default_cache_duration if self.respect_cache_duration: cacheDuration = root(t).get( 'cacheDuration', self.default_cache_duration) offset = duration2timedelta(cacheDuration) if thread.cached: if thread.last_modified + offset < datetime.now() - duration2timedelta(self.min_cache_ttl): raise MetadataException( "cached metadata expired") else: log.debug("found cached metadata for '%s' (last-modified: %s)" % (thread.url, thread.last_modified)) ne = self.import_metadata(t, url=thread.id) info['Number of Entities'] = ne else: log.debug("got fresh metadata for '%s' (date: %s)" % ( thread.url, thread.date)) ne = self.import_metadata(t, url=thread.id) info['Number of Entities'] = ne info['Cache Expiration Time'] = str( thread.last_modified + offset) certs = xmlsec.CertDict(relt) cert = None if certs.values(): cert = certs.values()[0].strip() resolved.add((thread.url, cert)) else: raise MetadataException( "unknown metadata type for '%s' (%s)" % (thread.url, relt.tag)) except Exception, ex: # traceback.print_exc(ex) log.warn("problem fetching '%s' (will retry): %s" % (thread.url, ex)) if info is not None: info['Exception'] = ex if thread.tries < self.retry_limit: next_jobs.append( (thread.url, thread.verify, thread.id, thread.tries + 1)) else: # traceback.print_exc(ex) log.error( "retry limit exceeded for %s (last error was: %s)" % (thread.url, ex)) finally: nfinished += 1 if info is not None: stats[thread.url] = info resources = [(url, verify, rid, 0) for url, verify, rid in resources] resolved = set() cache = True while len(resources) > 0: log.debug("fetching %d 
resources (%s)" % (len(resources), repr(resources))) next_jobs = [] q = Queue(qsize) prod_thread = threading.Thread( target=producer, args=(q, resources, cache)) cons_thread = threading.Thread(target=consumer, args=( q, len(resources), stats, next_jobs, resolved)) prod_thread.start() cons_thread.start() prod_thread.join() cons_thread.join() log.debug("after fetch: %d jobs to retry" % len(next_jobs)) if len(next_jobs) > 0: resources = next_jobs cache = False else: resources = [] if xrd is not None: with open(xrd, "w") as fd: fd.write(template("trust.xrd").render(links=resolved)) def parse_metadata(self, fn, key=None, base_url=None, fail_on_error=False, filter_invalid=True): """Parse a piece of XML and split it up into EntityDescriptor elements. Each such element is stored in the MDRepository instance. :param fn: a file-like object containing SAML metadata :param key: a certificate (file) or a SHA1 fingerprint to use for signature verification :param base_url: use this base url to resolve relative URLs for XInclude processing """ try: t = etree.parse(fn, base_url=base_url, parser=etree.XMLParser(resolve_entities=False)) t.xinclude() if filter_invalid: for e in t.findall('{%s}EntityDescriptor' % NS['md']): if not schema().validate(e): error = _e(schema().error_log, m=base_url) log.debug("removing '%s': schema validation failed (%s)" % ( e.get('entityID'), error)) e.getparent().remove(e) self.fire(type=EVENT_DROP_ENTITY, url=base_url, entityID=e.get('entityID'), error=error) else: # Having removed the invalid entities this should now never # happen... schema().assertValid(t) except DocumentInvalid, ex: traceback.print_exc() log.debug("schema validation failed on '%s': %s" % ( base_url, _e(ex.error_log, m=base_url))) raise MetadataException("schema validation failed") except Exception, ex: # log.debug(_e(schema().error_log)) log.error(ex) if fail_on_error: raise ex return None if key is not None: try: log.debug("verifying signature using %s" % key) refs = xmlsec.verified(t, key) if len(refs) != 1: raise MetadataException( "XML metadata contains %d signatures - exactly 1 is required" % len(refs)) t = refs[0] # prevent wrapping attacks except Exception, ex: tb = traceback.format_exc() print tb log.error(ex) return None return t def _index_entity(self, e): #log.debug("adding %s to index" % e.get('entityID')) if 'ID' in e.attrib: del e.attrib['ID'] self.index.add(e) def import_metadata(self, t, url=None): """ :param t: An EntitiesDescriptor element :param url: An optional URL to used to identify the EntitiesDescriptor in the MDRepository Import an EntitiesDescriptor element using the @Name attribute (or the supplied url parameter). All EntityDescriptor elements are stripped of any @ID attribute and are then indexed before the collection is stored in the MDRepository object. 
""" if url is None: top = t.xpath("//md:EntitiesDescriptor", namespaces=NS) if top is not None and len(top) == 1: url = top[0].get("Name", None) if url is None: raise MetadataException("No collection name found") self[url] = t # we always clean incoming ID # add to the index ne = 0 if t is not None: if root(t).tag == "{%s}EntityDescriptor" % NS['md']: self._index_entity(root(t)) ne += 1 else: for e in t.findall(".//{%s}EntityDescriptor" % NS['md']): self._index_entity(e) ne += 1 self.fire(type=EVENT_IMPORTED_METADATA, size=ne, url=url) return ne def entities(self, t=None): """ :param t: An EntitiesDescriptor element Returns the list of contained EntityDescriptor elements """ if t is None: return [] elif root(t).tag == "{%s}EntityDescriptor" % NS['md']: return [root(t)] else: return t.findall(".//{%s}EntityDescriptor" % NS['md']) def load_dir(self, directory, ext=".xml", url=None): """ :param directory: A directory to walk. :param ext: Include files with this extension (default .xml) Traverse a directory tree looking for metadata. Files ending in the specified extension are included. Directories starting with '.' are excluded. """ if url is None: url = directory log.debug("walking %s" % directory) if not directory in self.md: entities = [] for top, dirs, files in os.walk(directory): for dn in dirs: if dn.startswith("."): dirs.remove(dn) for nm in files: log.debug("found file %s" % nm) if nm.endswith(ext): fn = os.path.join(top, nm) try: t = self.parse_metadata(fn, fail_on_error=True) # local metadata is assumed to be ok entities.extend(self.entities(t)) except Exception, ex: log.error(ex) self.import_metadata(self.entity_set(entities, url)) return self.md[url] def _lookup(self, member, xp=None): """ :param member: Either an entity, URL or a filter expression. Find a (set of) EntityDescriptor element(s) based on the specified 'member' expression. """ def _hash(hn, strv): if hn == 'null': return strv if not hasattr(hashlib, hn): raise MetadataException("Unknown digest mechanism: '%s'" % hn) hash_m = getattr(hashlib, hn) h = hash_m() h.update(strv) return h.hexdigest() if xp is None: xp = "//md:EntityDescriptor" if member is None: lst = [] for m in self.keys(): log.debug("resolving %s filtered by %s" % (m, xp)) lst.extend(self._lookup(m, xp)) return lst elif hasattr(member, 'xpath'): log.debug("xpath filter %s <- %s" % (xp, member)) return member.xpath(xp, namespaces=NS) elif type(member) is str or type(member) is unicode: log.debug("string lookup %s" % member) if '+' in member: member = member.strip('+') log.debug("lookup intersection of '%s'" % ' and '.join(member.split('+'))) hits = None for f in member.split("+"): f = f.strip() if hits is None: hits = set(self._lookup(f, xp)) else: other = self._lookup(f, xp) hits.intersection_update(other) if not hits: log.debug("empty intersection") return [] if hits is not None and hits: return list(hits) else: return [] if "!" 
in member: (src, xp) = member.split("!") if len(src) == 0: src = None log.debug("filtering using %s" % xp) else: log.debug("selecting %s filtered by %s" % (src, xp)) return self._lookup(src, xp) m = re.match("^\{(.+)\}(.+)$", member) if m is not None: log.debug("attribute-value match: %s='%s'" % (m.group(1), m.group(2))) return self.index.get(m.group(1), m.group(2).rstrip("/")) m = re.match("^(.+)=(.+)$", member) if m is not None: log.debug("attribute-value match: %s='%s'" % (m.group(1), m.group(2))) return self.index.get(m.group(1), m.group(2).rstrip("/")) log.debug("basic lookup %s" % member) for idx in DIGESTS: e = self.index.get(idx, member) if e: log.debug("found %s in %s index" % (e, idx)) return e e = self.get(member, None) if e is not None: return self._lookup(e, xp) # hackish but helps save people from their misstakes e = self.get("%s.xml" % member, None) if e is not None: if not "://" in member: # not an absolute URL log.warn( "Found %s.xml as an alias - AVOID extensions in 'select as' statements" % member) return self._lookup(e, xp) if "://" in member: # looks like a URL and wasn't an entity or collection - recurse away! log.debug("recursively fetching members from '%s'" % member) # note that this supports remote lists which may be more rope # than is healthy return [self._lookup(line, xp) for line in urllib.urlopen(member).iterlines()] return [] elif hasattr(member, '__iter__') and type(member) is not dict: if not len(member): member = self.keys() return [self._lookup(m, xp) for m in member] else: raise MetadataException("What about %s ??" % member) def lookup(self, member, xp=None): """ Lookup elements in the working metadata repository :param member: A selector (cf below) :type member: basestring :param xp: An optional xpath filter :type xp: basestring :return: An interable of EntityDescriptor elements :rtype: etree.Element **Selector Syntax** - selector "+" selector - [sourceID] "!" xpath - attribute=value or {attribute}value - entityID - sourceID (@Name) - <URL containing one selector per line> The first form results in the intersection of the results of doing a lookup on the selectors. The second form results in the EntityDescriptor elements from the source (defaults to all EntityDescriptors) that match the xpath expression. The attribute-value forms resuls in the EntityDescriptors that contain the specified entity attribute pair. If non of these forms apply, the lookup is done using either source ID (normally @Name from the EntitiesDescriptor) or the entityID of single EntityDescriptors. If member is a URI but isn't part of the metadata repository then it is fetched an treated as a list of (one per line) of selectors. If all else fails an empty list is returned. """ l = self._lookup(member, xp) return list(set(filter(lambda x: x is not None, l))) def entity_set(self, entities, name, cacheDuration=None, validUntil=None, validate=True): """ :param entities: a set of entities specifiers (lookup is used to find entities from this set) :param name: the @Name attribute :param cacheDuration: an XML timedelta expression, eg PT1H for 1hr :param validUntil: a relative time eg 2w 4d 1h for 2 weeks, 4 days and 1hour from now. Produce an EntityDescriptors set from a list of entities. Optional Name, cacheDuration and validUntil are affixed. 
""" attrs = dict(Name=name, nsmap=NS) if cacheDuration is not None: attrs['cacheDuration'] = cacheDuration if validUntil is not None: attrs['validUntil'] = validUntil t = etree.Element("{%s}EntitiesDescriptor" % NS['md'], **attrs) nent = 0 seen = {} # TODO make better de-duplication for member in entities: for ent in self.lookup(member): entityID = ent.get('entityID', None) if (ent is not None) and (entityID is not None) and (not seen.get(entityID, False)): t.append(deepcopy(ent)) seen[entityID] = True nent += 1 log.debug("selecting %d entities from %d entity set(s) before validation" % ( nent, len(entities))) if not nent: return None if validate: try: schema().assertValid(t) except DocumentInvalid, ex: log.debug(_e(ex.error_log)) #raise MetadataException( # "XML schema validation failed: %s" % name) return t def error_set(self, url, title, ex): """ Creates an "error" EntitiesDescriptor - empty but for an annotation about the error that occured """ t = etree.Element("{%s}EntitiesDescriptor" % NS['md'], Name=url, nsmap=NS) self.annotate(t, "error", title, ex, source=url) def keys(self): return self.md.keys() def __getitem__(self, item): return self.md[item] def __setitem__(self, key, value): self.md[key] = value def __delitem__(self, key): del self.md[key] def summary(self, uri): """ :param uri: An EntitiesDescriptor URI present in the MDRepository :return: an information dict Returns a dict object with basic information about the EntitiesDescriptor """ seen = dict() info = dict() t = root(self[uri]) info['Name'] = t.get('Name', uri) info['cacheDuration'] = t.get('cacheDuration', None) info['validUntil'] = t.get('validUntil', None) info['Duplicates'] = [] info['Size'] = 0 for e in self.entities(self[uri]): entityID = e.get('entityID') if seen.get(entityID, False): info['Duplicates'].append(entityID) else: seen[entityID] = True info['Size'] += 1 return info def merge(self, t, nt, strategy=pyff.merge_strategies.replace_existing, strategy_name=None): """ :param t: The EntitiesDescriptor element to merge *into* :param nt: The EntitiesDescriptor element to merge *from* :param strategy: A callable implementing the merge strategy pattern :param strategy_name: The name of a strategy to import. Overrides the callable if present. :return: Two EntitiesDescriptor elements are merged - the second into the first. For each element in the second collection that is present (using the @entityID attribute as key) in the first the strategy callable is called with the old and new EntityDescriptor elements as parameters. The strategy callable thus must implement the following pattern: :param old_e: The EntityDescriptor from t :param e: The EntityDescriptor from nt :return: A merged EntityDescriptor element Before each call to strategy old_e is removed from the MDRepository index and after merge the resultant EntityDescriptor is added to the index before it is used to replace old_e in t. """ if strategy_name is not None: if not '.' in strategy_name: strategy_name = "pyff.merge_strategies.%s" % strategy_name (mn, sep, fn) = strategy_name.rpartition('.') #log.debug("import %s from %s" % (fn,mn)) module = None if '.' 
in mn: (pn, sep, modn) = mn.rpartition('.') module = getattr(__import__( pn, globals(), locals(), [modn], -1), modn) else: module = __import__(mn, globals(), locals(), [], -1) # we might aswell let this fail early if the strategy is wrongly # named strategy = getattr(module, fn) if strategy is None: raise MetadataException("No merge strategy - refusing to merge") for e in nt.findall(".//{%s}EntityDescriptor" % NS['md']): entityID = e.get("entityID") # we assume ddup:ed tree old_e = t.find( ".//{%s}EntityDescriptor[@entityID='%s']" % (NS['md'], entityID)) #log.debug("merging %s into %s" % (e,old_e)) # update index! try: self.index.remove(old_e) #log.debug("removed old entity from index") strategy(old_e, e) new_e = t.find( ".//{%s}EntityDescriptor[@entityID='%s']" % (NS['md'], entityID)) if new_e: # we don't know which strategy was employed self.index.add(new_e) except Exception, ex: traceback.print_exc() self.index.add(old_e) raise ex
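A minimal usage sketch of the selector syntax documented in lookup() above (it is not part of pyff itself). The import path, metadata directory and selector strings are placeholders/assumptions and may differ between pyff versions and deployments.

from pyff.mdrepo import MDRepository   # module path assumed; adjust to your pyff version

md = MDRepository()
md.load_dir("/etc/metadata")           # placeholder directory of local *.xml metadata

# Plain selector: an entityID or the @Name of an imported EntitiesDescriptor.
idp = md.lookup("https://idp.example.org/idp/shibboleth")

# "source!xpath": entities from one source filtered through an XPath expression.
sps = md.lookup("http://md.example.org/all!//md:EntityDescriptor[md:SPSSODescriptor]")

# "a+b": intersection of two selectors, here a source and an entity-attribute pair.
certified = md.lookup("http://md.example.org/all"
                      "+{urn:oasis:names:tc:SAML:attribute:assurance-certification}"
                      "https://refeds.org/sirtfi")

# Bundle a selection into a fresh EntitiesDescriptor, e.g. for publishing.
tree = md.entity_set(["http://md.example.org/all"], name="http://md.example.org/export",
                     cacheDuration="PT1H")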
server_interm_layer.py
import socket
import sys
import lib.ProtocolUtils as protocolUtils
import threading as thread


def switch_operations(operation):
    """Map an operation symbol to the (host, port) of the backend that handles it."""
    switcher = {
        "+": ["localhost", 9991],
        "-": ["localhost", 9992],
        "*": ["localhost", 9993],
        "/": ["localhost", 9994],
        "^": ["localhost", 9995],
        "log": ["localhost", 9996],
        "root": ["localhost", 9997],
    }
    return switcher.get(operation, None)


def message_handler(conn, addr):
    """Decode the request, forward it to the matching backend and relay the answer."""
    raw_data = conn.recv(1024)
    data = protocolUtils.MessageHandler(raw_data).message_loads()
    server_interface = switch_operations(data[1])
    if server_interface is None:
        # Unknown operation: nothing to forward to.
        conn.close()
        return
    server_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_instance.connect((server_interface[0], server_interface[1]))
    server_instance.send(raw_data)
    result = server_instance.recv(1024)
    server_instance.close()
    conn.send(result)
    conn.close()
    # Close the thread to save hardware.
    # sys.exit()


if __name__ == "__main__":
    socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket_instance.bind(('', 9999))
    socket_instance.listen(10)
    threads_list = []
    print("Server running ...")
    while True:
        conn, addr = socket_instance.accept()
        temp_thread = thread.Thread(target=message_handler, args=(conn, addr,))
        threads_list.append(temp_thread)
        temp_thread.start()
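The dispatcher above only forwards bytes, so it can be smoke-tested without the real operation servers. A throwaway backend that echoes whatever it receives can stand in on one of the expected ports; the actual wire format handled by lib.ProtocolUtils is not shown here, so a real client still needs that encoding.

import socket

def echo_backend(port=9991):
    # Stand-in for the "+" server: accept a forwarded request and echo it back.
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('localhost', port))
    srv.listen(1)
    while True:
        conn, _addr = srv.accept()
        data = conn.recv(1024)      # bytes forwarded verbatim by the dispatcher
        conn.send(data)             # a real backend would compute the result here
        conn.close()

# Run echo_backend() in a separate process while exercising the dispatcher.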
joy_detection_demo.py
#!/usr/bin/env python3 # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Joy detection demo.""" import argparse import collections import contextlib import io import logging import math import os import queue import signal import sys import threading import time from PIL import Image, ImageDraw, ImageFont from picamera import PiCamera from aiy.board import Board from aiy.leds import Color, Leds, Pattern, PrivacyLed from aiy.toneplayer import TonePlayer from aiy.vision.inference import CameraInference from aiy.vision.models import face_detection from aiy.vision.streaming.server import StreamingServer from aiy.vision.streaming import svg logger = logging.getLogger(__name__) JOY_COLOR = (255, 70, 0) SAD_COLOR = (0, 0, 64) JOY_SCORE_HIGH = 0.85 JOY_SCORE_LOW = 0.10 JOY_SOUND = ('C5q', 'E5q', 'C6q') SAD_SOUND = ('C6q', 'E5q', 'C5q') MODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w') BEEP_SOUND = ('E6q', 'C6q') FONT_FILE = '/usr/share/fonts/truetype/freefont/FreeSans.ttf' BUZZER_GPIO = 22 @contextlib.contextmanager def stopwatch(message): try: logger.info('%s...', message) begin = time.monotonic() yield finally: end = time.monotonic() logger.info('%s done. (%fs)', message, end - begin) def run_inference(num_frames, on_loaded): """Yields (faces, (frame_width, frame_height)) tuples.""" with CameraInference(face_detection.model()) as inference: on_loaded() for result in inference.run(num_frames): yield face_detection.get_faces(result), (result.width, result.height) def threshold_detector(low_threshold, high_threshold): """Yields 'low', 'high', and None events.""" assert low_threshold < high_threshold event = None prev_score = 0.0 while True: score = (yield event) if score > high_threshold > prev_score: event = 'high' elif score < low_threshold < prev_score: event = 'low' else: event = None prev_score = score def moving_average(size): window = collections.deque(maxlen=size) window.append((yield 0.0)) while True: window.append((yield sum(window) / len(window))) def average_joy_score(faces): if faces: return sum(face.joy_score for face in faces) / len(faces) return 0.0 def draw_rectangle(draw, x0, y0, x1, y1, border, fill=None, outline=None): assert border % 2 == 1 for i in range(-border // 2, border // 2 + 1): draw.rectangle((x0 + i, y0 + i, x1 - i, y1 - i), fill=fill, outline=outline) def scale_bounding_box(bounding_box, scale_x, scale_y): x, y, w, h = bounding_box return (x * scale_x, y * scale_y, w * scale_x, h * scale_y) def svg_overlay(faces, frame_size, joy_score): width, height = frame_size doc = svg.Svg(width=width, height=height) for face in faces: x, y, w, h = face.bounding_box doc.add(svg.Rect(x=int(x), y=int(y), width=int(w), height=int(h), rx=10, ry=10, fill_opacity=0.3 * face.face_score, style='fill:red;stroke:white;stroke-width:4px')) doc.add(svg.Text('Joy: %.2f' % face.joy_score, x=x, y=y - 10, fill='red', font_size=30)) doc.add(svg.Text('Faces: %d Avg. 
joy: %.2f' % (len(faces), joy_score), x=10, y=50, fill='red', font_size=40)) return str(doc) class Service: def __init__(self): self._requests = queue.Queue() self._thread = threading.Thread(target=self._run, daemon=True) self._thread.start() def _run(self): while True: request = self._requests.get() if request is None: self.shutdown() break self.process(request) self._requests.task_done() def process(self, request): pass def shutdown(self): pass def submit(self, request): self._requests.put(request) def close(self): self._requests.put(None) self._thread.join() def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() class Player(Service): """Controls buzzer.""" def __init__(self, gpio, bpm): super().__init__() self._toneplayer = TonePlayer(gpio, bpm) def process(self, sound): try: self._toneplayer.play(*sound) except: logger.exception('Cannot play %s sound', sound) def play(self, sound): self.submit(sound) class Photographer(Service): """Saves photographs to disk.""" def __init__(self, format, folder): super().__init__() assert format in ('jpeg', 'bmp', 'png') self._font = ImageFont.truetype(FONT_FILE, size=25) self._faces = ([], (0, 0)) self._format = format self._folder = folder def _make_filename(self, timestamp, annotated): path = '%s/%s_annotated.%s' if annotated else '%s/%s.%s' return os.path.expanduser(path % (self._folder, timestamp, self._format)) def _draw_face(self, draw, face, scale_x, scale_y): x, y, width, height = scale_bounding_box(face.bounding_box, scale_x, scale_y) text = 'Joy: %.2f' % face.joy_score _, text_height = self._font.getsize(text) margin = 3 bottom = y + height text_bottom = bottom + margin + text_height + margin draw_rectangle(draw, x, y, x + width, bottom, 3, outline='white') draw_rectangle(draw, x, bottom, x + width, text_bottom, 3, fill='white', outline='white') draw.text((x + 1 + margin, y + height + 1 + margin), text, font=self._font, fill='black') def process(self, message): if isinstance(message, tuple): self._faces = message return camera = message timestamp = time.strftime('%Y-%m-%d_%H.%M.%S') stream = io.BytesIO() with stopwatch('Taking photo'): camera.capture(stream, format=self._format, use_video_port=True) filename = self._make_filename(timestamp, annotated=False) with stopwatch('Saving original %s' % filename): stream.seek(0) with open(filename, 'wb') as file: file.write(stream.read()) faces, (width, height) = self._faces if faces: filename = self._make_filename(timestamp, annotated=True) with stopwatch('Saving annotated %s' % filename): stream.seek(0) image = Image.open(stream) draw = ImageDraw.Draw(image) scale_x, scale_y = image.width / width, image.height / height for face in faces: self._draw_face(draw, face, scale_x, scale_y) del draw image.save(filename) def update_faces(self, faces): self.submit(faces) def shoot(self, camera): self.submit(camera) class Animator(Service): """Controls RGB LEDs.""" def __init__(self, leds): super().__init__() self._leds = leds def process(self, joy_score): if joy_score > 0: self._leds.update(Leds.rgb_on(Color.blend(JOY_COLOR, SAD_COLOR, joy_score))) else: self._leds.update(Leds.rgb_off()) def shutdown(self): self._leds.update(Leds.rgb_off()) def update_joy_score(self, joy_score): self.submit(joy_score) def joy_detector(num_frames, preview_alpha, image_format, image_folder, enable_streaming, streaming_bitrate, mdns_name): done = threading.Event() def stop(): logger.info('Stopping...') done.set() signal.signal(signal.SIGINT, lambda signum, frame: stop()) 
signal.signal(signal.SIGTERM, lambda signum, frame: stop()) logger.info('Starting...') with contextlib.ExitStack() as stack: leds = stack.enter_context(Leds()) board = stack.enter_context(Board()) player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10)) photographer = stack.enter_context(Photographer(image_format, image_folder)) animator = stack.enter_context(Animator(leds)) # Forced sensor mode, 1640x1232, full FoV. See: # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes # This is the resolution inference run on. # Use half of that for video streaming (820x616). camera = stack.enter_context(PiCamera(sensor_mode=4, resolution=(820, 616))) stack.enter_context(PrivacyLed(leds)) server = None if enable_streaming: server = stack.enter_context(StreamingServer(camera, bitrate=streaming_bitrate, mdns_name=mdns_name)) def model_loaded(): logger.info('Model loaded.') player.play(MODEL_LOAD_SOUND) def take_photo(): logger.info('Button pressed.') player.play(BEEP_SOUND) photographer.shoot(camera) if preview_alpha > 0: camera.start_preview(alpha=preview_alpha) board.button.when_pressed = take_photo joy_moving_average = moving_average(10) joy_moving_average.send(None) # Initialize. joy_threshold_detector = threshold_detector(JOY_SCORE_LOW, JOY_SCORE_HIGH) joy_threshold_detector.send(None) # Initialize. for faces, frame_size in run_inference(num_frames, model_loaded): photographer.update_faces((faces, frame_size)) joy_score = joy_moving_average.send(average_joy_score(faces)) animator.update_joy_score(joy_score) event = joy_threshold_detector.send(joy_score) if event == 'high': logger.info('High joy detected.') player.play(JOY_SOUND) elif event == 'low': logger.info('Low joy detected.') player.play(SAD_SOUND) if server: server.send_overlay(svg_overlay(faces, frame_size, joy_score)) if done.is_set(): break def preview_alpha(string): value = int(string) if value < 0 or value > 255: raise argparse.ArgumentTypeError('Must be in [0...255] range.') return value def main(): logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--num_frames', '-n', type=int, default=None, help='Number of frames to run for') parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0, help='Video preview overlay transparency (0-255)') parser.add_argument('--image_format', default='jpeg', choices=('jpeg', 'bmp', 'png'), help='Format of captured images') parser.add_argument('--image_folder', default='~/Pictures', help='Folder to save captured images') parser.add_argument('--blink_on_error', default=False, action='store_true', help='Blink red if error occurred') parser.add_argument('--enable_streaming', default=False, action='store_true', help='Enable streaming server') parser.add_argument('--streaming_bitrate', type=int, default=1000000, help='Streaming server video bitrate (kbps)') parser.add_argument('--mdns_name', default='', help='Streaming server mDNS name') args = parser.parse_args() try: joy_detector(args.num_frames, args.preview_alpha, args.image_format, args.image_folder, args.enable_streaming, args.streaming_bitrate, args.mdns_name) except KeyboardInterrupt: pass except Exception: logger.exception('Exception while running joy demo.') if args.blink_on_error: with Leds() as leds: leds.pattern = Pattern.blink(100) # 10 Hz leds.update(Leds.rgb_pattern(Color.RED)) time.sleep(1.0) return 0 if __name__ == '__main__': sys.exit(main())
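The smoothing and hysteresis helpers above (moving_average and threshold_detector) can be exercised on their own. The scores below are synthetic, chosen so the smoothed value crosses JOY_SCORE_HIGH upwards once and JOY_SCORE_LOW downwards once; the snippet assumes the helpers and constants are in scope, for example pasted into a REPL next to them.

avg = moving_average(3)
avg.send(None)                              # prime the coroutine
detector = threshold_detector(JOY_SCORE_LOW, JOY_SCORE_HIGH)
detector.send(None)                         # prime the coroutine

for raw in (0.2, 0.9, 0.95, 0.9, 0.3, 0.05, 0.02, 0.0):
    smoothed = avg.send(raw)
    event = detector.send(smoothed)
    if event:
        print('joy went %s at smoothed score %.2f' % (event, smoothed))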
remote_lock.py
import os import logging import urllib import socket import time import threading class RemoteLock(object): def __init__(self, end_point=''): self.log = logging.getLogger("lock") self.end_point = end_point def require(self, lock_name='', owner='', timeout=60 * 1000): data = self._rpc_call(name=lock_name, owner=owner, timeout=timeout or 60 * 1000) lock = Lock(lock_name, owner) lock.timeout = data.get('lock_timeout') lock.cur_owner = data.get('lock_owner', 'error') lock.isLocked = data.get('locked') == 'ok' lock._rpc = self return lock def release(self, lock): self._rpc_call(name=lock.name, owner=lock.owner, release='Y') lock.isLocked = False return lock def _rpc_call(self, **kw): socket.setdefaulttimeout(10) data = {} kw['ajax'] = 'Y' resp_data = "" try: response = urllib.urlopen(self.end_point, urllib.urlencode(kw), proxies={}) resp_data = response.read() data['locked'] = response.headers['locked'] data['lock_owner'] = response.headers['lock_owner'] data['lock_name'] = response.headers['lock_name'] data['lock_timeout'] = response.headers['lock_timeout'] except Exception, e: self.log.warn("lock error:%s, reponse_data:%s" % (e, resp_data)) return data class Lock(object): def __init__(self, name, owner, isLocked=False): self.name = name self.owner = owner self.isLocked = isLocked self.timeout = 0 def ok(self): return self.isLocked def release(self): if hasattr(self, "_rpc"): self._rpc.release(self) class DeviceLocker(object): def __init__(self, dev_list): self.lock_service = os.getenv('REMOTE_LOCK_RPC', "http://10.56.117.81/lock/") self.lock_service = RemoteLock(self.lock_service) self.lock_names = ['dev_%s' % e.strip() for e in dev_list if e.strip()] self.owner = os.getenv('BUILD_URL', "http://127.0.0.1/") self.mutex = threading.Lock() self.locks = {} def try_and_wait_lock(self): all_is_ok = False logging.info("try to lock devices...") for retry in range(20): all_is_ok = True for e in self.lock_names: l = self.lock_service.require(e, self.owner, timeout=60 * 1000 * 5) if not l.ok(): logging.info("Failed to lock '%s', current is locked by '%s'" % (e, l.cur_owner)) all_is_ok = False else: self.locks[e] = l if all_is_ok: logging.info("all lock is ok now.") break else: logging.info("waiting 60 seconds, try again.") time.sleep(60) logging.info("all devices are locked, device:%s" % (",".join(self.lock_names))) return all_is_ok def start_to_keep_lock(self): th = threading.Thread(target=self._refresh_lock) th.daemon = True th.start() def _refresh_lock(self): logging.info("start to keep the lock...") while 1: self.mutex.acquire() if self.locks: for e in self.lock_names: l = self.lock_service.require(e, self.owner, timeout=60 * 1000 * 5) if not l.ok(): logging.warn("Failed to lock '%s', current is locked by '%s'" % (e, l.cur_owner)) self.mutex.release() else: self.mutex.release() break time.sleep(60) def stop(self): self.mutex.acquire() cur_locks = self.locks.values() self.locks = {} for e in cur_locks: e.release() self.mutex.release() logging.info("All lock is released.")
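A sketch of the intended DeviceLocker lifecycle: acquire all device locks, keep them refreshed from a daemon thread, and release them when done. The device names are placeholders and run_tests_on_devices() is a hypothetical workload; the module targets Python 2 (urllib.urlopen, `except Exception, e`), and the sketch stays within that dialect.

locker = DeviceLocker(["phone-01", "phone-02"])     # placeholder device names
if locker.try_and_wait_lock():                      # retries for up to ~20 minutes
    locker.start_to_keep_lock()                     # daemon thread re-acquires every 60s
    try:
        run_tests_on_devices()                      # hypothetical workload using the devices
    finally:
        locker.stop()                               # release every held lock
else:
    logging.error("could not lock all devices, giving up")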
plotting.py
"""PyVista plotting module.""" import collections.abc import ctypes from functools import wraps import io import logging import os import pathlib import platform import textwrap from threading import Thread import time from typing import Dict import warnings import weakref import numpy as np import scooby import pyvista from pyvista import _vtk from pyvista.utilities import ( abstract_class, assert_empty_kwargs, convert_array, get_array, is_pyvista_dataset, numpy_to_texture, raise_not_matching, wrap, ) from ..utilities.misc import PyvistaDeprecationWarning, uses_egl from ..utilities.regression import image_from_window from ._plotting import _has_matplotlib, prepare_smooth_shading, process_opacity from .colors import Color, get_cmap_safe from .export_vtkjs import export_plotter_vtkjs from .mapper import make_mapper from .picking import PickingHelper from .render_window_interactor import RenderWindowInteractor from .renderer import Camera, Renderer from .renderers import Renderers from .scalar_bars import ScalarBars from .tools import FONTS, normalize, opacity_transfer_function, parse_font_family # noqa from .widgets import WidgetHelper SUPPORTED_FORMATS = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"] VERY_FIRST_RENDER = True # windows plotter helper # EXPERIMENTAL: permit pyvista to kill the render window KILL_DISPLAY = platform.system() == 'Linux' and os.environ.get('PYVISTA_KILL_DISPLAY') if KILL_DISPLAY: # pragma: no cover # this won't work under wayland try: X11 = ctypes.CDLL("libX11.so") X11.XCloseDisplay.argtypes = [ctypes.c_void_p] except OSError: warnings.warn('PYVISTA_KILL_DISPLAY: Unable to load X11.\nProbably using wayland') KILL_DISPLAY = False def close_all(): """Close all open/active plotters and clean up memory. Returns ------- bool ``True`` when all plotters have been closed. """ for _, p in _ALL_PLOTTERS.items(): if not p._closed: p.close() p.deep_clean() _ALL_PLOTTERS.clear() return True log = logging.getLogger(__name__) log.setLevel('CRITICAL') log.addHandler(logging.StreamHandler()) def _warn_xserver(): # pragma: no cover """Check if plotting is supported and persist this state. Check once and cache this value between calls. Warn the user if plotting is not supported. Configured to check on Linux and Mac OS since the Windows check is not quick. """ # disable windows check until we can get a fast way of verifying # if windows has a windows manager (which it generally does) if os.name == 'nt': return if not hasattr(_warn_xserver, 'has_support'): _warn_xserver.has_support = pyvista.system_supports_plotting() if not _warn_xserver.has_support: # check if a display has been set if 'DISPLAY' in os.environ: return # finally, check if using a backend that doesn't require an xserver if pyvista.global_theme.jupyter_backend in ['ipygany', 'pythreejs']: return # Check if VTK has EGL support if uses_egl(): return warnings.warn( '\n' 'This system does not appear to be running an xserver.\n' 'PyVista will likely segfault when rendering.\n\n' 'Try starting a virtual frame buffer with xvfb, or using\n ' ' ``pyvista.start_xvfb()``\n' ) USE_SCALAR_BAR_ARGS = """ "stitle" is a depreciated keyword and will be removed in a future release. Use ``scalar_bar_args`` instead. For example: scalar_bar_args={'title': 'Scalar Bar Title'} """ @abstract_class class BasePlotter(PickingHelper, WidgetHelper): """To be used by the Plotter and pyvistaqt.QtInteractor classes. Parameters ---------- shape : list or tuple, optional Number of sub-render windows inside of the main window. 
Specify two across with ``shape=(2, 1)`` and a two by two grid with ``shape=(2, 2)``. By default there is only one renderer. Can also accept a string descriptor as shape. E.g.: * ``shape="3|1"`` means 3 plots on the left and 1 on the right, * ``shape="4/2"`` means 4 plots on top and 2 at the bottom. border : bool, optional Draw a border around each render window. Default ``False``. border_color : color_like, optional Either a string, rgb list, or hex color string. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` border_width : float, optional Width of the border in pixels when enabled. title : str, optional Window title of the scalar bar lighting : str, optional What lighting to set up for the plotter. Accepted options: * ``'light_kit'``: a vtk Light Kit composed of 5 lights. * ``'three lights'``: illumination using 3 lights. * ``'none'``: no light sources at instantiation. The default is a Light Kit (to be precise, 5 separate lights that act like a Light Kit). theme : pyvista.themes.DefaultTheme, optional Plot-specific theme. """ mouse_position = None click_position = None def __init__( self, shape=(1, 1), border=None, border_color='k', border_width=2.0, title=None, splitting_position=None, groups=None, row_weights=None, col_weights=None, lighting='light kit', theme=None, **kwargs, ): """Initialize base plotter.""" super().__init__(**kwargs) # cooperative multiple inheritance log.debug('BasePlotter init start') self._theme = pyvista.themes.DefaultTheme() if theme is None: # copy global theme to ensure local plot theme is fixed # after creation. self._theme.load_theme(pyvista.global_theme) else: if not isinstance(theme, pyvista.themes.DefaultTheme): raise TypeError( 'Expected ``pyvista.themes.DefaultTheme`` for ' f'``theme``, not {type(theme).__name__}.' 
) self._theme.load_theme(theme) self.image_transparent_background = self._theme.transparent_background # optional function to be called prior to closing self.__before_close_callback = None self._store_image = False self.mesh = None if title is None: title = self._theme.title self.title = str(title) # add renderers self.renderers = Renderers( self, shape, splitting_position, row_weights, col_weights, groups, border, border_color, border_width, ) # This keeps track of scalars names already plotted and their ranges self._scalar_bars = ScalarBars(self) # track if the camera has been set up self._first_time = True # Keep track of the scale # track if render window has ever been rendered self._rendered = False # this helps managing closed plotters self._closed = False # lighting style; be forgiving with input (accept underscores # and ignore case) lighting_normalized = str(lighting).replace('_', ' ').lower() if lighting_normalized == 'light kit': self.enable_lightkit() elif lighting_normalized == 'three lights': self.enable_3_lights() elif lighting_normalized != 'none': raise ValueError(f'Invalid lighting option "{lighting}".') # Add self to open plotters self._id_name = f"{hex(id(self))}-{len(_ALL_PLOTTERS)}" _ALL_PLOTTERS[self._id_name] = self # Key bindings self.reset_key_events() log.debug('BasePlotter init stop') self._image_depth_null = None self.last_image_depth = None self.last_image = None self._has_background_layer = False # set hidden line removal based on theme if self.theme.hidden_line_removal: self.enable_hidden_line_removal() # set antialiasing based on theme if self.theme.antialiasing: self.enable_anti_aliasing() @property def theme(self): """Return or set the theme used for this plotter. Examples -------- Use the dark theme for a plotter. >>> import pyvista >>> from pyvista import themes >>> pl = pyvista.Plotter() >>> pl.theme = themes.DarkTheme() >>> actor = pl.add_mesh(pyvista.Sphere()) >>> pl.show() """ return self._theme @theme.setter def theme(self, theme): if not isinstance(theme, pyvista.themes.DefaultTheme): raise TypeError( 'Expected a pyvista theme like ' '``pyvista.themes.DefaultTheme``, ' f'not {type(theme).__name__}.' ) self._theme.load_theme(theme) def import_gltf(self, filename, set_camera=True): """Import a glTF file into the plotter. See https://www.khronos.org/gltf/ for more information. Parameters ---------- filename : str Path to the glTF file. set_camera : bool, optional Set the camera viewing angle to one compatible with the default three.js perspective (``'xy'``). Examples -------- >>> import pyvista >>> from pyvista import examples >>> helmet_file = examples.gltf.download_damaged_helmet() # doctest:+SKIP >>> texture = examples.hdr.download_dikhololo_night() # doctest:+SKIP >>> pl = pyvista.Plotter() # doctest:+SKIP >>> pl.import_gltf(helmet_file) # doctest:+SKIP >>> pl.set_environment_texture(cubemap) # doctest:+SKIP >>> pl.camera.zoom(1.8) # doctest:+SKIP >>> pl.show() # doctest:+SKIP See :ref:`load_gltf` for a full example using this method. 
""" if not _vtk.VTK9: # pragma: no cover from pyvista.core.errors import VTKVersionError raise VTKVersionError('Support for glTF requires VTK v9 or newer') filename = os.path.abspath(os.path.expanduser(str(filename))) if not os.path.isfile(filename): raise FileNotFoundError(f'Unable to locate {filename}') # lazy import here to avoid importing unused modules from vtkmodules.vtkIOImport import vtkGLTFImporter importer = vtkGLTFImporter() importer.SetFileName(filename) importer.SetRenderWindow(self.ren_win) importer.Update() # register last actor in actors actor = self.renderer.GetActors().GetLastItem() name = actor.GetAddressAsString("") self.renderer._actors[name] = actor # set camera position to a three.js viewing perspective if set_camera: self.camera_position = 'xy' def import_vrml(self, filename): """Import a VRML file into the plotter. Parameters ---------- filename : str Path to the VRML file. Examples -------- >>> import pyvista >>> from pyvista import examples >>> sextant_file = examples.vrml.download_sextant() # doctest:+SKIP >>> pl = pyvista.Plotter() # doctest:+SKIP >>> pl.import_vrml(sextant_file) # doctest:+SKIP >>> pl.show() # doctest:+SKIP See :ref:`load_vrml_example` for a full example using this method. """ filename = os.path.abspath(os.path.expanduser(str(filename))) if not os.path.isfile(filename): raise FileNotFoundError(f'Unable to locate {filename}') # lazy import here to avoid importing unused modules from vtkmodules.vtkIOImport import vtkVRMLImporter importer = vtkVRMLImporter() importer.SetFileName(filename) importer.SetRenderWindow(self.ren_win) importer.Update() def export_html(self, filename, backend='pythreejs'): """Export this plotter as an interactive scene to a HTML file. You have the option of exposing the scene using either vtk.js (using ``panel``) or three.js (using ``pythreejs``), both of which are excellent JavaScript libraries to visualize small to moderately complex scenes for scientific visualization. Parameters ---------- filename : str Path to export the html file to. backend : str, optional One of the following: - ``'pythreejs'`` - ``'panel'`` For more details about the advantages and disadvantages of each backend, see :ref:`jupyter_plotting`. Notes ----- You will need ``ipywidgets`` and ``pythreejs`` installed if you wish to export using the ``'pythreejs'`` backend, or ``'panel'`` installed to export using ``'panel'``. Examples -------- Export as a three.js scene using the pythreejs backend. >>> import pyvista >>> from pyvista import examples >>> mesh = examples.load_uniform() >>> pl = pyvista.Plotter(shape=(1,2)) >>> _ = pl.add_mesh(mesh, scalars='Spatial Point Data', show_edges=True) >>> pl.subplot(0,1) >>> _ = pl.add_mesh(mesh, scalars='Spatial Cell Data', show_edges=True) >>> pl.export_html('pyvista.html') # doctest:+SKIP Export as a vtk.js scene using the panel backend. >>> pl.export_html('pyvista_panel.html', backend='panel') # doctest:+SKIP """ if backend == 'pythreejs': widget = self.to_pythreejs() elif backend == 'panel': self._save_panel(filename) return else: raise ValueError(f"Invalid backend {backend}. 
Should be either 'panel' or 'pythreejs'") widget = self.to_pythreejs() # import after converting as we check for pythreejs import first try: from ipywidgets.embed import dependency_state, embed_minimal_html except ImportError: # pragma: no cover raise ImportError('Please install ipywidgets with:\n\n\tpip install ipywidgets') # Garbage collection for embedded html output: # https://github.com/jupyter-widgets/pythreejs/issues/217 state = dependency_state(widget) # convert and write to file embed_minimal_html(filename, None, title=self.title, state=state) def _save_panel(self, filename): """Save the render window as a ``panel.pane.vtk`` html file. See https://panel.holoviz.org/api/panel.pane.vtk.html Parameters ---------- filename : str Path to export the plotter as a panel scene to. """ from ..jupyter.notebook import handle_plotter pane = handle_plotter(self, backend='panel', return_viewer=True, title=self.title) pane.save(filename) def to_pythreejs(self): """Convert this plotting scene to a pythreejs widget. Returns ------- ipywidgets.Widget Widget containing pythreejs renderer. """ self._on_first_render_request() # set up camera from pyvista.jupyter.pv_pythreejs import convert_plotter return convert_plotter(self) def export_gltf(self, filename, inline_data=True, rotate_scene=True, save_normals=True): """Export the current rendering scene as a glTF file. Visit https://gltf-viewer.donmccurdy.com/ for an online viewer. See https://vtk.org/doc/nightly/html/classvtkGLTFExporter.html for limitations regarding the exporter. Parameters ---------- filename : str Path to export the gltf file to. inline_data : bool, optional Sets if the binary data be included in the json file as a base64 string. When ``True``, only one file is exported. rotate_scene : bool, optional Rotate scene to be compatible with the glTF specifications. save_normals : bool, optional Saves the point array ``'Normals'`` as ``'NORMAL'`` in the outputted scene. Notes ----- The VTK exporter only supports :class:`pyvista.PolyData` datasets. If the plotter contains any non-PolyData datasets, these will be converted in the plotter, leading to a copy of the data internally. Examples -------- Output a simple point cloud represented as balls. >>> import numpy as np >>> import pyvista >>> point_cloud = np.random.random((100, 3)) >>> pdata = pyvista.PolyData(point_cloud) >>> pdata['orig_sphere'] = np.arange(100) >>> sphere = pyvista.Sphere(radius=0.02) >>> pc = pdata.glyph(scale=False, geom=sphere, orient=False) >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(pc, cmap='reds', smooth_shading=True, ... show_scalar_bar=False) >>> pl.export_gltf('balls.gltf') # doctest:+SKIP >>> pl.show() Output the orientation plotter. 
>>> from pyvista import demos >>> pl = demos.orientation_plotter() >>> pl.export_gltf('orientation_plotter.gltf') # doctest:+SKIP >>> pl.show() """ if not _vtk.VTK9: # pragma: no cover from pyvista.core.errors import VTKVersionError raise VTKVersionError('Support for glTF requires VTK v9 or newer') if not hasattr(self, "ren_win"): raise RuntimeError('This plotter has been closed and is unable to export the scene.') from vtkmodules.vtkIOExport import vtkGLTFExporter # rotate scene to gltf compatible view renamed_arrays = [] # any renamed normal arrays if rotate_scene: for renderer in self.renderers: for actor in renderer.actors.values(): if hasattr(actor, 'RotateX'): actor.RotateX(-90) actor.RotateZ(-90) if save_normals: try: mapper = actor.GetMapper() if mapper is None: continue dataset = mapper.GetInputAsDataSet() if not isinstance(dataset, pyvista.PolyData): warnings.warn( 'Plotter contains non-PolyData datasets. These have been ' 'overwritten with PolyData surfaces and are internally ' 'copies of the original datasets.' ) try: dataset = dataset.extract_surface() mapper.SetInputData(dataset) except: # pragma: no cover warnings.warn( 'During gLTF export, failed to convert some ' 'datasets to PolyData. Exported scene will not have ' 'all datasets.' ) if 'Normals' in dataset.point_data: # By default VTK uses the 'Normals' point data for normals # but gLTF uses NORMAL. point_data = dataset.GetPointData() array = point_data.GetArray('Normals') array.SetName('NORMAL') renamed_arrays.append(array) except: # noqa: E722 pass exporter = vtkGLTFExporter() exporter.SetRenderWindow(self.ren_win) exporter.SetFileName(filename) exporter.SetInlineData(inline_data) exporter.SetSaveNormal(save_normals) exporter.Update() # rotate back if applicable if rotate_scene: for renderer in self.renderers: for actor in renderer.actors.values(): if hasattr(actor, 'RotateX'): actor.RotateZ(90) actor.RotateX(90) # revert any renamed arrays for array in renamed_arrays: array.SetName('Normals') def export_vrml(self, filename): """Export the current rendering scene as a VRML file. See `vtk.VRMLExporter <https://vtk.org/doc/nightly/html/classvtkVRMLExporter.html>`_ for limitations regarding the exporter. Parameters ---------- filename : str Filename to export the scene to. Examples -------- >>> import pyvista >>> from pyvista import examples >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(examples.load_hexbeam()) >>> pl.export_vrml("sample") # doctest:+SKIP """ if not hasattr(self, "ren_win"): raise RuntimeError("This plotter has been closed and cannot be shown.") # lazy import here to avoid importing unused modules from vtkmodules.vtkIOExport import vtkVRMLExporter exporter = vtkVRMLExporter() exporter.SetFileName(filename) exporter.SetRenderWindow(self.ren_win) exporter.Write() def enable_hidden_line_removal(self, all_renderers=True): """Enable hidden line removal. Wireframe geometry will be drawn using hidden line removal if the rendering engine supports it. Disable this with :func:`disable_hidden_line_removal <BasePlotter.disable_hidden_line_removal>` Parameters ---------- all_renderers : bool If ``True``, applies to all renderers in subplots. If ``False``, then only applies to the active renderer. Examples -------- Create a side-by-side plotter and render a sphere in wireframe with hidden line removal enabled on the left and disabled on the right. 
>>> import pyvista >>> sphere = pyvista.Sphere(theta_resolution=20, phi_resolution=20) >>> pl = pyvista.Plotter(shape=(1, 2)) >>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe') >>> _ = pl.add_text("With hidden line removal") >>> pl.enable_hidden_line_removal(all_renderers=False) >>> pl.subplot(0, 1) >>> pl.disable_hidden_line_removal(all_renderers=False) >>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe') >>> _ = pl.add_text("Without hidden line removal") >>> pl.show() """ if all_renderers: for renderer in self.renderers: renderer.enable_hidden_line_removal() else: self.renderer.enable_hidden_line_removal() def disable_hidden_line_removal(self, all_renderers=True): """Disable hidden line removal. Enable again with :func:`enable_hidden_line_removal <BasePlotter.enable_hidden_line_removal>` Parameters ---------- all_renderers : bool If ``True``, applies to all renderers in subplots. If ``False``, then only applies to the active renderer. Examples -------- Enable and then disable hidden line removal. >>> import pyvista >>> pl = pyvista.Plotter() >>> pl.enable_hidden_line_removal() >>> pl.disable_hidden_line_removal() """ if all_renderers: for renderer in self.renderers: renderer.disable_hidden_line_removal() else: self.renderer.disable_hidden_line_removal() @property def scalar_bar(self): """First scalar bar. Kept for backwards compatibility.""" return list(self.scalar_bars.values())[0] @property def scalar_bars(self): """Scalar bars. Examples -------- >>> import pyvista >>> sphere = pyvista.Sphere() >>> sphere['Data'] = sphere.points[:, 2] >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(sphere) >>> plotter.scalar_bars Scalar Bar Title Interactive "Data" False Select a scalar bar actor based on the title of the bar. >>> plotter.scalar_bars['Data'] # doctest:+SKIP (vtkmodules.vtkRenderingAnnotation.vtkScalarBarActor)0x7fcd3567ca00 """ return self._scalar_bars @property def _before_close_callback(self): """Return the cached function (expecting a reference).""" if self.__before_close_callback is not None: return self.__before_close_callback() @_before_close_callback.setter def _before_close_callback(self, func): """Store a weakref.ref of the function being called.""" if func is not None: self.__before_close_callback = weakref.ref(func) else: self.__before_close_callback = None @property def shape(self): """Shape of the plotter. Examples -------- Return the plotter shape. >>> import pyvista >>> plotter = pyvista.Plotter(shape=(2, 2)) >>> plotter.shape (2, 2) """ return self.renderers._shape @property def renderer(self): """Return the active renderer. Examples -------- >>> import pyvista >>> pl = pyvista.Plotter() >>> pl.renderer # doctest:+SKIP (Renderer)0x7f916129bfa0 """ return self.renderers.active_renderer @property def store_image(self): """Store last rendered frame on close. This is normally disabled to avoid caching the image, and is enabled by default by setting: ``pyvista.BUILDING_GALLERY = True`` Examples -------- >>> import pyvista >>> pl = pyvista.Plotter(off_screen=True) >>> pl.store_image = True >>> _ = pl.add_mesh(pyvista.Cube()) >>> pl.show() >>> image = pl.last_image >>> type(image) # doctest:+SKIP <class 'numpy.ndarray'> """ return self._store_image @store_image.setter def store_image(self, value): """Store last rendered frame on close.""" self._store_image = bool(value) def subplot(self, index_row, index_column=None): """Set the active subplot. Parameters ---------- index_row : int Index of the subplot to activate along the rows. 
index_column : int Index of the subplot to activate along the columns. Examples -------- Create a 2 wide plot and set the background of right-hand plot to orange. Add a cube to the left plot and a sphere to the right. >>> import pyvista >>> pl = pyvista.Plotter(shape=(1, 2)) >>> actor = pl.add_mesh(pyvista.Cube()) >>> pl.subplot(0, 1) >>> actor = pl.add_mesh(pyvista.Sphere()) >>> pl.set_background('orange', all_renderers=False) >>> pl.show() """ self.renderers.set_active_renderer(index_row, index_column) @wraps(Renderer.add_legend) def add_legend(self, *args, **kwargs): """Wrap ``Renderer.add_legend``.""" return self.renderer.add_legend(*args, **kwargs) @wraps(Renderer.remove_legend) def remove_legend(self, *args, **kwargs): """Wrap ``Renderer.remove_legend``.""" return self.renderer.remove_legend(*args, **kwargs) @property def legend(self): """Legend actor. There can only be one legend actor per renderer. If ``legend`` is ``None``, there is no legend actor. """ return self.renderer.legend @wraps(Renderer.add_floor) def add_floor(self, *args, **kwargs): """Wrap ``Renderer.add_floor``.""" return self.renderer.add_floor(*args, **kwargs) @wraps(Renderer.remove_floors) def remove_floors(self, *args, **kwargs): """Wrap ``Renderer.remove_floors``.""" return self.renderer.remove_floors(*args, **kwargs) def enable_3_lights(self, only_active=False): """Enable 3-lights illumination. This will replace all pre-existing lights in the scene. Parameters ---------- only_active : bool If ``True``, only change the active renderer. The default is that every renderer is affected. Examples -------- >>> from pyvista import demos >>> pl = demos.orientation_plotter() >>> pl.enable_3_lights() >>> pl.show() Note how this varies from the default plotting. >>> pl = demos.orientation_plotter() >>> pl.show() """ def _to_pos(elevation, azimuth): theta = azimuth * np.pi / 180.0 phi = (90.0 - elevation) * np.pi / 180.0 x = np.sin(theta) * np.sin(phi) y = np.cos(phi) z = np.cos(theta) * np.sin(phi) return x, y, z renderers = [self.renderer] if only_active else self.renderers for renderer in renderers: renderer.remove_all_lights() # Inspired from Mayavi's version of Raymond Maple 3-lights illumination intensities = [1, 0.6, 0.5] all_angles = [(45.0, 45.0), (-30.0, -60.0), (-30.0, 60.0)] for intensity, angles in zip(intensities, all_angles): light = pyvista.Light(light_type='camera light') light.intensity = intensity light.position = _to_pos(*angles) for renderer in renderers: renderer.add_light(light) def disable_3_lights(self): """Please use ``enable_lightkit``, this method has been depreciated.""" from pyvista.core.errors import DeprecationError raise DeprecationError('DEPRECATED: Please use ``enable_lightkit``') def enable_lightkit(self, only_active=False): """Enable the default light-kit lighting. See: https://www.researchgate.net/publication/2926068 This will replace all pre-existing lights in the renderer. Parameters ---------- only_active : bool If ``True``, only change the active renderer. The default is that every renderer is affected. Examples -------- Create a plotter without any lights and then enable the default light kit. >>> import pyvista >>> pl = pyvista.Plotter(lighting=None) >>> pl.enable_lightkit() >>> actor = pl.add_mesh(pyvista.Cube(), show_edges=True) >>> pl.show() """ renderers = [self.renderer] if only_active else self.renderers light_kit = _vtk.vtkLightKit() for renderer in renderers: renderer.remove_all_lights() # Use the renderer as a vtkLightKit parser. 
# Feed it the LightKit, pop off the vtkLights, put back # pyvista Lights. This is the price we must pay for using # inheritance rather than composition. light_kit.AddLightsToRenderer(renderer) vtk_lights = renderer.lights renderer.remove_all_lights() for vtk_light in vtk_lights: light = pyvista.Light.from_vtk(vtk_light) renderer.add_light(light) renderer.LightFollowCameraOn() @wraps(Renderer.enable_anti_aliasing) def enable_anti_aliasing(self, *args, **kwargs): """Wrap ``Renderer.enable_anti_aliasing``.""" for renderer in self.renderers: renderer.enable_anti_aliasing(*args, **kwargs) @wraps(Renderer.disable_anti_aliasing) def disable_anti_aliasing(self, *args, **kwargs): """Wrap ``Renderer.disable_anti_aliasing``.""" self.renderer.disable_anti_aliasing(*args, **kwargs) @wraps(Renderer.set_focus) def set_focus(self, *args, render=True, **kwargs): """Wrap ``Renderer.set_focus``.""" log.debug('set_focus: %s, %s', str(args), str(kwargs)) self.renderer.set_focus(*args, **kwargs) if render: self.render() @wraps(Renderer.set_position) def set_position(self, *args, render=True, **kwargs): """Wrap ``Renderer.set_position``.""" self.renderer.set_position(*args, **kwargs) if render: self.render() @wraps(Renderer.set_viewup) def set_viewup(self, *args, render=True, **kwargs): """Wrap ``Renderer.set_viewup``.""" self.renderer.set_viewup(*args, **kwargs) if render: self.render() @wraps(Renderer.add_orientation_widget) def add_orientation_widget(self, *args, **kwargs): """Wrap ``Renderer.add_orientation_widget``.""" return self.renderer.add_orientation_widget(*args, **kwargs) @wraps(Renderer.add_axes) def add_axes(self, *args, **kwargs): """Wrap ``Renderer.add_axes``.""" return self.renderer.add_axes(*args, **kwargs) @wraps(Renderer.hide_axes) def hide_axes(self, *args, **kwargs): """Wrap ``Renderer.hide_axes``.""" return self.renderer.hide_axes(*args, **kwargs) @wraps(Renderer.show_axes) def show_axes(self, *args, **kwargs): """Wrap ``Renderer.show_axes``.""" return self.renderer.show_axes(*args, **kwargs) @wraps(Renderer.update_bounds_axes) def update_bounds_axes(self, *args, **kwargs): """Wrap ``Renderer.update_bounds_axes``.""" return self.renderer.update_bounds_axes(*args, **kwargs) @wraps(Renderer.add_chart) def add_chart(self, *args, **kwargs): """Wrap ``Renderer.add_chart``.""" return self.renderer.add_chart(*args, **kwargs) @wraps(Renderer.remove_chart) def remove_chart(self, *args, **kwargs): """Wrap ``Renderer.remove_chart``.""" return self.renderer.remove_chart(*args, **kwargs) @wraps(Renderer.add_actor) def add_actor(self, *args, **kwargs): """Wrap ``Renderer.add_actor``.""" return self.renderer.add_actor(*args, **kwargs) @wraps(Renderer.enable_parallel_projection) def enable_parallel_projection(self, *args, **kwargs): """Wrap ``Renderer.enable_parallel_projection``.""" return self.renderer.enable_parallel_projection(*args, **kwargs) @wraps(Renderer.disable_parallel_projection) def disable_parallel_projection(self, *args, **kwargs): """Wrap ``Renderer.disable_parallel_projection``.""" return self.renderer.disable_parallel_projection(*args, **kwargs) @wraps(Renderer.enable_shadows) def enable_shadows(self, *args, **kwargs): """Wrap ``Renderer.enable_shadows``.""" return self.renderer.enable_shadows(*args, **kwargs) @wraps(Renderer.disable_shadows) def disable_shadows(self, *args, **kwargs): """Wrap ``Renderer.disable_shadows``.""" return self.renderer.disable_shadows(*args, **kwargs) @property def parallel_projection(self): """Return parallel projection state of active render window.""" 
return self.renderer.parallel_projection @parallel_projection.setter def parallel_projection(self, state): """Set parallel projection state of all active render windows.""" self.renderer.parallel_projection = state @property def parallel_scale(self): """Return parallel scale of active render window.""" return self.renderer.parallel_scale @parallel_scale.setter def parallel_scale(self, value): """Set parallel scale of all active render windows.""" self.renderer.parallel_scale = value @wraps(Renderer.add_axes_at_origin) def add_axes_at_origin(self, *args, **kwargs): """Wrap ``Renderer.add_axes_at_origin``.""" return self.renderer.add_axes_at_origin(*args, **kwargs) @wraps(Renderer.show_bounds) def show_bounds(self, *args, **kwargs): """Wrap ``Renderer.show_bounds``.""" return self.renderer.show_bounds(*args, **kwargs) @wraps(Renderer.add_bounding_box) def add_bounding_box(self, *args, **kwargs): """Wrap ``Renderer.add_bounding_box``.""" return self.renderer.add_bounding_box(*args, **kwargs) @wraps(Renderer.remove_bounding_box) def remove_bounding_box(self, *args, **kwargs): """Wrap ``Renderer.remove_bounding_box``.""" return self.renderer.remove_bounding_box(*args, **kwargs) @wraps(Renderer.remove_bounds_axes) def remove_bounds_axes(self, *args, **kwargs): """Wrap ``Renderer.remove_bounds_axes``.""" return self.renderer.remove_bounds_axes(*args, **kwargs) @wraps(Renderer.show_grid) def show_grid(self, *args, **kwargs): """Wrap ``Renderer.show_grid``.""" return self.renderer.show_grid(*args, **kwargs) @wraps(Renderer.set_scale) def set_scale(self, *args, **kwargs): """Wrap ``Renderer.set_scale``.""" return self.renderer.set_scale(*args, **kwargs) @wraps(Renderer.enable_eye_dome_lighting) def enable_eye_dome_lighting(self, *args, **kwargs): """Wrap ``Renderer.enable_eye_dome_lighting``.""" return self.renderer.enable_eye_dome_lighting(*args, **kwargs) @wraps(Renderer.disable_eye_dome_lighting) def disable_eye_dome_lighting(self, *args, **kwargs): """Wrap ``Renderer.disable_eye_dome_lighting``.""" self.renderer.disable_eye_dome_lighting(*args, **kwargs) @wraps(Renderer.reset_camera) def reset_camera(self, *args, **kwargs): """Wrap ``Renderer.reset_camera``.""" self.renderer.reset_camera(*args, **kwargs) self.render() @wraps(Renderer.isometric_view) def isometric_view(self, *args, **kwargs): """Wrap ``Renderer.isometric_view``.""" self.renderer.isometric_view(*args, **kwargs) @wraps(Renderer.view_isometric) def view_isometric(self, *args, **kwarg): """Wrap ``Renderer.view_isometric``.""" self.renderer.view_isometric(*args, **kwarg) @wraps(Renderer.view_vector) def view_vector(self, *args, **kwarg): """Wrap ``Renderer.view_vector``.""" self.renderer.view_vector(*args, **kwarg) @wraps(Renderer.view_xy) def view_xy(self, *args, **kwarg): """Wrap ``Renderer.view_xy``.""" self.renderer.view_xy(*args, **kwarg) @wraps(Renderer.view_yx) def view_yx(self, *args, **kwarg): """Wrap ``Renderer.view_yx``.""" self.renderer.view_yx(*args, **kwarg) @wraps(Renderer.view_xz) def view_xz(self, *args, **kwarg): """Wrap ``Renderer.view_xz``.""" self.renderer.view_xz(*args, **kwarg) @wraps(Renderer.view_zx) def view_zx(self, *args, **kwarg): """Wrap ``Renderer.view_zx``.""" self.renderer.view_zx(*args, **kwarg) @wraps(Renderer.view_yz) def view_yz(self, *args, **kwarg): """Wrap ``Renderer.view_yz``.""" self.renderer.view_yz(*args, **kwarg) @wraps(Renderer.view_zy) def view_zy(self, *args, **kwarg): """Wrap ``Renderer.view_zy``.""" self.renderer.view_zy(*args, **kwarg) @wraps(Renderer.disable) def disable(self, *args, 
**kwarg): """Wrap ``Renderer.disable``.""" self.renderer.disable(*args, **kwarg) @wraps(Renderer.enable) def enable(self, *args, **kwarg): """Wrap ``Renderer.enable``.""" self.renderer.enable(*args, **kwarg) @wraps(Renderer.enable_depth_peeling) def enable_depth_peeling(self, *args, **kwargs): """Wrap ``Renderer.enable_depth_peeling``.""" if hasattr(self, 'ren_win'): result = self.renderer.enable_depth_peeling(*args, **kwargs) if result: self.ren_win.AlphaBitPlanesOn() return result @wraps(Renderer.disable_depth_peeling) def disable_depth_peeling(self): """Wrap ``Renderer.disable_depth_peeling``.""" if hasattr(self, 'ren_win'): self.ren_win.AlphaBitPlanesOff() return self.renderer.disable_depth_peeling() @wraps(Renderer.get_default_cam_pos) def get_default_cam_pos(self, *args, **kwargs): """Wrap ``Renderer.get_default_cam_pos``.""" return self.renderer.get_default_cam_pos(*args, **kwargs) @wraps(Renderer.remove_actor) def remove_actor(self, *args, **kwargs): """Wrap ``Renderer.remove_actor``.""" for renderer in self.renderers: renderer.remove_actor(*args, **kwargs) return True @wraps(Renderer.set_environment_texture) def set_environment_texture(self, *args, **kwargs): """Wrap ``Renderer.set_environment_texture``.""" return self.renderer.set_environment_texture(*args, **kwargs) #### Properties from Renderer #### @property def camera(self): """Return the active camera of the active renderer.""" if not self.camera_set: self.camera_position = self.get_default_cam_pos() self.reset_camera() self.camera_set = True return self.renderer.camera @camera.setter def camera(self, camera): """Set the active camera for the rendering scene.""" self.renderer.camera = camera @property def camera_set(self): """Return if the camera of the active renderer has been set.""" return self.renderer.camera_set @camera_set.setter def camera_set(self, is_set): """Set if the camera has been set on the active renderer.""" self.renderer.camera_set = is_set @property def bounds(self): """Return the bounds of the active renderer. Returns ------- list Bounds of the active renderer. 
Examples -------- >>> import pyvista >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(pyvista.Cube()) >>> pl.bounds [-0.5, 0.5, -0.5, 0.5, -0.5, 0.5] """ return self.renderer.bounds @property def length(self): """Return the length of the diagonal of the bounding box of the scene.""" return self.renderer.length @property def center(self): """Return the center of the active renderer.""" return self.renderer.center @property def _scalar_bar_slots(self): """Return the scalar bar slots of the active renderer.""" return self.renderer._scalar_bar_slots @_scalar_bar_slots.setter def _scalar_bar_slots(self, value): """Set the scalar bar slots of the active renderer.""" self.renderer._scalar_bar_slots = value @property def _scalar_bar_slot_lookup(self): """Return the scalar bar slot lookup of the active renderer.""" return self.renderer._scalar_bar_slot_lookup @_scalar_bar_slot_lookup.setter def _scalar_bar_slot_lookup(self, value): """Set the scalar bar slot lookup of the active renderer.""" self.renderer._scalar_bar_slot_lookup = value @property def scale(self): """Return the scaling of the active renderer.""" return self.renderer.scale @scale.setter def scale(self, scale): """Set the scaling of the active renderer.""" self.renderer.set_scale(*scale) @property def camera_position(self): """Return camera position of the active render window.""" return self.renderer.camera_position @camera_position.setter def camera_position(self, camera_location): """Set camera position of the active render window.""" self.renderer.camera_position = camera_location @property def background_color(self): """Return the background color of the active render window.""" return self.renderers.active_renderer.background_color @background_color.setter def background_color(self, color): """Set the background color of all the render windows.""" self.set_background(color) @property def window_size(self): """Return the render window size in ``(width, height)``. Examples -------- Change the window size from ``200 x 200`` to ``400 x 400``. >>> import pyvista >>> pl = pyvista.Plotter(window_size=[200, 200]) >>> pl.window_size [200, 200] >>> pl.window_size = [400, 400] >>> pl.window_size [400, 400] """ return list(self.ren_win.GetSize()) @window_size.setter def window_size(self, window_size): """Set the render window size.""" self.ren_win.SetSize(window_size[0], window_size[1]) @property def image_depth(self): """Return a depth image representing current render window. Helper attribute for ``get_image_depth``. """ return self.get_image_depth() def _check_rendered(self): """Check if the render window has been shown and raise an exception if not.""" if not self._rendered: raise AttributeError( '\nThis plotter has not yet been set up and rendered ' 'with ``show()``.\n' 'Consider setting ``off_screen=True`` ' 'for off screen rendering.\n' ) def _check_has_ren_win(self): """Check if render window attribute exists and raise an exception if not.""" if not hasattr(self, 'ren_win'): raise AttributeError( '\n\nTo retrieve an image after the render window ' 'has been closed, set:\n\n' ' ``plotter.store_image = True``\n\n' 'before closing the plotter.' ) @property def image(self): """Return an image array of current render window. To retrieve an image after the render window has been closed, set: ``plotter.store_image = True`` before closing the plotter. 
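Examples
--------
A minimal sketch, assuming off screen rendering is available and that
``store_image`` is set so the image survives closing the window:

>>> import pyvista
>>> pl = pyvista.Plotter(off_screen=True)
>>> _ = pl.add_mesh(pyvista.Cube())
>>> pl.store_image = True
>>> pl.show()
>>> img = pl.image  # doctest:+SKIP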
""" if not hasattr(self, 'ren_win') and self.last_image is not None: return self.last_image self._check_rendered() self._check_has_ren_win() data = image_from_window(self.ren_win) if self.image_transparent_background: return data # ignore alpha channel return data[:, :, :-1] def render(self): """Render the main window. Does nothing until ``show`` has been called. """ if hasattr(self, 'ren_win') and not self._first_time: log.debug('Rendering') self.ren_win.Render() self._rendered = True @wraps(RenderWindowInteractor.add_key_event) def add_key_event(self, *args, **kwargs): """Wrap RenderWindowInteractor.add_key_event.""" if hasattr(self, 'iren'): self.iren.add_key_event(*args, **kwargs) def clear_events_for_key(self, key): """Remove the callbacks associated to the key. Parameters ---------- key : str Key to clear events for. """ self.iren.clear_events_for_key(key) def store_mouse_position(self, *args): """Store mouse position.""" if not hasattr(self, "iren"): raise AttributeError("This plotting window is not interactive.") self.mouse_position = self.iren.get_event_position() def store_click_position(self, *args): """Store click position in viewport coordinates.""" if not hasattr(self, "iren"): raise AttributeError("This plotting window is not interactive.") self.click_position = self.iren.get_event_position() self.mouse_position = self.click_position def track_mouse_position(self): """Keep track of the mouse position. This will potentially slow down the interactor. No callbacks supported here - use :func:`pyvista.BasePlotter.track_click_position` instead. """ self.iren.track_mouse_position(self.store_mouse_position) def untrack_mouse_position(self): """Stop tracking the mouse position.""" self.iren.untrack_mouse_position() @wraps(RenderWindowInteractor.track_click_position) def track_click_position(self, *args, **kwargs): """Wrap RenderWindowInteractor.track_click_position.""" self.iren.track_click_position(*args, **kwargs) @wraps(RenderWindowInteractor.untrack_click_position) def untrack_click_position(self, *args, **kwargs): """Stop tracking the click position.""" self.iren.untrack_click_position(*args, **kwargs) @property def pickable_actors(self): """Return or set the pickable actors. When setting, this will be the list of actors to make pickable. All actors not in the list will be made unpickable. If ``actors`` is ``None``, all actors will be made unpickable. Returns ------- list of vtk.vtkActors Examples -------- Add two actors to a :class:`pyvista.Plotter`, make one pickable, and then list the pickable actors. >>> import pyvista as pv >>> pl = pv.Plotter() >>> sphere_actor = pl.add_mesh(pv.Sphere()) >>> cube_actor = pl.add_mesh(pv.Cube(), pickable=False, style='wireframe') >>> len(pl.pickable_actors) 1 Set the pickable actors to both actors. >>> pl.pickable_actors = [sphere_actor, cube_actor] >>> len(pl.pickable_actors) 2 Set the pickable actors to ``None``. >>> pl.pickable_actors = None >>> len(pl.pickable_actors) 0 """ pickable = [] for renderer in self.renderers: for actor in renderer.actors.values(): if actor.GetPickable(): pickable.append(actor) return pickable @pickable_actors.setter def pickable_actors(self, actors=None): """Set the pickable actors.""" actors = [] if actors is None else actors if isinstance(actors, _vtk.vtkActor): actors = [actors] if not all([isinstance(actor, _vtk.vtkActor) for actor in actors]): raise TypeError( f'Expected a vtkActor instance or a list of vtkActors, got ' f'{[type(actor) for actor in actors]} instead.' 
) for renderer in self.renderers: for actor in renderer.actors.values(): actor.SetPickable(actor in actors) def _prep_for_close(self): """Make sure a screenshot is acquired before closing. This doesn't actually close anything! It just preps the plotter for closing. """ # Grab screenshot right before renderer closes self.last_image = self.screenshot(True, return_img=True) self.last_image_depth = self.get_image_depth() def increment_point_size_and_line_width(self, increment): """Increment point size and line width of all actors. For every actor in the scene, increment both its point size and line width by the given value. Parameters ---------- increment : float Amount to increment point size and line width. """ for renderer in self.renderers: for actor in renderer._actors.values(): if hasattr(actor, "GetProperty"): prop = actor.GetProperty() if hasattr(prop, "SetPointSize"): prop.SetPointSize(prop.GetPointSize() + increment) if hasattr(prop, "SetLineWidth"): prop.SetLineWidth(prop.GetLineWidth() + increment) self.render() return def reset_key_events(self): """Reset all of the key press events to their defaults.""" if hasattr(self, 'iren'): self.iren.clear_key_event_callbacks() self.add_key_event('q', self._prep_for_close) # Add no matter what b_left_down_callback = lambda: self.iren.add_observer( 'LeftButtonPressEvent', self.left_button_down ) self.add_key_event('b', b_left_down_callback) self.add_key_event('v', lambda: self.isometric_view_interactive()) self.add_key_event('C', lambda: self.enable_cell_picking()) self.add_key_event('Up', lambda: self.camera.Zoom(1.05)) self.add_key_event('Down', lambda: self.camera.Zoom(0.95)) self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1)) self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1)) @wraps(RenderWindowInteractor.key_press_event) def key_press_event(self, *args, **kwargs): """Wrap RenderWindowInteractor.key_press_event.""" self.iren.key_press_event(*args, **kwargs) def left_button_down(self, obj, event_type): """Register the event for a left button down click.""" if hasattr(self.ren_win, 'GetOffScreenFramebuffer'): if not self.ren_win.GetOffScreenFramebuffer().GetFBOIndex(): # must raise a runtime error as this causes a segfault on VTK9 raise ValueError('Invoking helper with no framebuffer') # Get 2D click location on window click_pos = self.iren.get_event_position() # Get corresponding click location in the 3D plot picker = _vtk.vtkWorldPointPicker() picker.Pick(click_pos[0], click_pos[1], 0, self.renderer) self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3)) if np.any(np.isnan(self.pickpoint)): self.pickpoint[:] = 0 @wraps(RenderWindowInteractor.enable_trackball_style) def enable_trackball_style(self): """Wrap RenderWindowInteractor.enable_trackball_style.""" self.iren.enable_trackball_style() @wraps(RenderWindowInteractor.enable_trackball_actor_style) def enable_trackball_actor_style(self): """Wrap RenderWindowInteractor.enable_trackball_actor_style.""" self.iren.enable_trackball_actor_style() @wraps(RenderWindowInteractor.enable_image_style) def enable_image_style(self): """Wrap RenderWindowInteractor.enable_image_style.""" self.iren.enable_image_style() @wraps(RenderWindowInteractor.enable_joystick_style) def enable_joystick_style(self): """Wrap RenderWindowInteractor.enable_joystick_style.""" self.iren.enable_joystick_style() @wraps(RenderWindowInteractor.enable_joystick_actor_style) def enable_joystick_actor_style(self): """Wrap 
RenderWindowInteractor.enable_joystick_actor_style.""" self.iren.enable_joystick_actor_style() @wraps(RenderWindowInteractor.enable_zoom_style) def enable_zoom_style(self): """Wrap RenderWindowInteractor.enable_zoom_style.""" self.iren.enable_zoom_style() @wraps(RenderWindowInteractor.enable_terrain_style) def enable_terrain_style(self, *args, **kwargs): """Wrap RenderWindowInteractor.enable_terrain_style.""" self.iren.enable_terrain_style(*args, **kwargs) @wraps(RenderWindowInteractor.enable_rubber_band_style) def enable_rubber_band_style(self): """Wrap RenderWindowInteractor.enable_rubber_band_style.""" self.iren.enable_rubber_band_style() @wraps(RenderWindowInteractor.enable_rubber_band_2d_style) def enable_rubber_band_2d_style(self): """Wrap RenderWindowInteractor.enable_rubber_band_2d_style.""" self.iren.enable_rubber_band_2d_style() def enable_stereo_render(self): """Enable stereo rendering. Disable this with :func:`disable_stereo_render <BasePlotter.disable_stereo_render>` Examples -------- Enable stereo rendering to show a cube as an anaglyph image. >>> import pyvista as pv >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Cube()) >>> pl.enable_stereo_render() >>> pl.show() """ if hasattr(self, 'ren_win'): self.ren_win.StereoRenderOn() self.ren_win.SetStereoTypeToAnaglyph() def disable_stereo_render(self): """Disable stereo rendering. Enable again with :func:`enable_stereo_render <BasePlotter.enable_stereo_render>` Examples -------- Enable and then disable stereo rendering. It should show a simple cube. >>> import pyvista as pv >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Cube()) >>> pl.enable_stereo_render() >>> pl.disable_stereo_render() >>> pl.show() """ if hasattr(self, 'ren_win'): self.ren_win.StereoRenderOff() def hide_axes_all(self): """Hide the axes orientation widget in all renderers.""" for renderer in self.renderers: renderer.hide_axes() def show_axes_all(self): """Show the axes orientation widget in all renderers. Examples -------- >>> import pyvista >>> from pyvista import examples >>> >>> # create multi-window plot (1 row, 2 columns) >>> pl = pyvista.Plotter(shape=(1, 2)) >>> >>> # activate subplot 1 and add a mesh >>> pl.subplot(0, 0) >>> _ = pl.add_mesh(examples.load_globe()) >>> >>> # activate subplot 2 and add a mesh >>> pl.subplot(0, 1) >>> _ = pl.add_mesh(examples.load_airplane()) >>> >>> # show the axes orientation widget in all subplots >>> pl.show_axes_all() >>> >>> # display the window >>> pl.show() """ for renderer in self.renderers: renderer.show_axes() def isometric_view_interactive(self): """Set the current interactive render window to isometric view.""" interactor = self.iren.get_interactor_style() renderer = interactor.GetCurrentRenderer() if renderer is None: renderer = self.renderer renderer.view_isometric() def update(self, stime=1, force_redraw=True): """Update window, redraw, process messages query. Parameters ---------- stime : int, optional Duration of timer that interrupt vtkRenderWindowInteractor in milliseconds. force_redraw : bool, optional Call ``render`` immediately. 
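Examples
--------
A hedged sketch of a manual update loop; ``auto_close`` and
``interactive_update`` are assumed options of ``show`` here, and the
calls are not run as doctests:

>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.show(auto_close=False, interactive_update=True)  # doctest:+SKIP
>>> pl.update(stime=10, force_redraw=True)  # doctest:+SKIP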
""" if stime <= 0: stime = 1 curr_time = time.time() if Plotter.last_update_time > curr_time: Plotter.last_update_time = curr_time if self.iren is not None: update_rate = self.iren.get_desired_update_rate() if (curr_time - Plotter.last_update_time) > (1.0 / update_rate): self.right_timer_id = self.iren.create_repeating_timer(stime) self.render() Plotter.last_update_time = curr_time return if force_redraw: self.render() def add_mesh( self, mesh, color=None, style=None, scalars=None, clim=None, show_edges=None, edge_color=None, point_size=5.0, line_width=None, opacity=1.0, flip_scalars=False, lighting=None, n_colors=256, interpolate_before_map=True, cmap=None, label=None, reset_camera=None, scalar_bar_args=None, show_scalar_bar=None, multi_colors=False, name=None, texture=None, render_points_as_spheres=None, render_lines_as_tubes=False, smooth_shading=None, split_sharp_edges=None, ambient=0.0, diffuse=1.0, specular=0.0, specular_power=100.0, nan_color=None, nan_opacity=1.0, culling=None, rgb=None, categories=False, silhouette=False, use_transparency=False, below_color=None, above_color=None, annotations=None, pickable=True, preference="point", log_scale=False, pbr=False, metallic=0.0, roughness=0.5, render=True, component=None, **kwargs, ): """Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene. This method is using a mesh representation to view the surfaces and/or geometry of datasets. For volume rendering, see :func:`pyvista.BasePlotter.add_volume`. Parameters ---------- mesh : pyvista.DataSet or pyvista.MultiBlock Any PyVista or VTK mesh is supported. Also, any dataset that :func:`pyvista.wrap` can handle including NumPy arrays of XYZ points. color : color_like, optional, defaults to white Use to make the entire mesh have a single solid color. Either a string, RGB list, or hex color string. For example: ``color='white'``, ``color='w'``, ``color=[1.0, 1.0, 1.0]``, or ``color='#FFFFFF'``. Color will be overridden if scalars are specified. style : str, optional Visualization style of the mesh. One of the following: ``style='surface'``, ``style='wireframe'``, ``style='points'``. Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a wireframe of the outer geometry. scalars : str or numpy.ndarray, optional Scalars used to "color" the mesh. Accepts a string name of an array that is present on the mesh or an array equal to the number of cells or the number of points in the mesh. Array should be sized as a single vector. If both ``color`` and ``scalars`` are ``None``, then the active scalars are used. clim : 2 item list, optional Color bar range for scalars. Defaults to minimum and maximum of scalars array. Example: ``[-1, 2]``. ``rng`` is also an accepted alias for this. show_edges : bool, optional Shows the edges of a mesh. Does not apply to a wireframe representation. edge_color : color_like, optional, defaults to black The solid color to give the edges when ``show_edges=True``. Either a string, RGB list, or hex color string. point_size : float, optional Point size of any nodes in the dataset plotted. Also applicable when style='points'. Default ``5.0``. line_width : float, optional Thickness of lines. Only valid for wireframe and surface representations. Default None. opacity : float, str, array-like Opacity of the mesh. If a single float value is given, it will be the global opacity of the mesh and uniformly applied everywhere - should be between 0 and 1. 
A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: ``'linear'``, ``'linear_r'``, ``'geom'``, ``'geom_r'``). A string could also be used to map a scalars array from the mesh to the opacity (must have same number of elements as the ``scalars`` argument). Or you can pass a custom made transfer function that is an array either ``n_colors`` in length or shorter. flip_scalars : bool, optional Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do this as well. lighting : bool, optional Enable or disable view direction lighting. Default ``False``. n_colors : int, optional Number of colors to use when displaying scalars. Defaults to 256. The scalar bar will also have this many colors. interpolate_before_map : bool, optional Enabling makes for a smoother scalars display. Default is ``True``. When ``False``, OpenGL will interpolate the mapped colors which can result is showing colors that are not present in the color map. cmap : str, list, optional Name of the Matplotlib colormap to use when mapping the ``scalars``. See available Matplotlib colormaps. Only applicable for when displaying ``scalars``. Requires Matplotlib to be installed. ``colormap`` is also an accepted alias for this. If ``colorcet`` or ``cmocean`` are installed, their colormaps can be specified by name. You can also specify a list of colors to override an existing colormap with a custom one. For example, to create a three color colormap you might specify ``['green', 'red', 'blue']``. label : str, optional String label to use when adding a legend to the scene with :func:`pyvista.BasePlotter.add_legend`. reset_camera : bool, optional Reset the camera after adding this mesh to the scene. scalar_bar_args : dict, optional Dictionary of keyword arguments to pass when adding the scalar bar to the scene. For options, see :func:`pyvista.BasePlotter.add_scalar_bar`. show_scalar_bar : bool If ``False``, a scalar bar will not be added to the scene. Defaults to ``True``. multi_colors : bool, optional If a ``MultiBlock`` dataset is given this will color each block by a solid color using matplotlib's color cycler. name : str, optional The name for the added mesh/actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. texture : vtk.vtkTexture or np.ndarray or bool, optional A texture to apply if the input mesh has texture coordinates. This will not work with MultiBlock datasets. If set to ``True``, the first available texture on the object will be used. If a string name is given, it will pull a texture with that name associated to the input mesh. render_points_as_spheres : bool, optional Render points as spheres rather than dots. render_lines_as_tubes : bool, optional Show lines as thick tubes rather than flat lines. Control the width with ``line_width``. smooth_shading : bool, optional Enable smooth shading when ``True`` using the Phong shading algorithm. When ``False``, use flat shading. Automatically enabled when ``pbr=True``. See :ref:`shading_example`. split_sharp_edges : bool, optional Split sharp edges exceeding 30 degrees when plotting with smooth shading. Control the angle with the optional keyword argument ``feature_angle``. By default this is ``False`` unless overridden by the global or plotter theme. Note that enabling this will create a copy of the input mesh within the plotter. See :ref:`shading_example`. 
ambient : float, optional
    When lighting is enabled, this is the amount of light in the range
    of 0 to 1 (default 0.0) that reaches the actor when not directed at
    the light source emitted from the viewer.

diffuse : float, optional
    The diffuse lighting coefficient. Default 1.0.

specular : float, optional
    The specular lighting coefficient. Default 0.0.

specular_power : float, optional
    The specular power. Between 0.0 and 128.0.

nan_color : color_like, optional, defaults to gray
    The color to use for all ``NaN`` values in the plotted scalar
    array.

nan_opacity : float, optional
    Opacity of ``NaN`` values. Should be between 0 and 1.
    Default 1.0.

culling : str, optional
    Does not render faces that are culled. Options are ``'front'`` or
    ``'back'``. This can be helpful for dense surface meshes,
    especially when edges are visible, but can cause flat meshes to be
    partially displayed. Defaults to ``False``.

rgb : bool, optional
    If a 2 dimensional array is passed as the scalars, plot those
    values as RGB(A) colors. ``rgba`` is also an accepted alias for
    this. Opacity (the A) is optional. If a scalars array ending with
    ``"_rgba"`` is passed, the default becomes ``True``. This can be
    overridden by setting this parameter to ``False``.

categories : bool, optional
    If set to ``True``, then the number of unique values in the scalar
    array will be used as the ``n_colors`` argument.

silhouette : dict, bool, optional
    If set to ``True``, plot a silhouette highlight for the mesh. This
    feature is only available for a triangulated ``PolyData``. As a
    ``dict``, it contains the properties of the silhouette to display:

    * ``color``: ``color_like``, color of the silhouette
    * ``line_width``: ``float``, edge width
    * ``opacity``: ``float`` between 0 and 1, edge transparency
    * ``feature_angle``: If a ``float``, display sharp edges exceeding
      that angle in degrees.
    * ``decimate``: ``float`` between 0 and 1, level of decimation

use_transparency : bool, optional
    Invert the opacity mappings and make the values correspond to
    transparency.

below_color : color_like, optional
    Solid color for values below the scalars range (``clim``). This
    will automatically set the scalar bar ``below_label`` to
    ``'Below'``.

above_color : color_like, optional
    Solid color for values above the scalars range (``clim``). This
    will automatically set the scalar bar ``above_label`` to
    ``'Above'``.

annotations : dict, optional
    Pass a dictionary of annotations. Keys are the float values in the
    scalars range to annotate on the scalar bar and the values are the
    string annotations.

pickable : bool, optional
    Set whether this actor is pickable.

preference : str, optional
    When ``mesh.n_points == mesh.n_cells`` and setting scalars, this
    parameter sets how the scalars will be mapped to the mesh. Default
    ``'point'``, which causes the scalars to be associated with the
    mesh points. Can be either ``'point'`` or ``'cell'``.

log_scale : bool, optional
    Use log scale when mapping data to colors. Scalars less than zero
    are mapped to the smallest representable positive float. Default
    ``False``.

pbr : bool, optional
    Enable physics based rendering (PBR) if the mesh is ``PolyData``.
    Use the ``color`` argument to set the base color. This is only
    available in VTK>=9.

metallic : float, optional
    Usually this value is either 0 or 1 for a real material but any
    value in between is valid. This parameter is only used by PBR
    interpolation. Default value is 0.0.

roughness : float, optional
    This value has to be between 0 (glossy) and 1 (rough).
A glossy material has reflections and a high specular part. This parameter is only used by PBR interpolation. Default value is 0.5. render : bool, optional Force a render when ``True``. Default ``True``. component : int, optional Set component of vector valued scalars to plot. Must be nonnegative, if supplied. If ``None``, the magnitude of the vector is plotted. **kwargs : dict, optional Optional developer keyword arguments. Returns ------- vtk.vtkActor VTK actor of the mesh. Examples -------- Add a sphere to the plotter and show it with a custom scalar bar title. >>> import pyvista >>> sphere = pyvista.Sphere() >>> sphere['Data'] = sphere.points[:, 2] >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(sphere, ... scalar_bar_args={'title': 'Z Position'}) >>> plotter.show() Plot using RGB on a single cell. Note that since the number of points and the number of cells are identical, we have to pass ``preference='cell'``. >>> import pyvista >>> import numpy as np >>> vertices = np.array([[0, 0, 0], [1, 0, 0], [.5, .667, 0], [0.5, .33, 0.667]]) >>> faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2], [3, 0, 1, 3], [3, 1, 2, 3]]) >>> mesh = pyvista.PolyData(vertices, faces) >>> mesh.cell_data['colors'] = [[255, 255, 255], ... [0, 255, 0], ... [0, 0, 255], ... [255, 0, 0]] >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False, ... rgb=True, preference='cell') >>> plotter.camera_position='xy' >>> plotter.show() Note how this varies from ``preference=='point'``. This is because each point is now being individually colored, versus in ``preference=='point'``, each cell face is individually colored. >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False, ... rgb=True, preference='point') >>> plotter.camera_position='xy' >>> plotter.show() Plot a plane with a constant color and vary its opacity by point. >>> plane = pyvista.Plane() >>> plane.plot(color='b', opacity=np.linspace(0, 1, plane.n_points), ... show_edges=True) """ self.mapper = make_mapper(_vtk.vtkDataSetMapper) # Convert the VTK data object to a pyvista wrapped object if necessary if not is_pyvista_dataset(mesh): mesh = wrap(mesh) if not is_pyvista_dataset(mesh): raise TypeError( f'Object type ({type(mesh)}) not supported for plotting in PyVista.' 
) # cast to PointSet to PolyData if isinstance(mesh, pyvista.PointSet): mesh = mesh.cast_to_polydata(deep=False) ##### Parse arguments to be used for all meshes ##### # Avoid mutating input if scalar_bar_args is None: scalar_bar_args = {'n_colors': n_colors} else: scalar_bar_args = scalar_bar_args.copy() # theme based parameters if show_edges is None: show_edges = self._theme.show_edges if split_sharp_edges is None: split_sharp_edges = self._theme.split_sharp_edges if show_scalar_bar is None: show_scalar_bar = self._theme.show_scalar_bar if lighting is None: lighting = self._theme.lighting feature_angle = kwargs.pop('feature_angle', self._theme.sharp_edges_feature_angle) if smooth_shading is None: if pbr: smooth_shading = True else: smooth_shading = self._theme.smooth_shading # supported aliases clim = kwargs.pop('rng', clim) cmap = kwargs.pop('colormap', cmap) culling = kwargs.pop("backface_culling", culling) if render_points_as_spheres is None: render_points_as_spheres = self._theme.render_points_as_spheres if name is None: name = f'{type(mesh).__name__}({mesh.memory_address})' nan_color = Color( nan_color, default_opacity=nan_opacity, default_color=self._theme.nan_color ) if color is True: color = self._theme.color if texture is False: texture = None if culling is True: culling = 'backface' rgb = kwargs.pop('rgba', rgb) # account for legacy behavior if 'stitle' in kwargs: # pragma: no cover warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning) scalar_bar_args.setdefault('title', kwargs.pop('stitle')) if "scalar" in kwargs: raise TypeError( "`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?" ) assert_empty_kwargs(**kwargs) ##### Handle composite datasets ##### if isinstance(mesh, pyvista.MultiBlock): # first check the scalars if clim is None and scalars is not None: # Get the data range across the array for all blocks # if scalars specified if isinstance(scalars, str): clim = mesh.get_data_range(scalars) else: # TODO: an array was given... how do we deal with # that? Possibly a 2D arrays or list of # arrays where first index corresponds to # the block? This could get complicated real # quick. raise TypeError( 'scalars array must be given as a string name for multiblock datasets.' 
) the_arguments = locals() the_arguments.pop('self') the_arguments.pop('mesh') the_arguments.pop('kwargs') if multi_colors: # Compute unique colors for each index of the block if _has_matplotlib(): from itertools import cycle import matplotlib cycler = matplotlib.rcParams['axes.prop_cycle'] colors = cycle(cycler) else: multi_colors = False logging.warning('Please install matplotlib for color cycles') # Now iteratively plot each element of the multiblock dataset actors = [] for idx in range(mesh.GetNumberOfBlocks()): if mesh[idx] is None: continue # Get a good name to use next_name = f'{name}-{idx}' # Get the data object if not is_pyvista_dataset(mesh[idx]): data = wrap(mesh.GetBlock(idx)) if not is_pyvista_dataset(mesh[idx]): continue # move on if we can't plot it else: data = mesh.GetBlock(idx) if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1): # Note that a block can exist but be None type # or it could have zeros points (be empty) after filtering continue # Now check that scalars is available for this dataset if isinstance(data, _vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None: ts = None else: ts = scalars if multi_colors: color = next(colors)['color'] ## Add to the scene the_arguments['color'] = color the_arguments['scalars'] = ts the_arguments['name'] = next_name the_arguments['texture'] = None a = self.add_mesh(data, **the_arguments) actors.append(a) if (reset_camera is None and not self.camera_set) or reset_camera: cpos = self.get_default_cam_pos() self.camera_position = cpos self.camera_set = False self.reset_camera() return actors ##### Plot a single PyVista mesh ##### if silhouette: if isinstance(silhouette, dict): self.add_silhouette(mesh, silhouette) else: self.add_silhouette(mesh) # Try to plot something if no preference given if scalars is None and color is None and texture is None: # Prefer texture first if len(list(mesh.textures.keys())) > 0: texture = True # If no texture, plot any active scalar else: # Make sure scalars components are not vectors/tuples scalars = mesh.active_scalars_name # Don't allow plotting of string arrays by default if scalars is not None: # and np.issubdtype(mesh.active_scalars.dtype, np.number): scalar_bar_args.setdefault('title', scalars) else: scalars = None # Make sure scalars is a numpy array after this point original_scalar_name = None if isinstance(scalars, str): self.mapper.SetArrayName(scalars) # enable rgb if the scalars name ends with rgb or rgba if rgb is None: if scalars.endswith('_rgb') or scalars.endswith('_rgba'): rgb = True original_scalar_name = scalars scalars = get_array(mesh, scalars, preference=preference, err=True) scalar_bar_args.setdefault('title', original_scalar_name) # Compute surface normals if using smooth shading if smooth_shading: mesh, scalars = prepare_smooth_shading( mesh, scalars, texture, split_sharp_edges, feature_angle, preference ) if mesh.n_points < 1: raise ValueError('Empty meshes cannot be plotted. 
Input mesh has zero points.') # set main values self.mesh = mesh self.mapper.SetInputData(self.mesh) self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors) if interpolate_before_map: self.mapper.InterpolateScalarsBeforeMappingOn() actor = _vtk.vtkActor() prop = _vtk.vtkProperty() actor.SetMapper(self.mapper) actor.SetProperty(prop) if texture is True or isinstance(texture, (str, int)): texture = mesh._activate_texture(texture) if texture: if isinstance(texture, np.ndarray): texture = numpy_to_texture(texture) if not isinstance(texture, (_vtk.vtkTexture, _vtk.vtkOpenGLTexture)): raise TypeError(f'Invalid texture type ({type(texture)})') if mesh.GetPointData().GetTCoords() is None: raise ValueError( 'Input mesh does not have texture coordinates to support the texture.' ) actor.SetTexture(texture) # Set color to white by default when using a texture if color is None: color = 'white' if scalars is None: show_scalar_bar = False self.mapper.SetScalarModeToUsePointFieldData() # see https://github.com/pyvista/pyvista/issues/950 mesh.set_active_scalars(None) # Handle making opacity array custom_opac, opacity = process_opacity( mesh, opacity, preference, n_colors, scalars, use_transparency ) # Scalars formatting ================================================== if scalars is not None: show_scalar_bar, n_colors, clim = self.mapper.set_scalars( mesh, scalars, scalar_bar_args, rgb, component, preference, interpolate_before_map, custom_opac, annotations, log_scale, nan_color, above_color, below_color, cmap, flip_scalars, opacity, categories, n_colors, clim, self._theme, show_scalar_bar, ) elif custom_opac: # no scalars but custom opacity self.mapper.set_custom_opacity( opacity, color, mesh, n_colors, preference, interpolate_before_map, rgb, self._theme, ) else: self.mapper.SetScalarModeToUseFieldData() # Set actor properties ================================================ # select view style if not style: style = 'surface' style = style.lower() if style == 'wireframe': prop.SetRepresentationToWireframe() if color is None: color = self._theme.outline_color elif style == 'points': prop.SetRepresentationToPoints() elif style == 'surface': prop.SetRepresentationToSurface() else: raise ValueError( 'Invalid style. 
Must be one of the following:\n' '\t"surface"\n' '\t"wireframe"\n' '\t"points"\n' ) prop.SetPointSize(point_size) prop.SetAmbient(ambient) prop.SetDiffuse(diffuse) prop.SetSpecular(specular) prop.SetSpecularPower(specular_power) if pbr: if not _vtk.VTK9: # pragma: no cover raise RuntimeError('Physically based rendering requires VTK 9 ' 'or newer') prop.SetInterpolationToPBR() prop.SetMetallic(metallic) prop.SetRoughness(roughness) elif smooth_shading: prop.SetInterpolationToPhong() else: prop.SetInterpolationToFlat() # edge display style if show_edges: prop.EdgeVisibilityOn() rgb_color = Color(color, default_color=self._theme.color) prop.SetColor(rgb_color.float_rgb) if isinstance(opacity, (float, int)): prop.SetOpacity(opacity) prop.SetEdgeColor(Color(edge_color, default_color=self._theme.edge_color).float_rgb) if render_points_as_spheres: prop.SetRenderPointsAsSpheres(render_points_as_spheres) if render_lines_as_tubes: prop.SetRenderLinesAsTubes(render_lines_as_tubes) # legend label if label: if not isinstance(label, str): raise TypeError('Label must be a string') geom = pyvista.Triangle() if scalars is not None: geom = pyvista.Box() rgb_color = Color('black') geom.points -= geom.center addr = actor.GetAddressAsString("") self.renderer._labels[addr] = [geom, label, rgb_color] # lighting display style if not lighting: prop.LightingOff() # set line thickness if line_width: prop.SetLineWidth(line_width) self.add_actor( actor, reset_camera=reset_camera, name=name, culling=culling, pickable=pickable, render=render, ) # hide scalar bar if using special scalars if scalar_bar_args.get('title') == '__custom_rgba': show_scalar_bar = False # Only show scalar bar if there are scalars if show_scalar_bar and scalars is not None: self.add_scalar_bar(**scalar_bar_args) self.renderer.Modified() return actor def add_volume( self, volume, scalars=None, clim=None, resolution=None, opacity='linear', n_colors=256, cmap=None, flip_scalars=False, reset_camera=None, name=None, ambient=0.0, categories=False, culling=False, multi_colors=False, blending='composite', mapper=None, scalar_bar_args=None, show_scalar_bar=None, annotations=None, pickable=True, preference="point", opacity_unit_distance=None, shade=False, diffuse=0.7, specular=0.2, specular_power=10.0, render=True, **kwargs, ): """Add a volume, rendered using a smart mapper by default. Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`. Parameters ---------- volume : 3D numpy.ndarray or pyvista.UniformGrid The input volume to visualize. 3D numpy arrays are accepted. scalars : str or numpy.ndarray, optional Scalars used to "color" the mesh. Accepts a string name of an array that is present on the mesh or an array equal to the number of cells or the number of points in the mesh. Array should be sized as a single vector. If ``scalars`` is ``None``, then the active scalars are used. clim : 2 item list, optional Color bar range for scalars. Defaults to minimum and maximum of scalars array. Example: ``[-1, 2]``. ``rng`` is also an accepted alias for this. resolution : list, optional Block resolution. opacity : str or numpy.ndarray, optional Opacity mapping for the scalars array. A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: 'linear', 'linear_r', 'geom', 'geom_r'). Or you can pass a custom made transfer function that is an array either ``n_colors`` in length or shorter. n_colors : int, optional Number of colors to use when displaying scalars. Defaults to 256. 
    The scalar bar will also have this many colors.

cmap : str, optional
    Name of the Matplotlib colormap to use when mapping the
    ``scalars``. See available Matplotlib colormaps. Only applicable
    for when displaying ``scalars``. Requires Matplotlib to be
    installed. ``colormap`` is also an accepted alias for this. If
    ``colorcet`` or ``cmocean`` are installed, their colormaps can be
    specified by name.

flip_scalars : bool, optional
    Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
    this as well.

reset_camera : bool, optional
    Reset the camera after adding this mesh to the scene.

name : str, optional
    The name for the added actor so that it can be easily updated. If
    an actor of this name already exists in the rendering window, it
    will be replaced by the new actor.

ambient : float, optional
    When lighting is enabled, this is the amount of light from 0 to 1
    that reaches the actor when not directed at the light source
    emitted from the viewer. Default 0.0.

categories : bool, optional
    If set to ``True``, then the number of unique values in the scalar
    array will be used as the ``n_colors`` argument.

culling : str, optional
    Does not render faces that are culled. Options are ``'front'`` or
    ``'back'``. This can be helpful for dense surface meshes,
    especially when edges are visible, but can cause flat meshes to be
    partially displayed. Defaults to ``False``.

multi_colors : bool, optional
    Whether or not to use multiple colors when plotting a MultiBlock
    object. Blocks will be colored sequentially as 'Reds', 'Greens',
    'Blues', and 'Grays'.

blending : str, optional
    Blending mode for visualization of the input object(s). Can be one
    of 'additive', 'maximum', 'minimum', 'composite', or 'average'.
    Defaults to 'composite'.

mapper : str, optional
    Volume mapper to use given by name. Options include:
    ``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and ``'smart'``. If
    ``None`` the ``"volume_mapper"`` in the ``self._theme`` is used.

scalar_bar_args : dict, optional
    Dictionary of keyword arguments to pass when adding the scalar bar
    to the scene. For options, see
    :func:`pyvista.BasePlotter.add_scalar_bar`.

show_scalar_bar : bool
    If ``False``, a scalar bar will not be added to the scene. Defaults
    to ``True``.

annotations : dict, optional
    Pass a dictionary of annotations. Keys are the float values in the
    scalars range to annotate on the scalar bar and the values are the
    string annotations.

pickable : bool, optional
    Set whether this mesh is pickable.

preference : str, optional
    When ``mesh.n_points == mesh.n_cells`` and setting scalars, this
    parameter sets how the scalars will be mapped to the mesh. Default
    ``'point'``, which causes the scalars to be associated with the
    mesh points. Can be either ``'point'`` or ``'cell'``.

opacity_unit_distance : float
    Set/Get the unit distance on which the scalar opacity transfer
    function is defined. Meaning that over that distance, a given
    opacity (from the transfer function) is accumulated. This is
    adjusted for the actual sampling distance during rendering. By
    default, this is the length of the diagonal of the bounding box of
    the volume divided by the dimensions.

shade : bool
    Default off. If shading is turned on, the mapper may perform
    shading calculations - in some cases shading does not apply (for
    example, in a maximum intensity projection) and therefore shading
    will not be performed even if this flag is on.

diffuse : float, optional
    The diffuse lighting coefficient. Default ``0.7``.

specular : float, optional
    The specular lighting coefficient. Default ``0.2``.
specular_power : float, optional The specular power. Between ``0.0`` and ``128.0``. render : bool, optional Force a render when True. Default ``True``. **kwargs : dict, optional Optional keyword arguments. Returns ------- vtk.vtkActor VTK actor of the volume. Examples -------- Show a built-in volume example with the coolwarm colormap. >>> from pyvista import examples >>> import pyvista as pv >>> bolt_nut = examples.download_bolt_nut() >>> pl = pv.Plotter() >>> _ = pl.add_volume(bolt_nut, cmap="coolwarm") >>> pl.show() """ # Handle default arguments # Supported aliases clim = kwargs.pop('rng', clim) cmap = kwargs.pop('colormap', cmap) culling = kwargs.pop("backface_culling", culling) if "scalar" in kwargs: raise TypeError( "`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?" ) assert_empty_kwargs(**kwargs) # Avoid mutating input if scalar_bar_args is None: scalar_bar_args = {} else: scalar_bar_args = scalar_bar_args.copy() # account for legacy behavior if 'stitle' in kwargs: # pragma: no cover warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning) scalar_bar_args.setdefault('title', kwargs.pop('stitle')) if show_scalar_bar is None: show_scalar_bar = self._theme.show_scalar_bar if culling is True: culling = 'backface' if mapper is None: mapper = self._theme.volume_mapper # only render when the plotter has already been shown if render is None: render = not self._first_time # Convert the VTK data object to a pyvista wrapped object if necessary if not is_pyvista_dataset(volume): if isinstance(volume, np.ndarray): volume = wrap(volume) if resolution is None: resolution = [1, 1, 1] elif len(resolution) != 3: raise ValueError('Invalid resolution dimensions.') volume.spacing = resolution else: volume = wrap(volume) if not is_pyvista_dataset(volume): raise TypeError( f'Object type ({type(volume)}) not supported for plotting in PyVista.' ) else: # HACK: Make a copy so the original object is not altered. # Also, place all data on the nodes as issues arise when # volume rendering on the cells. volume = volume.cell_data_to_point_data() if name is None: name = f'{type(volume).__name__}({volume.memory_address})' if isinstance(volume, pyvista.MultiBlock): from itertools import cycle cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples']) # Now iteratively plot each element of the multiblock dataset actors = [] for idx in range(volume.GetNumberOfBlocks()): if volume[idx] is None: continue # Get a good name to use next_name = f'{name}-{idx}' # Get the data object block = wrap(volume.GetBlock(idx)) if resolution is None: try: block_resolution = block.GetSpacing() except AttributeError: block_resolution = resolution else: block_resolution = resolution if multi_colors: color = next(cycler) else: color = cmap a = self.add_volume( block, resolution=block_resolution, opacity=opacity, n_colors=n_colors, cmap=color, flip_scalars=flip_scalars, reset_camera=reset_camera, name=next_name, ambient=ambient, categories=categories, culling=culling, clim=clim, mapper=mapper, pickable=pickable, opacity_unit_distance=opacity_unit_distance, shade=shade, diffuse=diffuse, specular=specular, specular_power=specular_power, render=render, ) actors.append(a) return actors if not isinstance(volume, pyvista.UniformGrid): raise TypeError( f'Type {type(volume)} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.' 
) if opacity_unit_distance is None: opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1) if scalars is None: # Make sure scalars components are not vectors/tuples scalars = volume.active_scalars # Don't allow plotting of string arrays by default if scalars is not None and np.issubdtype(scalars.dtype, np.number): scalar_bar_args.setdefault('title', volume.active_scalars_info[1]) else: raise ValueError('No scalars to use for volume rendering.') elif isinstance(scalars, str): pass ############## title = 'Data' if isinstance(scalars, str): title = scalars scalars = get_array(volume, scalars, preference=preference, err=True) scalar_bar_args.setdefault('title', title) if not isinstance(scalars, np.ndarray): scalars = np.asarray(scalars) if not np.issubdtype(scalars.dtype, np.number): raise TypeError('Non-numeric scalars are currently not supported for volume rendering.') if scalars.ndim != 1: scalars = scalars.ravel() if scalars.dtype == np.bool_ or scalars.dtype == np.uint8: scalars = scalars.astype(np.float_) # Define mapper, volume, and add the correct properties mappers = { 'fixed_point': _vtk.vtkFixedPointVolumeRayCastMapper, 'gpu': _vtk.vtkGPUVolumeRayCastMapper, 'open_gl': _vtk.vtkOpenGLGPUVolumeRayCastMapper, 'smart': _vtk.vtkSmartVolumeMapper, } if not isinstance(mapper, str) or mapper not in mappers.keys(): raise TypeError( f"Mapper ({mapper}) unknown. Available volume mappers include: {', '.join(mappers.keys())}" ) self.mapper = make_mapper(mappers[mapper]) # Scalars interpolation approach if scalars.shape[0] == volume.n_points: volume.point_data.set_array(scalars, title, True) self.mapper.SetScalarModeToUsePointData() elif scalars.shape[0] == volume.n_cells: volume.cell_data.set_array(scalars, title, True) self.mapper.SetScalarModeToUseCellData() else: raise_not_matching(scalars, volume) # Set scalars range if clim is None: clim = [np.nanmin(scalars), np.nanmax(scalars)] elif isinstance(clim, float) or isinstance(clim, int): clim = [-clim, clim] ############### scalars = scalars.astype(np.float_) with np.errstate(invalid='ignore'): idxs0 = scalars < clim[0] idxs1 = scalars > clim[1] scalars[idxs0] = clim[0] scalars[idxs1] = clim[1] scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255 # scalars = scalars.astype(np.uint8) volume[title] = scalars self.mapper.scalar_range = clim # Set colormap and build lookup table table = _vtk.vtkLookupTable() # table.SetNanColor(nan_color) # NaN's are chopped out with current implementation # above/below colors not supported with volume rendering if isinstance(annotations, dict): for val, anno in annotations.items(): table.SetAnnotation(float(val), str(anno)) if cmap is None: # Set default map if matplotlib is available if _has_matplotlib(): cmap = self._theme.cmap if cmap is not None: if not _has_matplotlib(): raise ImportError('Please install matplotlib for volume rendering.') cmap = get_cmap_safe(cmap) if categories: if categories is True: n_colors = len(np.unique(scalars)) elif isinstance(categories, int): n_colors = categories if flip_scalars: cmap = cmap.reversed() color_tf = _vtk.vtkColorTransferFunction() for ii in range(n_colors): color_tf.AddRGBPoint(ii, *cmap(ii)[:-1]) # Set opacities if isinstance(opacity, (float, int)): opacity_values = [opacity] * n_colors elif isinstance(opacity, str): opacity_values = pyvista.opacity_transfer_function(opacity, n_colors) elif isinstance(opacity, (np.ndarray, list, tuple)): opacity = np.array(opacity) opacity_values = 
opacity_transfer_function(opacity, n_colors) opacity_tf = _vtk.vtkPiecewiseFunction() for ii in range(n_colors): opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors) # Now put color tf and opacity tf into a lookup table for the scalar bar table.SetNumberOfTableValues(n_colors) lut = cmap(np.array(range(n_colors))) * 255 lut[:, 3] = opacity_values lut = lut.astype(np.uint8) table.SetTable(_vtk.numpy_to_vtk(lut)) table.SetRange(*clim) self.mapper.lookup_table = table self.mapper.SetInputData(volume) blending = blending.lower() if blending in ['additive', 'add', 'sum']: self.mapper.SetBlendModeToAdditive() elif blending in ['average', 'avg', 'average_intensity']: self.mapper.SetBlendModeToAverageIntensity() elif blending in ['composite', 'comp']: self.mapper.SetBlendModeToComposite() elif blending in ['maximum', 'max', 'maximum_intensity']: self.mapper.SetBlendModeToMaximumIntensity() elif blending in ['minimum', 'min', 'minimum_intensity']: self.mapper.SetBlendModeToMinimumIntensity() else: raise ValueError( f'Blending mode {blending!r} invalid. ' 'Please choose one of "additive", ' '"composite", "minimum" or "maximum".' ) self.mapper.Update() self.volume = _vtk.vtkVolume() self.volume.SetMapper(self.mapper) prop = _vtk.vtkVolumeProperty() prop.SetColor(color_tf) prop.SetScalarOpacity(opacity_tf) prop.SetAmbient(ambient) prop.SetScalarOpacityUnitDistance(opacity_unit_distance) prop.SetShade(shade) prop.SetDiffuse(diffuse) prop.SetSpecular(specular) prop.SetSpecularPower(specular_power) self.volume.SetProperty(prop) actor, prop = self.add_actor( self.volume, reset_camera=reset_camera, name=name, culling=culling, pickable=pickable, render=render, ) # Add scalar bar if scalars are available if show_scalar_bar and scalars is not None: self.add_scalar_bar(**scalar_bar_args) self.renderer.Modified() return actor def add_silhouette(self, mesh, params=None): """Add a silhouette of a PyVista or VTK dataset to the scene. A silhouette can also be generated directly in :func:`add_mesh <pyvista.Plotter.add_mesh>`. See also :ref:`silhouette_example`. Parameters ---------- mesh : pyvista.PolyData Mesh for generating silhouette to plot. params : dict, optional * If not supplied, the default theme values will be used. * ``color``: ``color_like``, color of the silhouette * ``line_width``: ``float``, edge width * ``opacity``: ``float`` between 0 and 1, edge transparency * ``feature_angle``: If a ``float``, display sharp edges exceeding that angle in degrees. * ``decimate``: ``float`` between 0 and 1, level of decimation Returns ------- vtk.vtkActor VTK actor of the silhouette. Examples -------- >>> import pyvista >>> from pyvista import examples >>> bunny = examples.download_bunny() >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(bunny, color='tan') >>> _ = plotter.add_silhouette(bunny, ... 
params={'color': 'red', 'line_width': 8.0}) >>> plotter.view_xy() >>> plotter.show() """ silhouette_params = self._theme.silhouette.to_dict() if params: silhouette_params.update(params) if not is_pyvista_dataset(mesh): mesh = wrap(mesh) if not isinstance(mesh, pyvista.PolyData): raise TypeError(f"Expected type is `PolyData` but {type(mesh)} was given.") if isinstance(silhouette_params["decimate"], float): silhouette_mesh = mesh.decimate(silhouette_params["decimate"]) else: silhouette_mesh = mesh alg = _vtk.vtkPolyDataSilhouette() alg.SetInputData(silhouette_mesh) alg.SetCamera(self.renderer.camera) if silhouette_params["feature_angle"] is not None: alg.SetEnableFeatureAngle(True) alg.SetFeatureAngle(silhouette_params["feature_angle"]) else: alg.SetEnableFeatureAngle(False) mapper = make_mapper(_vtk.vtkDataSetMapper) mapper.SetInputConnection(alg.GetOutputPort()) actor, prop = self.add_actor(mapper) prop.SetColor(Color(silhouette_params["color"]).float_rgb) prop.SetOpacity(silhouette_params["opacity"]) prop.SetLineWidth(silhouette_params["line_width"]) return actor def update_scalar_bar_range(self, clim, name=None): """Update the value range of the active or named scalar bar. Parameters ---------- clim : sequence The new range of scalar bar. Two item list (e.g. ``[-1, 2]``). name : str, optional The title of the scalar bar to update. """ if isinstance(clim, float) or isinstance(clim, int): clim = [-clim, clim] if len(clim) != 2: raise TypeError('clim argument must be a length 2 iterable of values: (min, max).') if name is None: if not hasattr(self, 'mapper'): raise AttributeError('This plotter does not have an active mapper.') self.mapper.scalar_range = clim return # Use the name to find the desired actor def update_mapper(mapper_helper): mapper_helper.scalar_range = clim return try: for mh in self._scalar_bar_mappers[name]: update_mapper(mh) except KeyError: raise KeyError('Name ({}) not valid/not found in this plotter.') return def clear(self): """Clear plot by removing all actors and properties. Examples -------- >>> import pyvista >>> plotter = pyvista.Plotter() >>> actor = plotter.add_mesh(pyvista.Sphere()) >>> plotter.clear() >>> plotter.renderer.actors {} """ self.renderers.clear() self.scalar_bars.clear() self.mesh = None def link_views(self, views=0): """Link the views' cameras. Parameters ---------- views : int | tuple or list If ``views`` is int, link the views to the given view index or if ``views`` is a tuple or a list, link the given views cameras. """ if isinstance(views, (int, np.integer)): for renderer in self.renderers: renderer.camera = self.renderers[views].camera return views = np.asarray(views) if np.issubdtype(views.dtype, np.integer): for view_index in views: self.renderers[view_index].camera = self.renderers[views[0]].camera else: raise TypeError(f'Expected type is int, list or tuple: {type(views)} is given') def unlink_views(self, views=None): """Unlink the views' cameras. Parameters ---------- views : None, int, tuple or list If ``views`` is None unlink all the views, if ``views`` is int unlink the selected view's camera or if ``views`` is a tuple or a list, unlink the given views cameras. 
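Examples
--------
A minimal sketch, assuming a two subplot layout whose cameras were
previously linked with ``link_views``:

>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> pl.link_views()
>>> pl.unlink_views()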
""" if views is None: for renderer in self.renderers: renderer.camera = Camera() renderer.reset_camera() elif isinstance(views, int): self.renderers[views].camera = Camera() self.renderers[views].reset_camera() elif isinstance(views, collections.abc.Iterable): for view_index in views: self.renderers[view_index].camera = Camera() self.renderers[view_index].reset_camera() else: raise TypeError(f'Expected type is None, int, list or tuple: {type(views)} is given') @wraps(ScalarBars.add_scalar_bar) def add_scalar_bar(self, *args, **kwargs): """Wrap for ``ScalarBars.add_scalar_bar``.""" # only render when the plotter has already been shown render = kwargs.get('render', None) if render is None: kwargs['render'] = not self._first_time # check if maper exists mapper = kwargs.get('mapper', None) if mapper is None: if not hasattr(self, 'mapper') or self.mapper is None: raise AttributeError('Mapper does not exist. Add a mesh with scalars first.') kwargs['mapper'] = self.mapper # title can be the first and only arg if len(args): title = args[0] else: title = kwargs.get('title', '') if title is None: title = '' kwargs['title'] = title interactive = kwargs.get('interactive', None) if interactive is None: interactive = self._theme.interactive if self.shape != (1, 1): interactive = False elif interactive and self.shape != (1, 1): raise ValueError('Interactive scalar bars disabled for multi-renderer plots') # by default, use the plotter local theme kwargs.setdefault('theme', self._theme) return self.scalar_bars.add_scalar_bar(**kwargs) def update_scalars(self, scalars, mesh=None, render=True): """Update scalars of an object in the plotter. Parameters ---------- scalars : np.ndarray Scalars to replace existing scalars. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Force a render when True. Default ``True``. """ if mesh is None: mesh = self.mesh if isinstance(mesh, (collections.abc.Iterable, pyvista.MultiBlock)): # Recursive if need to update scalars on many meshes for m in mesh: self.update_scalars(scalars, mesh=m, render=False) if render: self.render() return if isinstance(scalars, str): # Grab scalars array if name given scalars = get_array(mesh, scalars) if scalars is None: if render: self.render() return if scalars.shape[0] == mesh.GetNumberOfPoints(): data = mesh.GetPointData() elif scalars.shape[0] == mesh.GetNumberOfCells(): data = mesh.GetCellData() else: raise_not_matching(scalars, mesh) vtk_scalars = data.GetScalars() if vtk_scalars is None: raise ValueError('No active scalars') s = convert_array(vtk_scalars) s[:] = scalars data.Modified() try: # Why are the points updated here? Not all datasets have points # and only the scalars array is modified by this function... mesh.GetPoints().Modified() except: pass if render: self.render() def update_coordinates(self, points, mesh=None, render=True): """Update the points of an object in the plotter. Parameters ---------- points : np.ndarray Points to replace existing points. mesh : vtk.PolyData or vtk.UnstructuredGrid, optional Object that has already been added to the Plotter. If None, uses last added mesh. render : bool, optional Force a render when True. Default ``True``. 
""" if mesh is None: mesh = self.mesh mesh.points = points # only render when the plotter has already been shown if render is None: render = not self._first_time if render: self.render() def _clear_ren_win(self): """Clear the render window.""" if hasattr(self, 'ren_win'): self.ren_win.Finalize() del self.ren_win def close(self, render=False): """Close the render window. Parameters ---------- render : bool Unused argument. """ # optionally run just prior to exiting the plotter if self._before_close_callback is not None: self._before_close_callback(self) self._before_close_callback = None # must close out widgets first super().close() # Renderer has an axes widget, so close it self.renderers.close() self.renderers.remove_all_lights() # Grab screenshots of last render if self._store_image: self.last_image = self.screenshot(None, return_img=True) self.last_image_depth = self.get_image_depth() # reset scalar bars self.clear() # grab the display id before clearing the window # this is an experimental feature if KILL_DISPLAY: # pragma: no cover disp_id = None if hasattr(self, 'ren_win'): disp_id = self.ren_win.GetGenericDisplayId() self._clear_ren_win() if self.iren is not None: self.iren.remove_observers() self.iren.terminate_app() if KILL_DISPLAY: # pragma: no cover _kill_display(disp_id) self.iren = None if hasattr(self, 'textActor'): del self.textActor # end movie if hasattr(self, 'mwriter'): try: self.mwriter.close() except BaseException: pass # this helps managing closed plotters self._closed = True def deep_clean(self): """Clean the plotter of the memory.""" self.disable_picking() if hasattr(self, 'renderers'): self.renderers.deep_clean() if getattr(self, 'mesh', None) is not None: self.mesh.point_data = None self.mesh.cell_data = None self.mesh = None if getattr(self, 'mapper', None) is not None: self.mapper.lookup_table = None self.mapper = None self.volume = None self.textActor = None def add_text( self, text, position='upper_left', font_size=18, color=None, font=None, shadow=False, name=None, viewport=False, orientation=0.0, *, render=True, ): """Add text to plot object in the top left corner by default. Parameters ---------- text : str The text to add the rendering. position : str, tuple(float), optional Position to place the bottom left corner of the text box. If tuple is used, the position of the text uses the pixel coordinate system (default). In this case, it returns a more general `vtkOpenGLTextActor`. If string name is used, it returns a `vtkCornerAnnotation` object normally used for fixed labels (like title or xlabel). Default is to find the top left corner of the rendering window and place text box up there. Available position: ``'lower_left'``, ``'lower_right'``, ``'upper_left'``, ``'upper_right'``, ``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and ``'left_edge'``. font_size : float, optional Sets the size of the title font. Defaults to 18. color : color_like, optional Either a string, RGB list, or hex color string. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` Defaults to :attr:`pyvista.global_theme.font.color <pyvista.themes._Font.color>`. font : str, optional Font name may be ``'courier'``, ``'times'``, or ``'arial'``. shadow : bool, optional Adds a black shadow to the text. Defaults to ``False``. name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. 
viewport : bool, optional If ``True`` and position is a tuple of float, uses the normalized viewport coordinate system (values between 0.0 and 1.0 and support for HiDPI). orientation : float, optional Angle orientation of text counterclockwise in degrees. The text is rotated around an anchor point that may be on the edge or corner of the text. The default is 0 degrees, which is horizontal. render : bool, optional Force a render when ``True`` (default). Returns ------- vtk.vtkTextActor Text actor added to plot. Examples -------- >>> import pyvista >>> pl = pyvista.Plotter() >>> actor = pl.add_text('Sample Text', position='upper_right', color='blue', ... shadow=True, font_size=26) >>> pl.show() """ if font is None: font = self._theme.font.family if font_size is None: font_size = self._theme.font.size if position is None: # Set the position of the text to the top left corner window_size = self.window_size x = (window_size[0] * 0.02) / self.shape[0] y = (window_size[1] * 0.85) / self.shape[0] position = [x, y] corner_mappings = { 'lower_left': _vtk.vtkCornerAnnotation.LowerLeft, 'lower_right': _vtk.vtkCornerAnnotation.LowerRight, 'upper_left': _vtk.vtkCornerAnnotation.UpperLeft, 'upper_right': _vtk.vtkCornerAnnotation.UpperRight, 'lower_edge': _vtk.vtkCornerAnnotation.LowerEdge, 'upper_edge': _vtk.vtkCornerAnnotation.UpperEdge, 'left_edge': _vtk.vtkCornerAnnotation.LeftEdge, 'right_edge': _vtk.vtkCornerAnnotation.RightEdge, } corner_mappings['ll'] = corner_mappings['lower_left'] corner_mappings['lr'] = corner_mappings['lower_right'] corner_mappings['ul'] = corner_mappings['upper_left'] corner_mappings['ur'] = corner_mappings['upper_right'] corner_mappings['top'] = corner_mappings['upper_edge'] corner_mappings['bottom'] = corner_mappings['lower_edge'] corner_mappings['right'] = corner_mappings['right_edge'] corner_mappings['r'] = corner_mappings['right_edge'] corner_mappings['left'] = corner_mappings['left_edge'] corner_mappings['l'] = corner_mappings['left_edge'] if isinstance(position, (int, str, bool)): if isinstance(position, str): position = corner_mappings[position] elif position is True: position = corner_mappings['upper_left'] self.textActor = _vtk.vtkCornerAnnotation() # This is how you set the font size with this actor self.textActor.SetLinearFontScaleFactor(font_size // 2) self.textActor.SetText(position, text) else: self.textActor = _vtk.vtkTextActor() self.textActor.SetInput(text) self.textActor.SetPosition(position) if viewport: self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport() self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport() self.textActor.GetTextProperty().SetFontSize(int(font_size * 2)) text_prop = self.textActor.GetTextProperty() text_prop.SetColor(Color(color, default_color=self._theme.font.color).float_rgb) text_prop.SetFontFamily(FONTS[font].value) text_prop.SetShadow(shadow) text_prop.SetOrientation(orientation) self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False, render=render) return self.textActor def open_movie(self, filename, framerate=24, quality=5, **kwargs): """Establish a connection to the ffmpeg writer. Parameters ---------- filename : str Filename of the movie to open. Filename should end in mp4, but other filetypes may be supported. See :func:`imageio.get_writer() <imageio.v2.get_writer>`. framerate : int, optional Frames per second. quality : int, optional Quality 10 is the top possible quality for any codec. The range is ``0 - 10``. 
Higher quality leads to a larger file. **kwargs : dict, optional See the documentation for :func:`imageio.get_writer() <imageio.v2.get_writer>` for additional kwargs. Notes ----- See the documentation for :func:`imageio.get_writer() <imageio.v2.get_writer>`. Examples -------- Open a MP4 movie and set the quality to maximum. >>> import pyvista >>> pl = pyvista.Plotter >>> pl.open_movie('movie.mp4', quality=10) # doctest:+SKIP """ from imageio import get_writer if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) self.mwriter = get_writer(filename, fps=framerate, quality=quality, **kwargs) def open_gif(self, filename, loop=0, fps=10, palettesize=256, subrectangles=False, **kwargs): """Open a gif file. Parameters ---------- filename : str Filename of the gif to open. Filename must end in ``"gif"``. loop : int, optional The number of iterations. Default 0 (meaning loop indefinitely). fps : float, optional The number of frames per second. If duration is not given, the duration for each frame is set to 1/fps. Default 10. palettesize : int, optional The number of colors to quantize the image to. Is rounded to the nearest power of two. Must be between 2 and 256. Default 256. subrectangles : bool, optional If ``True``, will try and optimize the GIF by storing only the rectangular parts of each frame that change with respect to the previous. Default ``False``. .. note:: Setting this to ``True`` may help reduce jitter in colorbars. **kwargs : dict, optional See the documentation for :func:`imageio.get_writer() <imageio.v2.get_writer>` for additional kwargs. Notes ----- Consider using `pygifsicle <https://github.com/LucaCappelletti94/pygifsicle>`_ to reduce the final size of the gif. See `Optimizing a GIF using pygifsicle <https://imageio.readthedocs.io/en/stable/examples.html#optimizing-a-gif-using-pygifsicle>`_. Examples -------- Open a gif file, setting the framerate to 8 frames per second and reducing the colorspace to 64. >>> import pyvista >>> pl = pyvista.Plotter() >>> pl.open_gif('movie.gif', fps=8, palettesize=64) # doctest:+SKIP See :ref:`gif_movie_example` for a full example using this method. """ from imageio import get_writer if filename[-3:] != 'gif': raise ValueError('Unsupported filetype. Must end in .gif') if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) self._gif_filename = os.path.abspath(filename) self.mwriter = get_writer( filename, mode='I', loop=loop, fps=fps, palettesize=palettesize, subrectangles=subrectangles, **kwargs, ) def write_frame(self): """Write a single frame to the movie file. Examples -------- >>> import pyvista >>> plotter = pyvista.Plotter() >>> plotter.open_movie(filename) # doctest:+SKIP >>> plotter.add_mesh(pyvista.Sphere()) # doctest:+SKIP >>> plotter.write_frame() # doctest:+SKIP See :ref:`movie_example` for a full example using this method. 
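A typical movie loop writes one frame per camera step. This is an
illustrative sketch that assumes the movie opened above is still
writable; the camera is rotated with the underlying VTK ``Azimuth``
call between frames:

>>> for _ in range(36):  # doctest:+SKIP
...     plotter.camera.Azimuth(10)
...     plotter.write_frame()
>>> plotter.close()  # doctest:+SKIP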
""" # if off screen, show has not been called and we must render # before extracting an image if self._first_time: self._on_first_render_request() self.render() if not hasattr(self, 'mwriter'): raise RuntimeError('This plotter has not opened a movie or GIF file.') self.update() self.mwriter.append_data(self.image) def _run_image_filter(self, ifilter): # Update filter and grab pixels ifilter.Modified() ifilter.Update() image = pyvista.wrap(ifilter.GetOutput()) img_size = image.dimensions img_array = pyvista.utilities.point_array(image, 'ImageScalars') # Reshape and write tgt_size = (img_size[1], img_size[0], -1) return img_array.reshape(tgt_size)[::-1] def get_image_depth(self, fill_value=np.nan, reset_camera_clipping_range=True): """Return a depth image representing current render window. Parameters ---------- fill_value : float, optional Fill value for points in image that do not include objects in scene. To not use a fill value, pass ``None``. reset_camera_clipping_range : bool, optional Reset the camera clipping range to include data in view. Returns ------- numpy.ndarray Image of depth values from camera orthogonal to image plane. Notes ----- Values in image_depth are negative to adhere to a right-handed coordinate system. Examples -------- >>> import pyvista >>> plotter = pyvista.Plotter() >>> actor = plotter.add_mesh(pyvista.Sphere()) >>> plotter.store_image = True >>> plotter.show() >>> zval = plotter.get_image_depth() """ # allow no render window if not hasattr(self, 'ren_win') and self.last_image_depth is not None: zval = self.last_image_depth.copy() if fill_value is not None: zval[self._image_depth_null] = fill_value return zval self._check_rendered() self._check_has_ren_win() # Ensure points in view are within clipping range of renderer? if reset_camera_clipping_range: self.renderer.ResetCameraClippingRange() # Get the z-buffer image ifilter = _vtk.vtkWindowToImageFilter() ifilter.SetInput(self.ren_win) ifilter.ReadFrontBufferOff() ifilter.SetInputBufferTypeToZBuffer() zbuff = self._run_image_filter(ifilter)[:, :, 0] # Convert z-buffer values to depth from camera with warnings.catch_warnings(): warnings.filterwarnings('ignore') near, far = self.camera.clipping_range if self.camera.parallel_projection: zval = (zbuff - near) / (far - near) else: zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far) # Consider image values outside clipping range as nans self._image_depth_null = np.logical_or(zval < -far, np.isclose(zval, -far)) if fill_value is not None: zval[self._image_depth_null] = fill_value return zval def add_lines(self, lines, color='w', width=5, label=None, name=None): """Add lines to the plotting object. Parameters ---------- lines : np.ndarray or pyvista.PolyData Points representing line segments. For example, two line segments would be represented as ``np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])``. color : color_like, optional Either a string, rgb list, or hex color string. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` width : float, optional Thickness of lines. label : str, optional String label to use when adding a legend to the scene with :func:`pyvista.BasePlotter.add_legend`. name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. Returns ------- vtk.vtkActor Lines actor. 
Examples -------- >>> import numpy as np >>> import pyvista >>> pl = pyvista.Plotter() >>> points = np.array([[0, 1, 0], [1, 0, 0], [1, 1, 0], [2, 0, 0]]) >>> actor = pl.add_lines(points, color='yellow', width=3) >>> pl.camera_position = 'xy' >>> pl.show() """ if not isinstance(lines, np.ndarray): raise TypeError('Input should be an array of point segments') lines = pyvista.lines_from_points(lines) # Create mapper and add lines mapper = _vtk.vtkDataSetMapper() mapper.SetInputData(lines) rgb_color = Color(color) # Create actor actor = _vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetLineWidth(width) actor.GetProperty().EdgeVisibilityOn() actor.GetProperty().SetEdgeColor(rgb_color.float_rgb) actor.GetProperty().SetColor(rgb_color.float_rgb) actor.GetProperty().LightingOff() # legend label if label: if not isinstance(label, str): raise TypeError('Label must be a string') addr = actor.GetAddressAsString("") self.renderer._labels[addr] = [lines, label, rgb_color] # Add to renderer self.add_actor(actor, reset_camera=False, name=name, pickable=False) return actor @wraps(ScalarBars.remove_scalar_bar) def remove_scalar_bar(self, *args, **kwargs): """Remove the active scalar bar.""" self.scalar_bars.remove_scalar_bar(*args, **kwargs) def add_point_labels( self, points, labels, italic=False, bold=True, font_size=None, text_color=None, font_family=None, shadow=False, show_points=True, point_color=None, point_size=5, name=None, shape_color='grey', shape='rounded_rect', fill_shape=True, margin=3, shape_opacity=1.0, pickable=False, render_points_as_spheres=False, tolerance=0.001, reset_camera=None, always_visible=False, render=True, ): """Create a point actor with one label from list labels assigned to each point. Parameters ---------- points : sequence or pyvista.DataSet An ``n x 3`` sequence points or pyvista dataset with points. labels : list or str List of labels. Must be the same length as points. If a string name is given with a :class:`pyvista.DataSet` input for points, then these are fetched. italic : bool, optional Italicises title and bar labels. Default ``False``. bold : bool, optional Bolds title and bar labels. Default ``True``. font_size : float, optional Sets the size of the title font. Defaults to 16. text_color : color_like, optional Color of text. Either a string, RGB sequence, or hex color string. * ``text_color='white'`` * ``text_color='w'`` * ``text_color=[1.0, 1.0, 1.0]`` * ``text_color='#FFFFFF'`` font_family : str, optional Font family. Must be either ``'courier'``, ``'times'``, or ``'arial``. shadow : bool, optional Adds a black shadow to the text. Defaults to ``False``. show_points : bool, optional Controls if points are visible. Default ``True``. point_color : color_like, optional Either a string, rgb list, or hex color string. One of the following. * ``point_color='white'`` * ``point_color='w'`` * ``point_color=[1.0, 1.0, 1.0]`` * ``point_color='#FFFFFF'`` point_size : float, optional Size of points if visible. name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. shape_color : color_like, optional Color of points (if visible). Either a string, rgb sequence, or hex color string. shape : str, optional The string name of the shape to use. Options are ``'rect'`` or ``'rounded_rect'``. If you want no shape, pass ``None``. fill_shape : bool, optional Fill the shape with the ``shape_color``. Outlines if ``False``. 
margin : int, optional The size of the margin on the label background shape. Default is 3. shape_opacity : float, optional The opacity of the shape in the range of ``[0, 1]``. pickable : bool, optional Set whether this actor is pickable. render_points_as_spheres : bool, optional Render points as spheres rather than dots. tolerance : float, optional A tolerance to use to determine whether a point label is visible. A tolerance is usually required because the conversion from world space to display space during rendering introduces numerical round-off. reset_camera : bool, optional Reset the camera after adding the points to the scene. always_visible : bool, optional Skip adding the visibility filter. Default False. render : bool, optional Force a render when ``True`` (default). Returns ------- vtk.vtkActor2D VTK label actor. Can be used to change properties of the labels. Examples -------- >>> import numpy as np >>> import pyvista >>> pl = pyvista.Plotter() >>> points = np.array([[0.0, 0.0, 0.0], ... [1.0, 1.0, 0.0], ... [2.0, 0.0, 0.0]]) >>> labels = ['Point A', 'Point B', 'Point C'] >>> actor = pl.add_point_labels(points, labels, italic=True, font_size=20, ... point_color='red', point_size=20, ... render_points_as_spheres=True, ... always_visible=True, shadow=True) >>> pl.camera_position = 'xy' >>> pl.show() """ if font_family is None: font_family = self._theme.font.family if font_size is None: font_size = self._theme.font.size point_color = Color(point_color, default_color=self._theme.color) if isinstance(points, (list, tuple)): points = np.array(points) if isinstance(points, np.ndarray): vtkpoints = pyvista.PolyData(points) # Cast to poly data elif is_pyvista_dataset(points): vtkpoints = pyvista.PolyData(points.points) if isinstance(labels, str): labels = points.point_data[labels] else: raise TypeError(f'Points type not usable: {type(points)}') if len(vtkpoints.points) != len(labels): raise ValueError('There must be one label for each point') if name is None: name = f'{type(vtkpoints).__name__}({vtkpoints.memory_address})' vtklabels = _vtk.vtkStringArray() vtklabels.SetName('labels') for item in labels: vtklabels.InsertNextValue(str(item)) vtkpoints.GetPointData().AddArray(vtklabels) # Create hierarchy hier = _vtk.vtkPointSetToLabelHierarchy() hier.SetLabelArrayName('labels') if always_visible: hier.SetInputData(vtkpoints) else: # Only show visible points vis_points = _vtk.vtkSelectVisiblePoints() vis_points.SetInputData(vtkpoints) vis_points.SetRenderer(self.renderer) vis_points.SetTolerance(tolerance) hier.SetInputConnection(vis_points.GetOutputPort()) # create label mapper labelMapper = _vtk.vtkLabelPlacementMapper() labelMapper.SetInputConnection(hier.GetOutputPort()) if not isinstance(shape, str): labelMapper.SetShapeToNone() elif shape.lower() in 'rect': labelMapper.SetShapeToRect() elif shape.lower() in 'rounded_rect': labelMapper.SetShapeToRoundedRect() else: raise ValueError(f'Shape ({shape}) not understood') if fill_shape: labelMapper.SetStyleToFilled() else: labelMapper.SetStyleToOutline() labelMapper.SetBackgroundColor(Color(shape_color).float_rgb) labelMapper.SetBackgroundOpacity(shape_opacity) labelMapper.SetMargin(margin) textprop = hier.GetTextProperty() textprop.SetItalic(italic) textprop.SetBold(bold) textprop.SetFontSize(font_size) textprop.SetFontFamily(parse_font_family(font_family)) textprop.SetColor(Color(text_color, default_color=self._theme.font.color).float_rgb) textprop.SetShadow(shadow) self.remove_actor(f'{name}-points', reset_camera=False) 
self.remove_actor(f'{name}-labels', reset_camera=False) # add points if show_points: self.add_mesh( vtkpoints, color=point_color, point_size=point_size, name=f'{name}-points', pickable=pickable, render_points_as_spheres=render_points_as_spheres, reset_camera=reset_camera, render=render, ) label_actor = _vtk.vtkActor2D() label_actor.SetMapper(labelMapper) self.add_actor(label_actor, reset_camera=False, name=f'{name}-labels', pickable=False) return label_actor def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs): """Label the points from a dataset with the values of their scalars. Wrapper for :func:`pyvista.BasePlotter.add_point_labels`. Parameters ---------- points : numpy.ndarray or pyvista.DataSet An ``n x 3`` numpy.ndarray or pyvista dataset with points. labels : str, optional String name of the point data array to use. fmt : str, optional String formatter used to format numerical data. preamble : str, optional Text before the start of each label. **kwargs : dict, optional Keyword arguments passed to :func:`pyvista.BasePlotter.add_point_labels`. Returns ------- vtk.vtkActor2D VTK label actor. Can be used to change properties of the labels. """ if not is_pyvista_dataset(points): raise TypeError(f'input points must be a pyvista dataset, not: {type(points)}') if not isinstance(labels, str): raise TypeError('labels must be a string name of the scalars array to use') if fmt is None: fmt = self._theme.font.fmt if fmt is None: fmt = '%.6e' scalars = points.point_data[labels] phrase = f'{preamble} %.3e' labels = [phrase % val for val in scalars] return self.add_point_labels(points, labels, **kwargs) def add_points(self, points, **kwargs): """Add points to a mesh. Parameters ---------- points : numpy.ndarray or pyvista.DataSet Array of points or the points from a pyvista object. **kwargs : dict, optional See :func:`pyvista.BasePlotter.add_mesh` for optional keyword arguments. Returns ------- vtk.vtkActor Actor of the mesh. Examples -------- Add a numpy array of points to a mesh. >>> import numpy as np >>> import pyvista >>> points = np.random.random((10, 3)) >>> pl = pyvista.Plotter() >>> actor = pl.add_points(points, render_points_as_spheres=True, ... point_size=100.0) >>> pl.show() """ kwargs['style'] = 'points' return self.add_mesh(points, **kwargs) def add_arrows(self, cent, direction, mag=1, **kwargs): """Add arrows to the plotter. Parameters ---------- cent : np.ndarray Array of centers. direction : np.ndarray Array of direction vectors. mag : float, optional Amount to scale the direction vectors. **kwargs : dict, optional See :func:`pyvista.BasePlotter.add_mesh` for optional keyword arguments. Returns ------- vtk.vtkActor VTK actor of the arrows. Examples -------- Plot a random field of vectors and save a screenshot of it. 
>>> import numpy as np >>> import pyvista >>> cent = np.random.random((10, 3)) >>> direction = np.random.random((10, 3)) >>> plotter = pyvista.Plotter() >>> _ = plotter.add_arrows(cent, direction, mag=2) >>> plotter.show() """ if cent.shape != direction.shape: # pragma: no cover raise ValueError('center and direction arrays must have the same shape') direction = direction.copy() if cent.ndim != 2: cent = cent.reshape((-1, 3)) if direction.ndim != 2: direction = direction.reshape((-1, 3)) if mag != 1: direction = direction * mag pdata = pyvista.vector_poly_data(cent, direction) # Create arrow object arrow = _vtk.vtkArrowSource() arrow.Update() glyph3D = _vtk.vtkGlyph3D() glyph3D.SetSourceData(arrow.GetOutput()) glyph3D.SetInputData(pdata) glyph3D.SetVectorModeToUseVector() glyph3D.Update() arrows = wrap(glyph3D.GetOutput()) return self.add_mesh(arrows, **kwargs) @staticmethod def _save_image(image, filename, return_img): """Save to file and/or return a NumPy image array. This is an internal helper. """ if not image.size: raise ValueError('Empty image. Have you run plot() first?') # write screenshot to file if requested if isinstance(filename, (str, pathlib.Path, io.BytesIO)): from PIL import Image if isinstance(filename, (str, pathlib.Path)): filename = pathlib.Path(filename) if isinstance(pyvista.FIGURE_PATH, str) and not filename.is_absolute(): filename = pathlib.Path(os.path.join(pyvista.FIGURE_PATH, filename)) if not filename.suffix: filename = filename.with_suffix('.png') elif filename.suffix not in SUPPORTED_FORMATS: raise ValueError( f'Unsupported extension {filename.suffix}\n' f'Must be one of the following: {SUPPORTED_FORMATS}' ) filename = os.path.abspath(os.path.expanduser(str(filename))) Image.fromarray(image).save(filename) else: Image.fromarray(image).save(filename, format="PNG") # return image array if requested if return_img: return image def save_graphic(self, filename, title='PyVista Export', raster=True, painter=True): """Save a screenshot of the rendering window as a graphic file. This can be helpful for publication documents. The supported formats are: * ``'.svg'`` * ``'.eps'`` * ``'.ps'`` * ``'.pdf'`` * ``'.tex'`` Parameters ---------- filename : str Path to fsave the graphic file to. title : str, optional Title to use within the file properties. raster : bool, optional Attempt to write 3D properties as a raster image. painter : bool, optional Configure the exporter to expect a painter-ordered 2D rendering, that is, a rendering at a fixed depth where primitives are drawn from the bottom up. 
Examples -------- >>> import pyvista >>> from pyvista import examples >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(examples.load_airplane(), smooth_shading=True) >>> _ = pl.add_background_image(examples.mapfile) >>> pl.save_graphic("img.svg") # doctest:+SKIP """ if not hasattr(self, 'ren_win'): raise AttributeError('This plotter is closed and unable to save a screenshot.') if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) filename = os.path.abspath(os.path.expanduser(filename)) extension = pyvista.fileio.get_ext(filename) writer = _vtk.lazy_vtkGL2PSExporter() modes = { '.svg': writer.SetFileFormatToSVG, '.eps': writer.SetFileFormatToEPS, '.ps': writer.SetFileFormatToPS, '.pdf': writer.SetFileFormatToPDF, '.tex': writer.SetFileFormatToTeX, } if extension not in modes: raise ValueError( f"Extension ({extension}) is an invalid choice.\n\n" f"Valid options include: {', '.join(modes.keys())}" ) writer.CompressOff() writer.SetFilePrefix(filename.replace(extension, '')) writer.SetInput(self.ren_win) modes[extension]() writer.SetTitle(title) writer.SetWrite3DPropsAsRasterImage(raster) if painter: writer.UsePainterSettings() writer.Update() def screenshot( self, filename=None, transparent_background=None, return_img=True, window_size=None ): """Take screenshot at current camera position. Parameters ---------- filename : str, pathlib.Path, BytesIO, optional Location to write image to. If ``None``, no image is written. transparent_background : bool, optional Whether to make the background transparent. The default is looked up on the plotter's theme. return_img : bool, optional If ``True`` (the default), a NumPy array of the image will be returned. window_size : 2-length tuple, optional Set the plotter's size to this ``(width, height)`` before taking the screenshot. Returns ------- numpy.ndarray Array containing pixel RGB and alpha. Sized: * [Window height x Window width x 3] if ``transparent_background`` is set to ``False``. * [Window height x Window width x 4] if ``transparent_background`` is set to ``True``. Examples -------- >>> import pyvista >>> sphere = pyvista.Sphere() >>> plotter = pyvista.Plotter(off_screen=True) >>> actor = plotter.add_mesh(sphere) >>> plotter.screenshot('screenshot.png') # doctest:+SKIP """ if window_size is not None: self.window_size = window_size # configure image filter if transparent_background is None: transparent_background = self._theme.transparent_background self.image_transparent_background = transparent_background # This if statement allows you to save screenshots of closed plotters # This is needed for the sphinx-gallery to work if not hasattr(self, 'ren_win'): # If plotter has been closed... 
# check if last_image exists if self.last_image is not None: # Save last image return self._save_image(self.last_image, filename, return_img) # Plotter hasn't been rendered or was improperly closed raise RuntimeError('This plotter is closed and unable to save a screenshot.') if self._first_time and not self.off_screen: raise RuntimeError( "Nothing to screenshot - call .show first or use the off_screen argument" ) # if off screen, show has not been called and we must render # before extracting an image if self._first_time: self._on_first_render_request() self.render() return self._save_image(self.image, filename, return_img) @wraps(Renderers.set_background) def set_background(self, *args, **kwargs): """Wrap ``Renderers.set_background``.""" self.renderers.set_background(*args, **kwargs) def generate_orbital_path(self, factor=3.0, n_points=20, viewup=None, shift=0.0): """Generate an orbital path around the data scene. Parameters ---------- factor : float, optional A scaling factor when building the orbital extent. n_points : int, optional Number of points on the orbital path. viewup : list(float), optional The normal to the orbital plane. shift : float, optional Shift the plane up/down from the center of the scene by this amount. Returns ------- pyvista.PolyData PolyData containing the orbital path. Examples -------- Generate an orbital path around a sphere. >>> import pyvista >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(pyvista.Sphere()) >>> viewup = [0, 0, 1] >>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=50, ... shift=0.0, viewup=viewup) See :ref:`orbiting_example` for a full example using this method. """ if viewup is None: viewup = self._theme.camera['viewup'] center = np.array(self.center) bnds = np.array(self.bounds) radius = (bnds[1] - bnds[0]) * factor y = (bnds[3] - bnds[2]) * factor if y > radius: radius = y center += np.array(viewup) * shift return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points) def fly_to(self, point): """Move the current camera's focal point to a position point. The movement is animated over the number of frames specified in NumberOfFlyFrames. The LOD desired frame rate is used. Parameters ---------- point : sequence Point to fly to in the form of ``(x, y, z)``. """ self.iren.fly_to(self.renderer, point) def orbit_on_path( self, path=None, focus=None, step=0.5, viewup=None, write_frames=False, threaded=False, progress_bar=False, ): """Orbit on the given path focusing on the focus point. Parameters ---------- path : pyvista.PolyData Path of orbital points. The order in the points is the order of travel. focus : list(float) of length 3, optional The point of focus the camera. step : float, optional The timestep between flying to each camera position. viewup : list(float), optional The normal to the orbital plane. write_frames : bool, optional Assume a file is open and write a frame on each camera view during the orbit. threaded : bool, optional Run this as a background thread. Generally used within a GUI (i.e. PyQt). progress_bar : bool, optional Show the progress bar when proceeding through the path. This can be helpful to show progress when generating movies with ``off_screen=True``. Examples -------- Plot an orbit around the earth. Save the gif as a temporary file. 
>>> import os >>> from tempfile import mkdtemp >>> import pyvista >>> from pyvista import examples >>> filename = os.path.join(mkdtemp(), 'orbit.gif') >>> plotter = pyvista.Plotter(window_size=[300, 300]) >>> _ = plotter.add_mesh(examples.load_globe(), smooth_shading=True) >>> plotter.open_gif(filename) >>> viewup = [0, 0, 1] >>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=24, ... shift=0.0, viewup=viewup) >>> plotter.orbit_on_path(orbit, write_frames=True, viewup=viewup, ... step=0.02) See :ref:`orbiting_example` for a full example using this method. """ if focus is None: focus = self.center if viewup is None: viewup = self._theme.camera['viewup'] if path is None: path = self.generate_orbital_path(viewup=viewup) if not is_pyvista_dataset(path): path = pyvista.PolyData(path) points = path.points # Make sure the whole scene is visible self.camera.thickness = path.length if progress_bar: try: from tqdm import tqdm except ImportError: # pragma: no cover raise ImportError("Please install `tqdm` to use ``progress_bar=True``") def orbit(): """Define the internal thread for running the orbit.""" if progress_bar: points_seq = tqdm(points) else: points_seq = points for point in points_seq: tstart = time.time() # include the render time in the step time self.set_position(point, render=False) self.set_focus(focus, render=False) self.set_viewup(viewup, render=False) self.renderer.ResetCameraClippingRange() if write_frames: self.write_frame() else: self.render() sleep_time = step - (time.time() - tstart) if sleep_time > 0: time.sleep(sleep_time) if write_frames: self.mwriter.close() if threaded: thread = Thread(target=orbit) thread.start() else: orbit() def export_vtkjs(self, filename, compress_arrays=False): """Export the current rendering scene as a VTKjs scene. It can be used for rendering in a web browser. Parameters ---------- filename : str Filename to export the scene to. A filename extension of ``'.vtkjs'`` will be added. compress_arrays : bool, optional Enable array compression. Examples -------- >>> import pyvista >>> from pyvista import examples >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(examples.load_hexbeam()) >>> pl.export_vtkjs("sample") # doctest:+SKIP """ if not hasattr(self, 'ren_win'): raise RuntimeError('Export must be called before showing/closing the scene.') if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) else: filename = os.path.abspath(os.path.expanduser(filename)) export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays) def export_obj(self, filename): """Export scene to OBJ format. Parameters ---------- filename : str Filename to export the scene to. Should end in ``'.obj'``. Returns ------- vtkOBJExporter Object exporter. 
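Examples
--------
Export a scene containing a sphere (an illustrative sketch; the
plotter must still have an open render window when this is called):

>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.export_obj('scene.obj')  # doctest:+SKIP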
""" # lazy import vtkOBJExporter here as it takes a long time to # load and is not always used try: from vtkmodules.vtkIOExport import vtkOBJExporter except: # noqa: E722 from vtk import vtkOBJExporter if not hasattr(self, "ren_win"): raise RuntimeError("This plotter must still have a render window open.") if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename): filename = os.path.join(pyvista.FIGURE_PATH, filename) else: filename = os.path.abspath(os.path.expanduser(filename)) exporter = vtkOBJExporter() exporter.SetFilePrefix(filename) exporter.SetRenderWindow(self.ren_win) return exporter.Write() def __del__(self): """Delete the plotter.""" # We have to check here if it has the closed attribute as it # may not exist should the plotter have failed to initialize. if hasattr(self, '_closed'): if not self._closed: self.close() self.deep_clean() if hasattr(self, 'renderers'): del self.renderers def add_background_image(self, image_path, scale=1, auto_resize=True, as_global=True): """Add a background image to a plot. Parameters ---------- image_path : str Path to an image file. scale : float, optional Scale the image larger or smaller relative to the size of the window. For example, a scale size of 2 will make the largest dimension of the image twice as large as the largest dimension of the render window. Defaults to 1. auto_resize : bool, optional Resize the background when the render window changes size. as_global : bool, optional When multiple render windows are present, setting ``as_global=False`` will cause the background to only appear in one window. Examples -------- >>> import pyvista >>> from pyvista import examples >>> plotter = pyvista.Plotter() >>> actor = plotter.add_mesh(pyvista.Sphere()) >>> plotter.add_background_image(examples.mapfile) >>> plotter.show() """ if self.renderers.has_active_background_renderer: raise RuntimeError( 'A background image already exists. ' 'Remove it with ``remove_background_image`` ' 'before adding one' ) # Need to change the number of layers to support an additional # background layer if not self._has_background_layer: self.ren_win.SetNumberOfLayers(3) renderer = self.renderers.add_background_renderer(image_path, scale, as_global) self.ren_win.AddRenderer(renderer) # set up autoscaling of the image if auto_resize: # pragma: no cover self.iren.add_observer('ModifiedEvent', renderer.resize) @wraps(Renderers.remove_background_image) def remove_background_image(self): """Wrap ``Renderers.remove_background_image``.""" self.renderers.remove_background_image() # return the active renderer to the top, otherwise flat background # will not be rendered self.renderer.layer = 0 def _on_first_render_request(self, cpos=None): """Once an image or render is officially requested, run this routine. For example on the show call or any screenshot producing code. """ # reset unless camera for the first render unless camera is set if self._first_time: # and not self.camera_set: for renderer in self.renderers: if not renderer.camera_set and cpos is None: renderer.camera_position = renderer.get_default_cam_pos() renderer.ResetCamera() elif cpos is not None: renderer.camera_position = cpos self._first_time = False def reset_camera_clipping_range(self): """Reset camera clipping planes.""" self.renderer.ResetCameraClippingRange() def add_light(self, light, only_active=False): """Add a Light to the scene. Parameters ---------- light : Light or vtkLight The light to be added. only_active : bool, optional If ``True``, only add the light to the active renderer. 
The default is that every renderer adds the light. To add the light to an arbitrary renderer, see :func:`pyvista.plotting.renderer.Renderer.add_light`. Examples -------- Create a plotter that we initialize with no lights, and add a cube and a single headlight to it. >>> import pyvista as pv >>> plotter = pv.Plotter(lighting='none') >>> _ = plotter.add_mesh(pv.Cube()) >>> light = pv.Light(color='cyan', light_type='headlight') >>> plotter.add_light(light) >>> plotter.show() """ renderers = [self.renderer] if only_active else self.renderers for renderer in renderers: renderer.add_light(light) def remove_all_lights(self, only_active=False): """Remove all lights from the scene. Parameters ---------- only_active : bool If ``True``, only remove lights from the active renderer. The default is that lights are stripped from every renderer. Examples -------- Create a plotter and remove all lights after initialization. Note how the mesh rendered is completely flat >>> import pyvista as pv >>> plotter = pv.Plotter() >>> plotter.remove_all_lights() >>> plotter.renderer.lights [] >>> _ = plotter.add_mesh(pv.Sphere(), show_edges=True) >>> plotter.show() Note how this differs from a plot with default lighting >>> pv.Sphere().plot(show_edges=True, lighting=True) """ renderers = [self.renderer] if only_active else self.renderers for renderer in renderers: renderer.remove_all_lights() def where_is(self, name): """Return the subplot coordinates of a given actor. Parameters ---------- name : str Actor's name. Returns ------- list(tuple(int)) A list with the subplot coordinates of the actor. Examples -------- >>> import pyvista as pv >>> plotter = pv.Plotter(shape=(2, 2)) >>> plotter.subplot(0, 0) >>> _ = plotter.add_mesh(pv.Box(), name='box') >>> plotter.subplot(0, 1) >>> _ = plotter.add_mesh(pv.Sphere(), name='sphere') >>> plotter.subplot(1, 0) >>> _ = plotter.add_mesh(pv.Box(), name='box') >>> plotter.subplot(1, 1) >>> _ = plotter.add_mesh(pv.Cone(), name='cone') >>> plotter.where_is('box') [(0, 0), (1, 0)] >>> plotter.show() """ places = [] for index in range(len(self.renderers)): if name in self.renderers[index]._actors: places.append(tuple(self.renderers.index_to_loc(index))) return places def add_ruler( self, pointa, pointb, flip_range=False, number_labels=5, show_labels=True, font_size_factor=0.6, label_size_factor=1.0, label_format=None, title="Distance", number_minor_ticks=0, tick_length=5, minor_tick_length=3, show_ticks=True, tick_label_offset=2, ): """Add ruler. The ruler is a 2D object that is not occluded by 3D objects. To avoid issues with perspective, it is recommended to use parallel projection, i.e. :func:`Plotter.enable_parallel_projection`, and place the ruler orthogonal to the viewing direction. The title and labels are placed to the right of ruler moving from ``pointa`` to ``pointb``. Use ``flip_range`` to flip the ``0`` location, if needed. Since the ruler is placed in an overlay on the viewing scene, the camera does not automatically reset to include the ruler in the view. Parameters ---------- pointa : Sequence Starting point for ruler. pointb : Sequence Ending point for ruler. flip_range : bool If ``True``, the distance range goes from ``pointb`` to ``pointa``. number_labels : int Number of labels to place on ruler. show_labels : bool, optional Whether to show labels. font_size_factor : float Factor to scale font size overall. label_size_factor : float Factor to scale label size relative to title size. label_format : str, optional A printf style format for labels, e.g. '%E'. 
title : str, optional The title to display. number_minor_ticks : int, optional Number of minor ticks between major ticks. tick_length : int Length of ticks in pixels. minor_tick_length : int Length of minor ticks in pixels. show_ticks : bool, optional Whether to show the ticks. tick_label_offset : int Offset between tick and label in pixels. Returns ------- vtk.vtkActor VTK actor of the ruler. Examples -------- >>> import pyvista >>> cone = pyvista.Cone(height=2.0, radius=0.5) >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(cone) Measure x direction of cone and place ruler slightly below. >>> _ = plotter.add_ruler( ... pointa=[cone.bounds[0], cone.bounds[2] - 0.1, 0.0], ... pointb=[cone.bounds[1], cone.bounds[2] - 0.1, 0.0], ... title="X Distance" ... ) Measure y direction of cone and place ruler slightly to left. The title and labels are placed to the right of the ruler when traveling from ``pointa`` to ``pointb``. >>> _ = plotter.add_ruler( ... pointa=[cone.bounds[0] - 0.1, cone.bounds[3], 0.0], ... pointb=[cone.bounds[0] - 0.1, cone.bounds[2], 0.0], ... flip_range=True, ... title="Y Distance" ... ) >>> plotter.enable_parallel_projection() >>> plotter.view_xy() >>> plotter.show() """ ruler = _vtk.vtkAxisActor2D() ruler.GetPositionCoordinate().SetCoordinateSystemToWorld() ruler.GetPosition2Coordinate().SetCoordinateSystemToWorld() ruler.GetPositionCoordinate().SetReferenceCoordinate(None) ruler.GetPositionCoordinate().SetValue(pointa[0], pointa[1], pointa[2]) ruler.GetPosition2Coordinate().SetValue(pointb[0], pointb[1], pointb[2]) distance = np.linalg.norm(np.asarray(pointa) - np.asarray(pointb)) if flip_range: ruler.SetRange(distance, 0) else: ruler.SetRange(0, distance) ruler.SetTitle(title) ruler.SetFontFactor(font_size_factor) ruler.SetLabelFactor(label_size_factor) ruler.SetNumberOfLabels(number_labels) ruler.SetLabelVisibility(show_labels) if label_format: ruler.SetLabelFormat(label_format) ruler.SetNumberOfMinorTicks(number_minor_ticks) ruler.SetTickVisibility(show_ticks) ruler.SetTickLength(tick_length) ruler.SetMinorTickLength(minor_tick_length) ruler.SetTickOffset(tick_label_offset) self.add_actor(ruler, reset_camera=True, pickable=False) return ruler class Plotter(BasePlotter): """Plotting object to display vtk meshes or numpy arrays. Parameters ---------- off_screen : bool, optional Renders off screen when ``True``. Useful for automated screenshots. notebook : bool, optional When ``True``, the resulting plot is placed inline a jupyter notebook. Assumes a jupyter console is active. Automatically enables ``off_screen``. shape : list or tuple, optional Number of sub-render windows inside of the main window. Specify two across with ``shape=(2, 1)`` and a two by two grid with ``shape=(2, 2)``. By default there is only one render window. Can also accept a string descriptor as shape. E.g.: * ``shape="3|1"`` means 3 plots on the left and 1 on the right, * ``shape="4/2"`` means 4 plots on top and 2 at the bottom. border : bool, optional Draw a border around each render window. Default ``False``. border_color : color_like, optional Either a string, rgb list, or hex color string. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` window_size : list, optional Window size in pixels. Defaults to ``[1024, 768]``, unless set differently in the relevant theme's ``window_size`` property. multi_samples : int, optional The number of multi-samples used to mitigate aliasing. 
4 is a good default but 8 will have better results with a potential impact on performance. line_smoothing : bool, optional If ``True``, enable line smoothing. polygon_smoothing : bool, optional If ``True``, enable polygon smoothing. lighting : str, optional What lighting to set up for the plotter. Accepted options: * ``'light_kit'``: a vtk Light Kit composed of 5 lights. * ``'three lights'``: illumination using 3 lights. * ``'none'``: no light sources at instantiation. The default is a ``'light_kit'`` (to be precise, 5 separate lights that act like a Light Kit). theme : pyvista.themes.DefaultTheme, optional Plot-specific theme. Examples -------- >>> import pyvista >>> from pyvista import examples >>> mesh = examples.load_hexbeam() >>> another_mesh = examples.load_uniform() >>> plotter = pyvista.Plotter() >>> actor = plotter.add_mesh(mesh, color='red') >>> actor = plotter.add_mesh(another_mesh, color='blue') >>> plotter.show() """ last_update_time = 0.0 right_timer_id = -1 def __init__( self, off_screen=None, notebook=None, shape=(1, 1), groups=None, row_weights=None, col_weights=None, border=None, border_color='k', border_width=2.0, window_size=None, multi_samples=None, line_smoothing=False, point_smoothing=False, polygon_smoothing=False, splitting_position=None, title=None, lighting='light kit', theme=None, ): """Initialize a vtk plotting object.""" super().__init__( shape=shape, border=border, border_color=border_color, border_width=border_width, groups=groups, row_weights=row_weights, col_weights=col_weights, splitting_position=splitting_position, title=title, lighting=lighting, theme=theme, ) log.debug('Plotter init start') # check if a plotting backend is enabled _warn_xserver() def on_timer(iren, event_id): """Exit application if interactive renderer stops.""" if event_id == 'TimerEvent' and self.iren._style != "Context": self.iren.terminate_app() if off_screen is None: off_screen = pyvista.OFF_SCREEN if notebook is None: if self._theme.notebook is not None: notebook = self._theme.notebook else: notebook = scooby.in_ipykernel() self.notebook = notebook if self.notebook: off_screen = True self.off_screen = off_screen self._window_size_unset = False if window_size is None: self._window_size_unset = True window_size = self._theme.window_size self.__prior_window_size = window_size if multi_samples is None: multi_samples = self._theme.multi_samples # initialize render window self.ren_win = _vtk.vtkRenderWindow() self.ren_win.SetMultiSamples(multi_samples) self.ren_win.SetBorders(True) if line_smoothing: self.ren_win.LineSmoothingOn() if point_smoothing: self.ren_win.PointSmoothingOn() if polygon_smoothing: self.ren_win.PolygonSmoothingOn() for renderer in self.renderers: self.ren_win.AddRenderer(renderer) # Add the shadow renderer to allow us to capture interactions within # a given viewport # https://vtk.org/pipermail/vtkusers/2018-June/102030.html number_or_layers = self.ren_win.GetNumberOfLayers() current_layer = self.renderer.GetLayer() self.ren_win.SetNumberOfLayers(number_or_layers + 1) self.ren_win.AddRenderer(self.renderers.shadow_renderer) self.renderers.shadow_renderer.SetLayer(current_layer + 1) self.renderers.shadow_renderer.SetInteractive(False) # never needs to capture if self.off_screen: self.ren_win.SetOffScreenRendering(1) # vtkGenericRenderWindowInteractor has no event loop and # allows the display client to close on Linux when # off_screen. 
We still want an interactor for off screen # plotting since there are some widgets (like the axes # widget) that need an interactor interactor = _vtk.vtkGenericRenderWindowInteractor() else: interactor = None # Add ren win and interactor self.iren = RenderWindowInteractor(self, light_follow_camera=False, interactor=interactor) self.iren.set_render_window(self.ren_win) self.enable_trackball_style() # internally calls update_style() self.iren.add_observer("KeyPressEvent", self.key_press_event) # Set camera widget based on theme. This requires that an # interactor be present. if self.theme._enable_camera_orientation_widget: self.add_camera_orientation_widget() # Set background self.set_background(self._theme.background) # Set window size self.window_size = window_size # add timer event if interactive render exists self.iren.add_observer(_vtk.vtkCommand.TimerEvent, on_timer) if self._theme.depth_peeling.enabled: if self.enable_depth_peeling(): for renderer in self.renderers: renderer.enable_depth_peeling() log.debug('Plotter init stop') def show( self, title=None, window_size=None, interactive=True, auto_close=None, interactive_update=False, full_screen=None, screenshot=False, return_img=False, cpos=None, use_ipyvtk=None, jupyter_backend=None, return_viewer=False, return_cpos=None, **kwargs, ): """Display the plotting window. Parameters ---------- title : str, optional Title of plotting window. Defaults to :attr:`pyvista.global_theme.title <pyvista.themes.DefaultTheme.title>`. window_size : list, optional Window size in pixels. Defaults to :attr:`pyvista.global_theme.window_size <pyvista.themes.DefaultTheme.window_size>`. interactive : bool, optional Enabled by default. Allows user to pan and move figure. Defaults to :attr:`pyvista.global_theme.interactive <pyvista.themes.DefaultTheme.interactive>`. auto_close : bool, optional Exits plotting session when user closes the window when interactive is ``True``. Defaults to :attr:`pyvista.global_theme.auto_close <pyvista.themes.DefaultTheme.auto_close>`. interactive_update : bool, optional Disabled by default. Allows user to non-blocking draw, user should call :func:`BasePlotter.update` in each iteration. full_screen : bool, optional Opens window in full screen. When enabled, ignores ``window_size``. Defaults to :attr:`pyvista.global_theme.full_screen <pyvista.themes.DefaultTheme.full_screen>`. screenshot : str, pathlib.Path, BytesIO or bool, optional Take a screenshot of the initial state of the plot. If a string, it specifies the path to which the screenshot is saved. If ``True``, the screenshot is returned as an array. Defaults to ``False``. For interactive screenshots it's recommended to first call ``show()`` with ``auto_close=False`` to set the scene, then save the screenshot in a separate call to ``show()`` or :func:`Plotter.screenshot`. return_img : bool Returns a numpy array representing the last image along with the camera position. cpos : list(tuple(floats)) The camera position. You can also set this with :attr:`Plotter.camera_position`. use_ipyvtk : bool, optional Deprecated. Instead, set the backend either globally with ``pyvista.set_jupyter_backend('ipyvtklink')`` or with ``backend='ipyvtklink'``. jupyter_backend : str, optional Jupyter notebook plotting backend to use. One of the following: * ``'none'`` : Do not display in the notebook. * ``'pythreejs'`` : Show a ``pythreejs`` widget * ``'static'`` : Display a static figure. * ``'ipygany'`` : Show a ``ipygany`` widget * ``'panel'`` : Show a ``panel`` widget. 
This can also be set globally with :func:`pyvista.set_jupyter_backend`. return_viewer : bool, optional Return the jupyterlab viewer, scene, or display object when plotting with jupyter notebook. return_cpos : bool, optional Return the last camera position from the render window when enabled. Default based on theme setting. See :attr:`pyvista.themes.DefaultTheme.return_cpos`. **kwargs : dict, optional Developer keyword arguments. Returns ------- cpos : list List of camera position, focal point, and view up. Returned only when ``return_cpos=True`` or set in the default global or plot theme. Not returned when in a jupyter notebook and ``return_viewer=True``. image : np.ndarray Numpy array of the last image when either ``return_img=True`` or ``screenshot=True`` is set. Not returned when in a jupyter notebook with ``return_viewer=True``. Optionally contains alpha values. Sized: * [Window height x Window width x 3] if the theme sets ``transparent_background=False``. * [Window height x Window width x 4] if the theme sets ``transparent_background=True``. widget IPython widget when ``return_viewer=True``. Notes ----- Please use the ``q``-key to close the plotter as some operating systems (namely Windows) will experience issues saving a screenshot if the exit button in the GUI is pressed. Examples -------- Simply show the plot of a mesh. >>> import pyvista as pv >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Cube()) >>> pl.show() Take a screenshot interactively. Screenshot will be of the first image shown, so use the first call with ``auto_close=False`` to set the scene before taking the screenshot. >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Cube()) >>> pl.show(auto_close=False) # doctest:+SKIP >>> pl.show(screenshot='my_image.png') # doctest:+SKIP Display a ``pythreejs`` scene within a jupyter notebook >>> pl.show(jupyter_backend='pythreejs') # doctest:+SKIP Return a ``pythreejs`` scene. >>> pl.show(jupyter_backend='pythreejs', return_viewer=True) # doctest:+SKIP Obtain the camera position when using ``show``. >>> pl = pv.Plotter() >>> _ = pl.add_mesh(pv.Sphere()) >>> pl.show(return_cpos=True) # doctest:+SKIP [(2.223005211686484, -0.3126909484828709, 2.4686209867735065), (0.0, 0.0, 0.0), (-0.6839951597283509, -0.47207319712073137, 0.5561452310578585)] """ # developer keyword argument: runs a function immediately prior to ``close`` self._before_close_callback = kwargs.pop('before_close_callback', None) jupyter_kwargs = kwargs.pop('jupyter_kwargs', {}) assert_empty_kwargs(**kwargs) if interactive_update and auto_close is None: auto_close = False elif interactive_update and auto_close: warnings.warn( textwrap.dedent( """ The plotter will close immediately automatically since ``auto_close=True``. Either, do not specify ``auto_close``, or set it to ``False`` if you want to interact with the plotter interactively. """ ).strip() ) elif auto_close is None: auto_close = self._theme.auto_close if use_ipyvtk: txt = textwrap.dedent( """ use_ipyvtk is deprecated. 
Set the backend globally with ``pyvista.set_jupyter_backend("ipyvtklink") or with ``backend="ipyvtklink"`` """ ).strip() from pyvista.core.errors import DeprecationError raise DeprecationError(txt) if not hasattr(self, "ren_win"): raise RuntimeError("This plotter has been closed and cannot be shown.") if full_screen is None: full_screen = self._theme.full_screen if full_screen: self.ren_win.SetFullScreen(True) self.ren_win.BordersOn() # super buggy when disabled else: if window_size is None: window_size = self.window_size else: self._window_size_unset = False self.ren_win.SetSize(window_size[0], window_size[1]) # reset unless camera for the first render unless camera is set self._on_first_render_request(cpos) # handle plotter notebook if jupyter_backend and not self.notebook: warnings.warn( 'Not within a jupyter notebook environment.\nIgnoring ``jupyter_backend``.' ) if self.notebook: from ..jupyter.notebook import handle_plotter if jupyter_backend is None: jupyter_backend = self._theme.jupyter_backend if jupyter_backend != 'none': if screenshot: warnings.warn( '\nSet `jupyter_backend` backend to `"none"` to take a screenshot' ' within a notebook environment.' ) disp = handle_plotter( self, backend=jupyter_backend, return_viewer=return_viewer, **jupyter_kwargs ) return disp self.render() # This has to be after the first render for some reason if title is None: title = self.title if title: self.ren_win.SetWindowName(title) self.title = title # Keep track of image for sphinx-gallery if pyvista.BUILDING_GALLERY or screenshot: # always save screenshots for sphinx_gallery self.last_image = self.screenshot(screenshot, return_img=True) self.last_image_depth = self.get_image_depth() # See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270 if interactive and not self.off_screen: try: # interrupts will be caught here log.debug('Starting iren') self.iren.update_style() if not interactive_update: # Resolves #1260 if os.name == 'nt': if _vtk.VTK9: self.iren.process_events() else: global VERY_FIRST_RENDER if not VERY_FIRST_RENDER: self.iren.start() VERY_FIRST_RENDER = False self.iren.start() self.iren.initialize() except KeyboardInterrupt: log.debug('KeyboardInterrupt') self.close() raise KeyboardInterrupt # In the event that the user hits the exit-button on the GUI (on # Windows OS) then it must be finalized and deleted as accessing it # will kill the kernel. # Here we check for that and clean it up before moving on to any of # the closing routines that might try to still access that # render window. if not self.ren_win.IsCurrent(): self._clear_ren_win() # The ren_win is deleted # proper screenshots cannot be saved if this happens if not auto_close: warnings.warn( "`auto_close` ignored: by clicking the exit button, " "you have destroyed the render window and we have to " "close it out." ) auto_close = True # NOTE: after this point, nothing from the render window can be accessed # as if a user pressed the close button, then it destroys the # the render view and a stream of errors will kill the Python # kernel if code here tries to access that renderer. # See issues #135 and #186 for insight before editing the # remainder of this function. 
# Close the render window if requested if auto_close: self.close() # If user asked for screenshot, return as numpy array after camera # position if return_img or screenshot is True: if return_cpos: return self.camera_position, self.last_image if return_cpos: return self.camera_position def add_title(self, title, font_size=18, color=None, font=None, shadow=False): """Add text to the top center of the plot. This is merely a convenience method that calls ``add_text`` with ``position='upper_edge'``. Parameters ---------- title : str The text to add the rendering. font_size : float, optional Sets the size of the title font. Defaults to 16 or the value of the global theme if set. color : color_like, optional, Either a string, rgb list, or hex color string. Defaults to white or the value of the global theme if set. For example: * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` font : str, optional Font name may be ``'courier'``, ``'times'``, or ``'arial'``. shadow : bool, optional Adds a black shadow to the text. Defaults to ``False``. Returns ------- vtk.vtkTextActor Text actor added to plot. Examples -------- >>> import pyvista >>> pl = pyvista.Plotter() >>> pl.background_color = 'grey' >>> actor = pl.add_title('Plot Title', font='courier', color='k', ... font_size=40) >>> pl.show() """ # add additional spacing from the top of the figure by default title = '\n' + title return self.add_text( title, position='upper_edge', font_size=font_size, color=color, font=font, shadow=shadow, name='title', viewport=False, ) def add_cursor( self, bounds=(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0), focal_point=(0.0, 0.0, 0.0), color=None, ): """Add a cursor of a PyVista or VTK dataset to the scene. Parameters ---------- bounds : length 6 sequence Specify the bounds in the format of: - ``(xmin, xmax, ymin, ymax, zmin, zmax)`` Defaults to ``(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)``. focal_point : list or tuple, optional The focal point of the cursor. Defaults to ``(0.0, 0.0, 0.0)``. color : color_like, optional Either a string, RGB sequence, or hex color string. For one of the following. * ``color='white'`` * ``color='w'`` * ``color=[1.0, 1.0, 1.0]`` * ``color='#FFFFFF'`` Returns ------- vtk.vtkActor VTK actor of the 2D cursor. Examples -------- >>> import pyvista >>> sphere = pyvista.Sphere() >>> plotter = pyvista.Plotter() >>> _ = plotter.add_mesh(sphere) >>> _ = plotter.add_cursor() >>> plotter.show() """ alg = _vtk.vtkCursor3D() alg.SetModelBounds(bounds) alg.SetFocalPoint(focal_point) alg.AllOn() mapper = make_mapper(_vtk.vtkDataSetMapper) mapper.SetInputConnection(alg.GetOutputPort()) actor, prop = self.add_actor(mapper) prop.SetColor(Color(color).float_rgb) return actor # Tracks created plotters. At the end of the file as we need to # define ``BasePlotter`` before including it in the type definition. _ALL_PLOTTERS: Dict[str, BasePlotter] = {} def _kill_display(disp_id): # pragma: no cover """Forcibly close the display on Linux. See: https://gitlab.kitware.com/vtk/vtk/-/issues/17917#note_783584 And more details into why... https://stackoverflow.com/questions/64811503 Notes ----- This is to be used experimentally and is known to cause issues on `pyvistaqt` """ if platform.system() != 'Linux': raise OSError('This method only works on Linux') if disp_id: cdisp_id = int(disp_id[1:].split('_')[0], 16) # this is unsafe as events might be queued, but sometimes the # window fails to close if we don't just close it Thread(target=X11.XCloseDisplay, args=(cdisp_id,)).start()
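
# --- Illustrative sketch, not part of the original module. ---
# ``show`` above warns that ``interactive_update`` only makes sense when the
# render window is kept open (``auto_close=False``); otherwise the window is
# closed after the first frame.  A minimal usage pattern, assuming a regular
# desktop (non-notebook) session, could look like this:
def _example_interactive_update():  # pragma: no cover - documentation sketch
    """Keep the render window open and push frames to it in a loop."""
    import pyvista as pv

    pl = pv.Plotter()
    pl.add_mesh(pv.Sphere())
    # Leave the window open so it can be refreshed in the loop below.
    pl.show(interactive_update=True, auto_close=False)
    for _ in range(10):
        # Mutate the scene here (camera, scalars, ...) and push a new frame.
        pl.update()
    pl.close()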
base_build.py
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection

import datetime
import logging

from multiprocessing import Queue, Process


class Command(BaseCommand):
    help = 'Basic functions for build scripts'

    logger = logging.getLogger(__name__)

    def add_arguments(self, parser):
        parser.add_argument('-p', '--proc',
                            type=int,
                            action='store',
                            dest='proc',
                            default=1,
                            help='Number of processes to run')
        parser.add_argument('-t', '--test',
                            action='store_true',
                            dest='test',
                            default=False,
                            help='Include only a subset of data for testing')

    def prepare_input(self, proc, items, iteration=1):
        q = Queue()
        procs = list()
        num_items = len(items)

        if not num_items:
            return False

        # make sure not to use more jobs than proteins (chunk size will be 0, which is not good)
        if proc > num_items:
            proc = num_items

        chunk_size = int(num_items / proc)
        connection.close()
        for i in range(0, proc):
            first = chunk_size * i
            if i == proc - 1:
                last = False
            else:
                last = chunk_size * (i + 1)

            p = Process(target=self.main_func, args=([(first, last), iteration]))
            procs.append(p)
            p.start()

        for p in procs:
            p.join()
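
# --- Illustrative sketch, not part of the original command. ---
# A concrete build script is expected to subclass the Command above, provide
# ``main_func`` and an item list, and hand the list to ``prepare_input``; each
# worker process then receives a ``(first, last)`` slice plus the iteration
# number.  The class name and work items below are hypothetical.
class ExampleBuildCommand(Command):  # pragma: no cover - documentation sketch
    help = 'Hypothetical build command illustrating prepare_input usage'

    items = []

    def handle(self, *args, **options):
        # Hypothetical work items; a real command would query the database here.
        self.items = list(range(1000))
        self.prepare_input(options['proc'], self.items)

    def main_func(self, positions, iteration):
        first, last = positions
        # ``last`` is False for the final chunk, meaning "to the end of the list".
        chunk = self.items[first:last] if last is not False else self.items[first:]
        for item in chunk:
            pass  # process a single item here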
test_multiprocessing.py
#!/usr/bin/env python # # Unit tests for the multiprocessing package # import unittest import threading import Queue import time import sys import os import gc import signal import array import copy import socket import random import logging from StringIO import StringIO from test import test_support # Work around broken sem_open implementations try: import multiprocessing.synchronize except ImportError, e: from test.test_support import TestSkipped raise TestSkipped(e) import multiprocessing.dummy import multiprocessing.connection import multiprocessing.managers import multiprocessing.heap import multiprocessing.pool import _multiprocessing from multiprocessing import util # # # latin = str # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.WARNING DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.time() try: return self.func(*args, **kwds) finally: self.elapsed = time.time() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': return current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertTrue(isinstance(authkey, bytes)) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def _test(self, q, *args, **kwds): current = self.current_process() q.put(args) q.put(kwds) q.put(current.name) if self.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEquals(p.authkey, current.authkey) self.assertEquals(p.is_alive(), False) self.assertEquals(p.daemon, True) self.assertTrue(p not in self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEquals(p.exitcode, None) self.assertEquals(p.is_alive(), True) self.assertTrue(p in self.active_children()) self.assertEquals(q.get(), args[1:]) self.assertEquals(q.get(), kwargs) self.assertEquals(q.get(), p.name) if self.TYPE != 'threads': self.assertEquals(q.get(), 
current.authkey) self.assertEquals(q.get(), p.pid) p.join() self.assertEquals(p.exitcode, 0) self.assertEquals(p.is_alive(), False) self.assertTrue(p not in self.active_children()) def _test_terminate(self): time.sleep(1000) def test_terminate(self): if self.TYPE == 'threads': return p = self.Process(target=self._test_terminate) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertTrue(p in self.active_children()) self.assertEqual(p.exitcode, None) p.terminate() join = TimingWrapper(p.join) self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertTrue(p not in self.active_children()) p.join() # XXX sometimes get p.exitcode == 0 on Windows ... #self.assertEqual(p.exitcode, -signal.SIGTERM) def test_cpu_count(self): try: cpus = multiprocessing.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertTrue(p not in self.active_children()) p.start() self.assertTrue(p in self.active_children()) p.join() self.assertTrue(p not in self.active_children()) def _test_recursion(self, wconn, id): from multiprocessing import forking wconn.send(id) if len(id) < 2: for i in range(2): p = self.Process( target=self._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) # # # class _UpperCaser(multiprocessing.Process): def __init__(self): multiprocessing.Process.__init__(self) self.child_conn, self.parent_conn = multiprocessing.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() # # # def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): def _test_put(self, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, 
MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(Queue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() def _test_get(self, queue, child_can_start, parent_can_continue): child_can_start.wait() #queue.put(1) queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(Queue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(Queue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(Queue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() def _test_fork(self, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. 
queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(Queue.Empty, queue.get, False) p.join() def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: return q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) def _test_task_done(self, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'): return workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in xrange(4)] for p in workers: p.start() for i in xrange(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() # # # class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # Currently fails on OS/X #if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': return sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class _TestCondition(BaseTestCase): def f(self, cond, 
sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() # wait for them all to sleep for i in xrange(6): sleeping.acquire() # check they have all timed out for i in xrange(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() # wait for them to all sleep for i in xrange(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() cond.notify_all() cond.release() # check they have all woken time.sleep(DELTA) self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, None) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) class _TestEvent(BaseTestCase): def _test_event(self, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporaily, due to API shear, this does not # work with threading._Event objects. 
is_set == isSet #self.assertEqual(event.is_set(), False) self.assertEqual(wait(0.0), None) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), None) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences # self.assertEqual(event.is_set(), True) self.assertEqual(wait(), None) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), None) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() #self.assertEqual(event.is_set(), False) self.Process(target=self._test_event, args=(event,)).start() self.assertEqual(wait(), None) # # # class _TestValue(BaseTestCase): codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('c', latin('x'), latin('y')) ] def _test(self, values): for sv, cv in zip(values, self.codes_values): sv.value = cv[2] def test_value(self, raw=False): if self.TYPE != 'processes': return if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): if self.TYPE != 'processes': return val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): def f(self, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] def test_array(self, raw=False): if self.TYPE != 'processes': return seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.start() p.join() self.assertEqual(list(arr[:]), seq) def test_rawarray(self): self.test_array(raw=True) def test_getobj_getlock_obj(self): if self.TYPE != 'processes': return arr1 = self.Array('i', range(10)) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', range(10), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', range(10), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = 
('manager',) def test_list(self): a = self.list(range(10)) self.assertEqual(a[:], range(10)) b = self.list() self.assertEqual(b[:], []) b.extend(range(5)) self.assertEqual(b[:], range(5)) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], range(10)) d = [a, b] e = self.list(d) self.assertEqual( e[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]) def test_dict(self): d = self.dict() indices = range(65, 70) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) # # # def sqr(x, wait=0.0): time.sleep(wait) return x*x class _TestPool(BaseTestCase): def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10))) self.assertEqual(pmap(sqr, range(100), chunksize=20), map(sqr, range(100))) def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2)) get = TimingWrapper(res.get) self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, range(10)) self.assertEqual(list(it), map(sqr, range(10))) it = self.pool.imap(sqr, range(10)) for i in range(10): self.assertEqual(it.next(), i*i) self.assertRaises(StopIteration, it.next) it = self.pool.imap(sqr, range(1000), chunksize=100) for i in range(1000): self.assertEqual(it.next(), i*i) self.assertRaises(StopIteration, it.next) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, range(1000)) self.assertEqual(sorted(it), map(sqr, range(1000))) it = self.pool.imap_unordered(sqr, range(1000), chunksize=53) self.assertEqual(sorted(it), map(sqr, range(1000))) def test_make_pool(self): p = multiprocessing.Pool(3) self.assertEqual(3, len(p._pool)) p.close() p.join() def test_terminate(self): if self.TYPE == 'manager': # On Unix a forked process increfs each shared object to # which its parent process held a reference. If the # forked process gets terminated then there is likely to # be a reference leak. So to prevent # _TestZZZNumberOfObjects from failing we skip this test # when using a manager. 
return result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() self.assertTrue(join.elapsed < 0.2) # # Test that manager has expected number of shared objects left # class _TestZZZNumberOfObjects(BaseTestCase): # Because test cases are sorted alphabetically, this one will get # run after all the other tests for the manager. It tests that # there have been no "reference leaks" for the manager's shared # objects. Note the comment in _TestPool.test_terminate(). ALLOWED_TYPES = ('manager',) def test_number_of_objects(self): EXPECTED_NUMBER = 1 # the pool object is still alive multiprocessing.active_children() # discard dead process objs gc.collect() # do garbage collection refs = self.manager._number_of_objects() if refs != EXPECTED_NUMBER: print self.manager._debug_info() self.assertEqual(refs, EXPECTED_NUMBER) # # Test of creating a customized manager class # from multiprocessing.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in xrange(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('next', '__next__') def __iter__(self): return self def next(self): return self._callmethod('next') def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) manager.shutdown() # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = Queue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def _putter(self, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() queue.put(('hello world', None, True, 2.25)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=('localhost', 0), authkey=authkey, serializer=SERIALIZER ) manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() # Note that xmlrpclib will deserialize object as 
a list not a tuple self.assertEqual(queue.get(), ['hello world', None, True, 2.25]) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue manager.shutdown() class _TestManagerRestart(BaseTestCase): def _putter(self, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=('localhost', 0), authkey=authkey, serializer=SERIALIZER) addr = manager.get_server().address manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) manager.start() manager.shutdown() # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def _echo(self, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', range(4)) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocessing.BufferTooShort, e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) 
self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(IOError, reader.send, 2) self.assertRaises(IOError, writer.recv) self.assertRaises(IOError, writer.poll) def test_spawn_close(self): # We test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': return msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def _test(self, address): conn = self.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() # # Test of sending connection and socket objects between processes # """ class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) def _listener(self, conn, families): for fam in families: l = self.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) if self.TYPE == 'processes': l = socket.socket() l.bind(('localhost', 0)) conn.send(l.getsockname()) l.listen(1) new_conn, addr = l.accept() conn.send(new_conn) conn.recv() def _remote(self, conn): for (address, msg) in iter(conn.recv, None): client = self.connection.Client(address) client.send(msg.upper()) client.close() if self.TYPE == 'processes': address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): try: multiprocessing.allow_connection_pickling() except ImportError: return families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) if self.TYPE == 'processes': msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) if hasattr(socket, 'fromfd'): new_conn = lconn.recv() 
self.assertEqual(new_conn.recv(100), msg.upper()) else: # XXX On Windows with Py2.6 need to backport fromfd() discard = lconn.recv_bytes() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() """ # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # create and destroy lots of blocks of different sizes for i in xrange(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocessing.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] # get the heap object heap = multiprocessing.heap.BufferWrapper._heap # verify the state of the heap all = [] occupied = 0 for L in heap._len_to_seq.values(): for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) for arena, start, stop in heap._allocated_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] self.assertTrue((arena != narena and nstart == 0) or (stop == nstart)) # # # try: from ctypes import Structure, Value, copy, c_int, c_double except ImportError: Structure = object c_int = c_double = None class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def _double(self, x, y, foo, arr, string): x.value *= 2 y.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): if c_int is None: return x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', range(10), lock=lock) string = self.Array('c', 20, lock=lock) string.value = 'hello' p = self.Process(target=self._double, args=(x, y, foo, arr, string)) p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): if c_int is None: return foo = _Foo(2, 5.0) bar = copy(foo) foo.x = 0 foo.y = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def _test_finalize(self, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call mutliprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = 
self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) # # Test that from ... import * works for each module # class _TestImportStar(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_import(self): modules = ( 'multiprocessing', 'multiprocessing.connection', 'multiprocessing.heap', 'multiprocessing.managers', 'multiprocessing.pool', 'multiprocessing.process', 'multiprocessing.reduction', 'multiprocessing.sharedctypes', 'multiprocessing.synchronize', 'multiprocessing.util' ) for name in modules: __import__(name) mod = sys.modules[name] for attr in getattr(mod, '__all__', ()): self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocessing.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) def _test_level(self, conn): logger = multiprocessing.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocessing.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocessing.Pipe(duplex=False) logger.setLevel(LEVEL1) self.Process(target=self._test_level, args=(writer,)).start() self.assertEqual(LEVEL1, reader.recv()) logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) self.Process(target=self._test_level, args=(writer,)).start() self.assertEqual(LEVEL2, reader.recv()) root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): def test_invalid_handles(self): if WIN32: return conn = _multiprocessing.Connection(44977608) self.assertRaises(IOError, conn.poll) self.assertRaises(IOError, _multiprocessing.Connection, -1) # # Functions used to create test cases from the base ones in this module # def get_attributes(Source, names): d = {} for name in names: obj = getattr(Source, name) if type(obj) == type(get_attributes): obj = staticmethod(obj) d[name] = obj return d def create_test_cases(Mixin, type): result = {} glob = globals() Type = type[0].upper() + type[1:] for name in glob.keys(): if name.startswith('_Test'): base = glob[name] if type in base.ALLOWED_TYPES: newname = 'With' + Type + name[1:] class Temp(base, unittest.TestCase, Mixin): pass result[newname] = Temp Temp.__name__ = newname Temp.__module__ = Mixin.__module__ return result # # Create test cases # class ProcessesMixin(object): TYPE = 'processes' Process = multiprocessing.Process locals().update(get_attributes(multiprocessing, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'RawValue', 'RawArray', 'current_process', 'active_children', 'Pipe', 'connection', 'JoinableQueue' ))) testcases_processes = create_test_cases(ProcessesMixin, type='processes') globals().update(testcases_processes) class ManagerMixin(object): TYPE = 'manager' Process = multiprocessing.Process manager = object.__new__(multiprocessing.managers.SyncManager) locals().update(get_attributes(manager, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'list', 
'dict', 'Namespace', 'JoinableQueue' ))) testcases_manager = create_test_cases(ManagerMixin, type='manager') globals().update(testcases_manager) class ThreadsMixin(object): TYPE = 'threads' Process = multiprocessing.dummy.Process locals().update(get_attributes(multiprocessing.dummy, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'current_process', 'active_children', 'Pipe', 'connection', 'dict', 'list', 'Namespace', 'JoinableQueue' ))) testcases_threads = create_test_cases(ThreadsMixin, type='threads') globals().update(testcases_threads) class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return b'something bogus' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.deliver_challenge, _FakeConnection(), b'abc') def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return multiprocessing.connection.CHALLENGE elif self.count == 2: return b'something bogus' return b'' def send_bytes(self, data): pass self.assertRaises(multiprocessing.AuthenticationError, multiprocessing.connection.answer_challenge, _FakeConnection(), b'abc') # # Issue 5155, 5313, 5331: Test process in processes # Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior # def _ThisSubProcess(q): try: item = q.get(block=False) except Queue.Empty: pass def _TestProcess(q): queue = multiprocessing.Queue() subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,)) subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocessing.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): queue = multiprocessing.Queue() proc = multiprocessing.Process(target=_TestProcess, args=(queue,)) proc.start() proc.join() def test_pool_in_process(self): p = multiprocessing.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocessing.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' testcases_other = [OtherTest, TestInvalidHandle, TestStdinBadfiledescriptor] # # # def test_main(run=None): if sys.platform.startswith("linux"): try: lock = multiprocessing.RLock() except OSError: from test.test_support import TestSkipped raise TestSkipped("OSError raises on RLock creation, see issue 3111!") if run is None: from test.test_support import run_unittest as run util.get_temp_dir() # creates temp directory for use by all processes multiprocessing.get_logger().setLevel(LOG_LEVEL) ProcessesMixin.pool = multiprocessing.Pool(4) ThreadsMixin.pool = multiprocessing.dummy.Pool(4) ManagerMixin.manager.__init__() ManagerMixin.manager.start() ManagerMixin.pool = ManagerMixin.manager.Pool(4) testcases = ( 
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) + sorted(testcases_threads.values(), key=lambda tc:tc.__name__) + sorted(testcases_manager.values(), key=lambda tc:tc.__name__) + testcases_other ) loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases) with test_support._check_py3k_warnings( (".+__(get|set)slice__ has been removed", DeprecationWarning)): run(suite) ThreadsMixin.pool.terminate() ProcessesMixin.pool.terminate() ManagerMixin.pool.terminate() ManagerMixin.manager.shutdown() del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool def main(): test_main(unittest.TextTestRunner(verbosity=2).run) if __name__ == '__main__': main()
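
# --- Illustrative note, not part of the original test file. ---
# ``create_test_cases`` above turns every ``_Test*`` base class into a real
# ``unittest.TestCase`` per mixin, so ``_TestQueue`` combined with
# ``ProcessesMixin`` is published as ``WithProcessesTestQueue``.  The manual
# equivalent of what that factory builds would roughly be:
#
#     class WithProcessesTestQueue(_TestQueue, unittest.TestCase, ProcessesMixin):
#         pass
#
#     suite = unittest.defaultTestLoader.loadTestsFromTestCase(WithProcessesTestQueue)
#     unittest.TextTestRunner(verbosity=2).run(suite)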
threadModuleAPI.py
#!/usr/bin/python3 # -*- coding: utf-8 -*- # *****************************************************************************/ # * Authors: Joseph Tarango # *****************************************************************************/ # @package threadModuleAPI import optparse, datetime, traceback, pprint, os, threading, subprocess, re, psutil, sys, concurrent.futures, itertools, time, typing from src.software.debug import whoami def getAvailableCPUCount(): """ Number of available virtual or physical CPUs on this system, i.e. user/real as output by time(1) when called with an optimally scaling userspace-only program""" coreThreadsAvailable = 1 # cpuset may restrict the number of *available* processors try: import multiprocessing coreThreadsAvailable = multiprocessing.cpu_count() return coreThreadsAvailable except (ImportError, NotImplementedError): pass try: coreThreadsAvailable = psutil.cpu_count() # psutil.NUM_CPUS on old versions return coreThreadsAvailable except (ImportError, AttributeError): pass except OSError: pass # raise Exception('Can not determine.) print('Can not determine number of CPUs on this system, so defaulting to 1.') return coreThreadsAvailable class MultiThreadFL(): """ Multi-thread a function with multible items. """ @staticmethod def threadProcessLoop(items=None, start=None, end=None, threadFunction=None): if items is None: return negOne = (-1) if end > len(items) or start < (len(items) * negOne): return for item in items[start:end]: try: threadFunction(item) except Exception: print('error with item') return @staticmethod def threadSplitProcessing(items=None, num_splits=None, threadProcessFunction=None): if items is None: return if num_splits is None: numSplits = min(getAvailableCPUCount(), len(items)) else: numSplits = num_splits split_size = len(items) // numSplits threads = [] for i in range(numSplits): # Determine the indices of the list this thread will handle start = i * split_size # Special case on the last chunk to account for uneven splits end = None if i + 1 == numSplits else (i + 1) * split_size # Create the thread threads.append(threading.Thread(target=threadProcessFunction, args=(items, start, end))) threads[-1].start() # Start the thread we just created # Wait for all threads to finish for thread in threads: thread.join() return def threadLoop(self, items=None, start=None, end=None, num_splits=None, functionProcess=None): self.threadSplitProcessing(items=items, num_splits=num_splits, threadProcessFunction=self.threadProcessLoop(items=items, start=start, end=end, threadFunction=functionProcess)) return class MassiveParallelismSingleFunctionManyParameters(): # High performance massive parallelism class in which a single function context with a list of parameters to pass. 
def __init__(self, debug: bool = False, functionName=None, fParameters: typing.List[dict] = None, workers: int = None, timeOut: int = (60 * 60 * 24), # Default time is 60 seconds * 60 minutes * 24 hours = 1 day in seconds per thread inOrder: bool = True, runSequential: bool = False): self.debug = debug self.functionName = functionName self.fParameters = fParameters self.workers = workers # Pool of workers self.timeOut = timeOut self.inOrder = inOrder self.resultsList = list() self.encounteredExceptions = 0 self.exceptionFoundList = list() self.alreadyExecuted = False self.areResultsReady = False self.startTime = None self.endTime = None self.totalTime = None self.runSequential = runSequential return def setFunctionName(self, functionName): self.functionName = functionName return def setParametersList(self, fParameters: typing.List[dict]): """Sets parameter list from dictionary parameter context. Args: fParameters: list of dictionaries of parameters Returns: None Example kwargsList_input = [{'inputINI': dataFileNameA, ... 'debug': debug, 'inParallel': inParallelA, 'requiredList': requiredListA}, ... {'inputINI': dataFileNameZ, ... 'debug': debugZ, 'inParallel': inParallelZ, 'requiredList': requiredListZ}] """ self.fParameters = fParameters return def getExceptionInfo(self): return self.encounteredExceptions, self.exceptionFoundList def getExecutionTime(self): return self.startTime, self.endTime, self.totalTime def getResults(self): return self.resultsList def _inOrderConcurrentMap(self): """Function that utilises concurrent.futures.ProcessPoolExecutor.map returning inorder in a parallelised manner. Returns: results list in order of list given """ self.alreadyExecuted = True # Local variables functionContextList = list() with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers) as executer: # Discretise workload and submit to worker pool mapperMeta = executer.map(lambda parametersList: self.functionName(**parametersList), self.fParameters, timeout=self.timeOut) self.resultsList.append(mapperMeta) # Access results in order. for parallelProcessItem in functionContextList: try: self.resultsList.append(parallelProcessItem) except BaseException as errorObj: self.encounteredExceptions += 1 exceptionContext = f" {whoami()} with {errorObj}! Timeout={self.timeOut}" self.exceptionFoundList.append(exceptionContext) if self.debug: print(exceptionContext) self.areResultsReady = True return self.resultsList def _anyOrderConcurrentMap(self): """Function that utilises concurrent.futures.ProcessPoolExecutor.map returning in any order in a parallelised manner. Returns: results list no particular order of list given. """ self.alreadyExecuted = True # Local variables functionContextList = list() # Parallelization with concurrent.futures.ProcessPoolExecutor(max_workers=self.workers) as executor: # Discretise workload and submit to worker pool for fParameterContext in self.fParameters: try: functionContextList.append(executor.submit(self.functionName, fParameterContext)) except BaseException as errorObj: self.encounteredExceptions += 1 exceptionContext = f" {whoami()} with {errorObj}! 
Timeout={self.timeOut}" self.exceptionFoundList.append(exceptionContext) if self.debug: print(exceptionContext) # Skip the copying of the data to another array and use itertools.chain.from_iterable to combine the results from execution to single iterable self.resultsList = itertools.chain.from_iterable(f.result() for f in concurrent.futures.as_completed(functionContextList, timeout=self.timeOut)) self.areResultsReady = True return self.resultsList def _sequentialMap(self): """Function that executions a function with a list of parameters. Returns: results list in order of list given. """ self.alreadyExecuted = True # Discretise workload and submit worker try: for fParameterContext in self.fParameters: sResult = self.functionName(**fParameterContext) (self.resultsList).append(sResult) except BaseException as errorObj: self.encounteredExceptions += 1 exceptionContext = f" {whoami()} with {errorObj}! Timeout={self.timeOut}" self.exceptionFoundList.append(exceptionContext) if self.debug: print(exceptionContext) self.areResultsReady = True return self.resultsList def execute(self): if self.alreadyExecuted is False: self.startTime = time.time() if self.debug: print(f"Threads, start time token {self.startTime}") if self.runSequential is True: if self.debug: print(" Processing sequentially in order of parameter list...") self._sequentialMap() elif self.runSequential is False and self.inOrder is True: if self.debug: print(" Processing in-order of parameters list...") self._inOrderConcurrentMap() elif self.runSequential is False and self.inOrder is False: if self.debug: print(" Processing out-of-order of parameters list...") self._anyOrderConcurrentMap() else: if self.debug: print(" Fault in configuration... running sequentially...") self._sequentialMap() self.endTime = time.time() if self.debug: print(f"End time token {self.endTime}") self.totalTime = self.endTime - self.startTime if self.debug: print("Threads executed {0} at {1:.4f} seconds with {2} workers".format(len(self.resultsList), self.totalTime, self.workers)) return self.resultsList class MassiveParallelismanyFunctionManyParameters(): # High performance massive parallelism class in which a single function context with a list of parameters to pass. 
def __init__(self, debug: bool = False, functionName_fParameters=None, workers: int = None, timeOut: int = (60 * 60 * 24), # Default time is 60 seconds * 60 minutes * 24 hours = 1 day in seconds per thread inOrder: bool = True, runSequential: bool = False): self.debug = debug self.functionName_fParameters = functionName_fParameters self.workers = workers # Pool of workers self.timeOut = timeOut self.inOrder = inOrder self.resultsList = list() self.encounteredExceptions = 0 self.exceptionFoundList = list() self.alreadyExecuted = False self.areResultsReady = False self.startTime = None self.endTime = None self.totalTime = None self.runSequential = runSequential self.validInput = False # verify input types if isinstance(functionName_fParameters, list): for functionName, fParameters in functionName_fParameters: if isinstance(fParameters, list) and self.validInput is False: for fParametersItem in fParameters: if isinstance(fParametersItem, dict) and self.validInput is False: self.validInput = True break elif self.validInput is True: break return def execute(self): if isinstance(self.functionName_fParameters, list): for functionName, fParameters in self.functionName_fParameters: try: functionContext = MassiveParallelismSingleFunctionManyParameters(debug=self.debug, functionName=functionName, fParameters=fParameters, workers=self.workers, timeOut=self.timeOut, # Default time is 60 seconds * 60 minutes * 24 hours = 1 day in seconds per thread inOrder=self.inOrder, runSequential=self.runSequential) iResults = functionContext.execute() self.resultsList.append(iResults) except BaseException as errorObj: if self.debug: print(f"{whoami()} with {errorObj}") pass return def API(options=None): """ API for the default application in the graphical interface. Args: options: Commandline inputs. Returns: """ if options.debug: print("Options are:\n{0}\n".format(options)) ############################################################################### # Graphical User Interface (GUI) Configuration ############################################################################### print("options: ", str(options.mode)) pprint.pformat(locals(), indent=3, width=100) return 0 def main(): ############################################## # Main function, Options ############################################## parser = optparse.OptionParser() parser.add_option("--example", action='store_true', dest='example', default=False, help='Show command execution example.') parser.add_option("--debug", action='store_true', dest='debug', default=True, help='Debug mode.') parser.add_option("--more", dest='more', default=False, help="Displays more options.") parser.add_option("--mode", dest='mode', default=1, help="Mode of Operation.") (options, args) = parser.parse_args() ############################################## # Main ############################################## API(options) return 0 if __name__ == '__main__': """Performs execution delta of the process.""" p = datetime.datetime.now() try: main() except Exception as e: print("Fail End Process: ", e) traceback.print_exc() q = datetime.datetime.now() print("Execution time: " + str(q - p))
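###############################################################################
# Illustrative usage sketch (added, not part of the original module).
# A minimal example of driving MassiveParallelismSingleFunctionManyParameters
# defined above; the worker function `_exampleSquare` and the parameter values
# are hypothetical and exist only for this sketch. With inOrder=True the
# thread-based path is used, so the worker function does not need to be picklable.

def _exampleSquare(value=0):
    return value * value


def _exampleMassiveParallelismUsage():
    runner = MassiveParallelismSingleFunctionManyParameters(debug=True,
                                                            functionName=_exampleSquare,
                                                            fParameters=[{'value': v} for v in range(8)],
                                                            workers=4,
                                                            inOrder=True)
    results = runner.execute()
    exceptions, exceptionDetails = runner.getExceptionInfo()
    print(f"results={list(results)} exceptions={exceptions} details={exceptionDetails}")
    return results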
netcdf.py
#!/usr/bin/env pytest # -*- coding: utf-8 -*- ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test NetCDF driver support. # Author: Frank Warmerdam <warmerdam@pobox.com> # ############################################################################### # Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com> # Copyright (c) 2008-2016, Even Rouault <even.rouault at spatialys.com> # Copyright (c) 2010, Kyle Shannon <kyle at pobox dot com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import os import sys import shutil import struct from osgeo import gdal from osgeo import ogr from osgeo import osr import pytest import gdaltest import test_cli_utilities from uffd import uffd_compare ############################################################################### # Netcdf Functions ############################################################################### ############################################################################### # Get netcdf version and test for supported files @pytest.fixture(autouse=True, scope='module') def netcdf_setup(): # NOTE: this is also used by netcdf_cf.py gdaltest.netcdf_drv_version = 'unknown' gdaltest.netcdf_drv_has_nc2 = False gdaltest.netcdf_drv_has_nc4 = False gdaltest.netcdf_drv_has_hdf4 = False gdaltest.netcdf_drv_silent = False gdaltest.netcdf_drv = gdal.GetDriverByName('NETCDF') if gdaltest.netcdf_drv is None: pytest.skip('NOTICE: netcdf not supported, skipping checks') # get capabilities from driver metadata = gdaltest.netcdf_drv.GetMetadata() if metadata is None: pytest.skip('NOTICE: netcdf metadata not found, skipping checks') # netcdf library version "3.6.3" of Dec 22 2009 06:10:17 $ # netcdf library version 4.1.1 of Mar 4 2011 12:52:19 $ if 'NETCDF_VERSION' in metadata: v = metadata['NETCDF_VERSION'] v = v[0: v.find(' ')].strip('"') gdaltest.netcdf_drv_version = v if 'NETCDF_HAS_NC2' in metadata \ and metadata['NETCDF_HAS_NC2'] == 'YES': gdaltest.netcdf_drv_has_nc2 = True if 'NETCDF_HAS_NC4' in metadata \ and metadata['NETCDF_HAS_NC4'] == 'YES': gdaltest.netcdf_drv_has_nc4 = True if 'NETCDF_HAS_HDF4' in metadata \ and metadata['NETCDF_HAS_HDF4'] == 'YES': gdaltest.netcdf_drv_has_hdf4 = True print('NOTICE: using netcdf version ' + gdaltest.netcdf_drv_version + ' has_nc2: ' + str(gdaltest.netcdf_drv_has_nc2) + ' has_nc4: ' + str(gdaltest.netcdf_drv_has_nc4)) gdaltest.count_opened_files = 
len(gdaltest.get_opened_files()) @pytest.fixture(autouse=True, scope='module') def netcdf_teardown(): diff = len(gdaltest.get_opened_files()) - gdaltest.count_opened_files assert diff == 0, 'Leak of file handles: %d leaked' % diff ############################################################################### # test file copy # helper function needed so we can call Process() on it from netcdf_test_copy_timeout() def netcdf_test_copy(ifile, band, checksum, ofile, opts=None, driver='NETCDF'): # pylint: disable=unused-argument opts = [] if opts is None else opts test = gdaltest.GDALTest('NETCDF', '../' + ifile, band, checksum, options=opts) return test.testCreateCopy(check_gt=0, check_srs=0, new_filename=ofile, delete_copy=0, check_minmax=0) ############################################################################### # test file copy, optional timeout arg def netcdf_test_copy_timeout(ifile, band, checksum, ofile, opts=None, driver='NETCDF', timeout=None): from multiprocessing import Process drv = gdal.GetDriverByName(driver) if os.path.exists(ofile): drv.Delete(ofile) if timeout is None: netcdf_test_copy(ifile, band, checksum, ofile, opts, driver) else: sys.stdout.write('.') sys.stdout.flush() proc = Process(target=netcdf_test_copy, args=(ifile, band, checksum, ofile, opts)) proc.start() proc.join(timeout) # if proc is alive after timeout we must terminate it, and return fail # valgrind detects memory leaks when this occurs (although it should never happen) if proc.is_alive(): proc.terminate() if os.path.exists(ofile): drv.Delete(ofile) print('testCreateCopy() for file %s has reached timeout limit of %d seconds' % (ofile, timeout)) pytest.fail() ############################################################################### # check support for DEFLATE compression, requires HDF5 and zlib def netcdf_test_deflate(ifile, checksum, zlevel=1, timeout=None): try: from multiprocessing import Process Process.is_alive except (ImportError, AttributeError): pytest.skip('from multiprocessing import Process failed') if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() ofile1 = 'tmp/' + os.path.basename(ifile) + '-1.nc' ofile1_opts = ['FORMAT=NC4C', 'COMPRESS=NONE'] ofile2 = 'tmp/' + os.path.basename(ifile) + '-2.nc' ofile2_opts = ['FORMAT=NC4C', 'COMPRESS=DEFLATE', 'ZLEVEL=' + str(zlevel)] assert os.path.exists(ifile), ('ifile %s does not exist' % ifile) netcdf_test_copy_timeout(ifile, 1, checksum, ofile1, ofile1_opts, 'NETCDF', timeout) netcdf_test_copy_timeout(ifile, 1, checksum, ofile2, ofile2_opts, 'NETCDF', timeout) # make sure compressed file is smaller than uncompressed files try: size1 = os.path.getsize(ofile1) size2 = os.path.getsize(ofile2) except OSError: pytest.fail('Error getting file sizes.') assert size2 < size1, \ 'Compressed file is not smaller than reference, check your netcdf-4, HDF5 and zlib installation' ############################################################################### # check support for reading attributes (single values and array values) def netcdf_check_vars(ifile, vals_global=None, vals_band=None): src_ds = gdal.Open(ifile) assert src_ds is not None, ('could not open dataset ' + ifile) metadata_global = src_ds.GetMetadata() assert metadata_global is not None, ('could not get global metadata from ' + ifile) missval = src_ds.GetRasterBand(1).GetNoDataValue() assert missval == 1, ('got invalid nodata value %s for Band' % str(missval)) metadata_band = src_ds.GetRasterBand(1).GetMetadata() assert metadata_band is not None, 
'could not get Band metadata' metadata = metadata_global vals = vals_global if vals is None: vals = dict() for k, v in vals.items(): assert k in metadata, ("missing metadata [%s]" % (str(k))) # strip { and } as new driver uses these for array values mk = metadata[k].lstrip('{ ').rstrip('} ') assert mk == v, ("invalid value [%s] for metadata [%s]=[%s]" % (str(mk), str(k), str(v))) metadata = metadata_band vals = vals_band if vals is None: vals = dict() for k, v in vals.items(): assert k in metadata, ("missing metadata [%s]" % (str(k))) # strip { and } as new driver uses these for array values mk = metadata[k].lstrip('{ ').rstrip('} ') assert mk == v, ("invalid value [%s] for metadata [%s]=[%s]" % (str(mk), str(k), str(v))) ############################################################################### # Netcdf Tests ############################################################################### ############################################################################### # Perform simple read test. def test_netcdf_1(): if gdaltest.netcdf_drv is None: pytest.skip() tst = gdaltest.GDALTest('NetCDF', 'NETCDF:"data/netcdf/bug636.nc":tas', 1, 31621, filename_absolute=1) # We don't want to gum up the test stream output with the # 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message. gdal.PushErrorHandler('CPLQuietErrorHandler') tst.testOpen() gdal.PopErrorHandler() ############################################################################### # Verify a simple createcopy operation. We can't do the trivial gdaltest # operation because the new file will only be accessible via subdatasets. def test_netcdf_2(): if gdaltest.netcdf_drv is None: pytest.skip() src_ds = gdal.Open('data/byte.tif') gdaltest.netcdf_drv.CreateCopy('tmp/netcdf2.nc', src_ds) tst = gdaltest.GDALTest('NetCDF', 'tmp/netcdf2.nc', 1, 4672, filename_absolute=1) wkt = """PROJCS["NAD27 / UTM zone 11N", GEOGCS["NAD27", DATUM["North_American_Datum_1927", SPHEROID["Clarke 1866",6378206.4,294.9786982139006, AUTHORITY["EPSG","7008"]], AUTHORITY["EPSG","6267"]], PRIMEM["Greenwich",0], UNIT["degree",0.0174532925199433], AUTHORITY["EPSG","4267"]], PROJECTION["Transverse_Mercator"], PARAMETER["latitude_of_origin",0], PARAMETER["central_meridian",-117], PARAMETER["scale_factor",0.9996], PARAMETER["false_easting",500000], PARAMETER["false_northing",0], UNIT["metre",1, AUTHORITY["EPSG","9001"]], AUTHORITY["EPSG","26711"]]""" tst.testOpen(check_prj=wkt) # Check that no nodata value is reported for a Byte dataset ds = gdal.Open('tmp/netcdf2.nc') assert ds.GetRasterBand(1).GetNoDataValue() is None ds = None # Test that in raster-only mode, update isn't supported (not sure what would be missing for that...) with gdaltest.error_handler(): ds = gdal.Open('tmp/netcdf2.nc', gdal.GA_Update) assert ds is None gdaltest.clean_tmp() ############################################################################### def test_netcdf_3(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/sombrero.grd') bnd = ds.GetRasterBand(1) minmax = bnd.ComputeRasterMinMax() assert minmax[0] == pytest.approx((-0.675758), abs=0.000001) and minmax[1] == pytest.approx(1.0, abs=0.000001), \ 'Wrong min or max.' bnd = None ds = None ############################################################################### # In #2582 5dimensional files were causing problems. Verify use ok. 
def test_netcdf_4(): if gdaltest.netcdf_drv is None: pytest.skip() tst = gdaltest.GDALTest('NetCDF', 'NETCDF:data/netcdf/foo_5dimensional.nc:temperature', 3, 1218, filename_absolute=1) # We don't want to gum up the test stream output with the # 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message. gdal.PushErrorHandler('CPLQuietErrorHandler') # don't test for checksum (see bug #4284) result = tst.testOpen(skip_checksum=True) gdal.PopErrorHandler() return result ############################################################################### # In #2583 5dimensional files were having problems unrolling the highest # dimension - check handling now on band 7. def test_netcdf_5(): if gdaltest.netcdf_drv is None: pytest.skip() tst = gdaltest.GDALTest('NetCDF', 'NETCDF:data/netcdf/foo_5dimensional.nc:temperature', 7, 1227, filename_absolute=1) # We don't want to gum up the test stream output with the # 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message. gdal.PushErrorHandler('CPLQuietErrorHandler') # don't test for checksum (see bug #4284) result = tst.testOpen(skip_checksum=True) gdal.PopErrorHandler() return result ############################################################################### # ticket #3324 check spatial reference reading for cf-1.4 lambert conformal # 1 standard parallel. def test_netcdf_6(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/cf_lcc1sp.nc') prj = ds.GetProjection() sr = osr.SpatialReference() sr.ImportFromWkt(prj) lat_origin = sr.GetProjParm('latitude_of_origin') assert lat_origin == 25, ('Latitude of origin does not match expected:\n%f' % lat_origin) ds = None ############################################################################### # ticket #3324 check spatial reference reading for cf-1.4 lambert conformal # 2 standard parallels. 
def test_netcdf_7(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/cf_lcc2sp.nc') prj = ds.GetProjection() sr = osr.SpatialReference() sr.ImportFromWkt(prj) std_p1 = sr.GetProjParm('standard_parallel_1') std_p2 = sr.GetProjParm('standard_parallel_2') assert std_p1 == 33.0 and std_p2 == 45.0, \ ('Standard Parallels do not match expected:\n%f,%f' % (std_p1, std_p2)) ds = None sr = None ############################################################################### # check for cf convention read of albers equal area # Previous version compared entire wkt, which varies slightly among driver versions # now just look for PROJECTION=Albers_Conic_Equal_Area and some parameters def test_netcdf_8(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/cf_aea2sp_invf.nc') srs = osr.SpatialReference() srs.ImportFromWkt(ds.GetProjection()) proj = srs.GetAttrValue('PROJECTION') assert proj == 'Albers_Conic_Equal_Area', \ ('Projection does not match expected : ' + proj) param = srs.GetProjParm('latitude_of_center') assert param == 37.5, ('Got wrong parameter value (%g)' % param) param = srs.GetProjParm('longitude_of_center') assert param == -96, ('Got wrong parameter value (%g)' % param) ds = None ############################################################################### # check to see if projected systems default to wgs84 if no spheroid def def test_netcdf_9(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/cf_no_sphere.nc') prj = ds.GetProjection() sr = osr.SpatialReference() sr.ImportFromWkt(prj) spheroid = sr.GetAttrValue('SPHEROID') assert spheroid == 'WGS 84', ('Incorrect spheroid read from file\n%s' % (spheroid)) ds = None sr = None ############################################################################### # check if km pixel size makes it through to gt def test_netcdf_10(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/cf_no_sphere.nc') prj = ds.GetProjection() gt = ds.GetGeoTransform() gt1 = (-1897186.0290038721, 5079.3608398440065, 0.0, 2674684.0244560046, 0.0, -5079.4721679684635) gt2 = (-1897.186029003872, 5.079360839844003, 0.0, 2674.6840244560044, 0.0, -5.079472167968456) if gt != gt1: sr = osr.SpatialReference() sr.ImportFromWkt(prj) # new driver uses UNIT vattribute instead of scaling values assert (sr.GetAttrValue("PROJCS|UNIT", 1) == "1000" and gt == gt2), \ ('Incorrect geotransform, got ' + str(gt)) ds = None ############################################################################### # check if ll gets caught in km pixel size check def test_netcdf_11(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/cf_geog.nc') gt = ds.GetGeoTransform() assert gt == (-0.5, 1.0, 0.0, 10.5, 0.0, -1.0), 'Incorrect geotransform' ds = None ############################################################################### # check for scale/offset set/get. 
def test_netcdf_12(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/scale_offset.nc') scale = ds.GetRasterBand(1).GetScale() offset = ds.GetRasterBand(1).GetOffset() assert scale == 0.01 and offset == 1.5 gdaltest.netcdf_drv.CreateCopy('tmp/tmp.nc', ds) ds = None ds = gdal.Open('tmp/tmp.nc') scale = ds.GetRasterBand(1).GetScale() offset = ds.GetRasterBand(1).GetOffset() assert scale == 0.01 and offset == 1.5 ds = None gdaltest.netcdf_drv.Delete('tmp/tmp.nc') ############################################################################### # check for scale/offset = None if no scale or offset is available def test_netcdf_13(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/no_scale_offset.nc') scale = ds.GetRasterBand(1).GetScale() offset = ds.GetRasterBand(1).GetOffset() assert scale is None and offset is None, 'Incorrect scale or offset' ds = None ############################################################################### # check for scale/offset for two variables def test_netcdf_14(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('NETCDF:data/netcdf/two_vars_scale_offset.nc:z') scale = ds.GetRasterBand(1).GetScale() offset = ds.GetRasterBand(1).GetOffset() assert scale == 0.01 and offset == 1.5, \ ('Incorrect scale(%f) or offset(%f)' % (scale, offset)) ds = None ds = gdal.Open('NETCDF:data/netcdf/two_vars_scale_offset.nc:q') scale = ds.GetRasterBand(1).GetScale() offset = ds.GetRasterBand(1).GetOffset() assert scale == 0.1 and offset == 2.5, \ ('Incorrect scale(%f) or offset(%f)' % (scale, offset)) ############################################################################### # check support for netcdf-2 (64 bit) # This test fails in 1.8.1, because the driver does not support NC2 (bug #3890) def test_netcdf_15(): if gdaltest.netcdf_drv is None: pytest.skip() if gdaltest.netcdf_drv_has_nc2: ds = gdal.Open('data/netcdf/trmm-nc2.nc') assert ds is not None ds = None return else: pytest.skip() ############################################################################### # check support for netcdf-4 def test_netcdf_16(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/trmm-nc4.nc' if gdaltest.netcdf_drv_has_nc4: # test with Open() ds = gdal.Open(ifile) if ds is None: pytest.fail('GDAL did not open file') else: name = ds.GetDriver().GetDescription() ds = None # return fail if did not open with the netCDF driver (i.e. 
HDF5Image) assert name == 'netCDF', 'netcdf driver did not open file' # test with Identify() name = gdal.IdentifyDriver(ifile).GetDescription() assert name == 'netCDF', 'netcdf driver did not identify file' else: pytest.skip() ############################################################################### # check support for netcdf-4 - make sure hdf5 is not read by netcdf driver def test_netcdf_17(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/hdf5/groups.h5' # skip test if Hdf5 is not enabled if gdal.GetDriverByName('HDF5') is None and \ gdal.GetDriverByName('HDF5Image') is None: pytest.skip() if gdaltest.netcdf_drv_has_nc4: # test with Open() ds = gdal.Open(ifile) if ds is None: pytest.fail('GDAL did not open hdf5 file') else: name = ds.GetDriver().GetDescription() ds = None # return fail if opened with the netCDF driver assert name != 'netCDF', 'netcdf driver opened hdf5 file' # test with Identify() name = gdal.IdentifyDriver(ifile).GetDescription() assert name != 'netCDF', 'netcdf driver was identified for hdf5 file' else: pytest.skip() ############################################################################### # check support for netcdf-4 classic (NC4C) def test_netcdf_18(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/trmm-nc4c.nc' if gdaltest.netcdf_drv_has_nc4: # test with Open() ds = gdal.Open(ifile) if ds is None: pytest.fail() else: name = ds.GetDriver().GetDescription() ds = None # return fail if did not open with the netCDF driver (i.e. HDF5Image) assert name == 'netCDF' # test with Identify() name = gdal.IdentifyDriver(ifile).GetDescription() assert name == 'netCDF' else: pytest.skip() ############################################################################### # check support for reading with DEFLATE compression, requires NC4 def test_netcdf_19(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() tst = gdaltest.GDALTest('NetCDF', 'data/netcdf/trmm-nc4z.nc', 1, 50235, filename_absolute=1) result = tst.testOpen(skip_checksum=True) return result ############################################################################### # check support for writing with DEFLATE compression, requires NC4 def test_netcdf_20(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() # simple test with tiny file return netcdf_test_deflate('data/utm.tif', 50235) ############################################################################### # check support for writing large file with DEFLATE compression # if chunking is not defined properly within the netcdf driver, this test can take 1h def test_netcdf_21(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() if not gdaltest.run_slow_tests(): pytest.skip() bigfile = 'tmp/cache/utm-big.tif' sys.stdout.write('.') sys.stdout.flush() # create cache dir if absent if not os.path.exists('tmp/cache'): os.mkdir('tmp/cache') # look for large gtiff in cache if not os.path.exists(bigfile): # create large gtiff if test_cli_utilities.get_gdalwarp_path() is None: pytest.skip('gdalwarp not found') warp_cmd = test_cli_utilities.get_gdalwarp_path() +\ ' -q -overwrite -r bilinear -ts 7680 7680 -of gtiff ' +\ 'data/utm.tif ' + bigfile try: (ret, err) = gdaltest.runexternal_out_and_err(warp_cmd) except OSError: pytest.fail('gdalwarp execution failed') assert not (err != '' or ret != ''), \ ('gdalwarp returned error\n' + str(ret) + ' ' + str(err)) # test compression of the file, with a 
conservative timeout of 60 seconds return netcdf_test_deflate(bigfile, 26695, 6, 60) ############################################################################### # check support for hdf4 def test_netcdf_22(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_hdf4: pytest.skip() ifile = 'data/hdf4/hdifftst2.hdf' # suppress warning gdal.PushErrorHandler('CPLQuietErrorHandler') ds = gdal.Open('NETCDF:' + ifile) gdal.PopErrorHandler() if ds is None: pytest.fail('netcdf driver did not open hdf4 file') else: ds = None ############################################################################### # check support for hdf4 - make sure hdf4 file is not read by netcdf driver def test_netcdf_23(): # don't skip if netcdf is not enabled in GDAL # if gdaltest.netcdf_drv is None: # return 'skip' # if not gdaltest.netcdf_drv_has_hdf4: # return 'skip' # skip test if Hdf4 is not enabled in GDAL if gdal.GetDriverByName('HDF4') is None and \ gdal.GetDriverByName('HDF4Image') is None: pytest.skip() ifile = 'data/hdf4/hdifftst2.hdf' # test with Open() ds = gdal.Open(ifile) if ds is None: pytest.fail('GDAL did not open hdf4 file') else: name = ds.GetDriver().GetDescription() ds = None # return fail if opened with the netCDF driver assert name != 'netCDF', 'netcdf driver opened hdf4 file' # test with Identify() name = gdal.IdentifyDriver(ifile).GetDescription() assert name != 'netCDF', 'netcdf driver was identified for hdf4 file' ############################################################################### # check support for reading attributes (single values and array values) def test_netcdf_24(): if gdaltest.netcdf_drv is None: pytest.skip() vals_global = {'NC_GLOBAL#test': 'testval', 'NC_GLOBAL#valid_range_i': '0,255', 'NC_GLOBAL#valid_min': '10.1', 'NC_GLOBAL#test_b': '1'} vals_band = {'_Unsigned': 'true', 'valid_min': '10.1', 'valid_range_b': '1,10', 'valid_range_d': '0.1111112222222,255.555555555556', 'valid_range_f': '0.1111111,255.5556', 'valid_range_s': '0,255'} return netcdf_check_vars('data/netcdf/nc_vars.nc', vals_global, vals_band) ############################################################################### # check support for NC4 reading attributes (single values and array values) def netcdf_24_nc4(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() vals_global = {'NC_GLOBAL#test': 'testval', 'NC_GLOBAL#test_string': 'testval_string', 'NC_GLOBAL#valid_range_i': '0,255', 'NC_GLOBAL#valid_min': '10.1', 'NC_GLOBAL#test_b': '-100', 'NC_GLOBAL#test_ub': '200', 'NC_GLOBAL#test_s': '-16000', 'NC_GLOBAL#test_us': '32000', 'NC_GLOBAL#test_l': '-2000000000', 'NC_GLOBAL#test_ul': '4000000000'} vals_band = {'test_string_arr': 'test,string,arr', 'valid_min': '10.1', 'valid_range_b': '1,10', 'valid_range_ub': '1,200', 'valid_range_s': '0,255', 'valid_range_us': '0,32000', 'valid_range_l': '0,255', 'valid_range_ul': '0,4000000000', 'valid_range_d': '0.1111112222222,255.555555555556', 'valid_range_f': '0.1111111,255.5556'} return netcdf_check_vars('data/netcdf/nc4_vars.nc', vals_global, vals_band) ############################################################################### # check support for writing attributes (single values and array values) def test_netcdf_25(): if gdaltest.netcdf_drv is None: pytest.skip() netcdf_test_copy('data/netcdf/nc_vars.nc', 1, None, 'tmp/netcdf_25.nc') vals_global = {'NC_GLOBAL#test': 'testval', 'NC_GLOBAL#valid_range_i': '0,255', 'NC_GLOBAL#valid_min': '10.1', 'NC_GLOBAL#test_b': '1'} vals_band = 
{'_Unsigned': 'true', 'valid_min': '10.1', 'valid_range_b': '1,10', 'valid_range_d': '0.1111112222222,255.555555555556', 'valid_range_f': '0.1111111,255.5556', 'valid_range_s': '0,255'} return netcdf_check_vars('tmp/netcdf_25.nc', vals_global, vals_band) ############################################################################### # check support for NC4 writing attributes (single values and array values) def netcdf_25_nc4(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() netcdf_test_copy('data/netcdf/nc4_vars.nc', 1, None, 'tmp/netcdf_25_nc4.nc', ['FORMAT=NC4']) vals_global = {'NC_GLOBAL#test': 'testval', 'NC_GLOBAL#test_string': 'testval_string', 'NC_GLOBAL#valid_range_i': '0,255', 'NC_GLOBAL#valid_min': '10.1', 'NC_GLOBAL#test_b': '-100', 'NC_GLOBAL#test_ub': '200', 'NC_GLOBAL#test_s': '-16000', 'NC_GLOBAL#test_us': '32000', 'NC_GLOBAL#test_l': '-2000000000', 'NC_GLOBAL#test_ul': '4000000000'} vals_band = {'test_string_arr': 'test,string,arr', 'valid_min': '10.1', 'valid_range_b': '1,10', 'valid_range_ub': '1,200', 'valid_range_us': '0,32000', 'valid_range_l': '0,255', 'valid_range_ul': '0,4000000000', 'valid_range_d': '0.1111112222222,255.555555555556', 'valid_range_f': '0.1111111,255.5556', 'valid_range_s': '0,255'} return netcdf_check_vars('tmp/netcdf_25_nc4.nc', vals_global, vals_band) ############################################################################### # check support for WRITE_BOTTOMUP file creation option # use a dummy file with no lon/lat info to force a different checksum # depending on y-axis order def test_netcdf_26(): if gdaltest.netcdf_drv is None: pytest.skip() # test default config test = gdaltest.GDALTest('NETCDF', 'netcdf/int16-nogeo.nc', 1, 4672) gdal.PushErrorHandler('CPLQuietErrorHandler') test.testCreateCopy(check_gt=0, check_srs=0, check_minmax=0) gdal.PopErrorHandler() # test WRITE_BOTTOMUP=NO test = gdaltest.GDALTest('NETCDF', 'netcdf/int16-nogeo.nc', 1, 4855, options=['WRITE_BOTTOMUP=NO']) test.testCreateCopy(check_gt=0, check_srs=0, check_minmax=0) ############################################################################### # check support for GDAL_NETCDF_BOTTOMUP configuration option def test_netcdf_27(): if gdaltest.netcdf_drv is None: pytest.skip() # test default config test = gdaltest.GDALTest('NETCDF', 'netcdf/int16-nogeo.nc', 1, 4672) config_bak = gdal.GetConfigOption('GDAL_NETCDF_BOTTOMUP') gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', None) test.testOpen() gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', config_bak) # test GDAL_NETCDF_BOTTOMUP=NO test = gdaltest.GDALTest('NETCDF', 'netcdf/int16-nogeo.nc', 1, 4855) config_bak = gdal.GetConfigOption('GDAL_NETCDF_BOTTOMUP') gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', 'NO') test.testOpen() gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', config_bak) ############################################################################### # check support for writing multi-dimensional files (helper function) def netcdf_test_4dfile(ofile): # test result file has 8 bands and 0 subdasets (instead of 0 bands and 8 subdatasets) ds = gdal.Open(ofile) assert ds is not None, 'open of copy failed' md = ds.GetMetadata('SUBDATASETS') subds_count = 0 if md is not None: subds_count = len(md) / 2 assert ds.RasterCount == 8 and subds_count == 0, \ ('copy has %d bands (expected 8) and has %d subdatasets' ' (expected 0)' % (ds.RasterCount, subds_count)) ds = None # get file header with ncdump (if available) try: (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h') except 
OSError: print('NOTICE: ncdump not found') return if err is None or 'netcdf library version' not in err: print('NOTICE: ncdump not found') return (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h ' + ofile) assert ret != '' and err == '', 'ncdump failed' # simple dimension tests using ncdump output err = "" if 'int t(time, levelist, lat, lon) ;' not in ret: err = err + 'variable (t) has wrong dimensions or is missing\n' if 'levelist = 2 ;' not in ret: err = err + 'levelist dimension is missing or incorrect\n' if 'int levelist(levelist) ;' not in ret: err = err + 'levelist variable is missing or incorrect\n' if 'time = 4 ;' not in ret: err = err + 'time dimension is missing or incorrect\n' if 'double time(time) ;' not in ret: err = err + 'time variable is missing or incorrect\n' # uncomment this to get full header in output # if err != '': # err = err + ret assert err == '' ############################################################################### # check support for writing multi-dimensional files using CreateCopy() def test_netcdf_28(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/netcdf-4d.nc' ofile = 'tmp/netcdf_28.nc' # copy file netcdf_test_copy(ifile, 0, None, ofile) # test file return netcdf_test_4dfile(ofile) ############################################################################### # Check support for writing multi-dimensional files using gdalwarp. # Requires metadata copy support in gdalwarp (see bug #3898). # First create a vrt file using gdalwarp, then copy file to netcdf. # The workaround is (currently ??) necessary because dimension rolling code is # in netCDFDataset::CreateCopy() and necessary dimension metadata # is not saved to netcdf when using gdalwarp (as the driver does not write # metadata to netcdf file with SetMetadata() and SetMetadataItem()). def test_netcdf_29(): if gdaltest.netcdf_drv is None: pytest.skip() # create tif file using gdalwarp if test_cli_utilities.get_gdalwarp_path() is None: pytest.skip('gdalwarp not found') ifile = 'data/netcdf/netcdf-4d.nc' ofile1 = 'tmp/netcdf_29.vrt' ofile = 'tmp/netcdf_29.nc' warp_cmd = '%s -q -overwrite -of vrt %s %s' %\ (test_cli_utilities.get_gdalwarp_path(), ifile, ofile1) try: (ret, err) = gdaltest.runexternal_out_and_err(warp_cmd) except OSError: pytest.fail('gdalwarp execution failed') assert not (err != '' or ret != ''), \ ('gdalwarp returned error\n' + str(ret) + ' ' + str(err)) # copy vrt to netcdf, with proper dimension rolling netcdf_test_copy(ofile1, 0, None, ofile) # test file netcdf_test_4dfile(ofile) ############################################################################### # check support for file with nan values (bug #4705) def test_netcdf_30(): if gdaltest.netcdf_drv is None: pytest.skip() tst = gdaltest.GDALTest('NetCDF', 'netcdf/trmm-nan.nc', 1, 62519) # We don't want to gum up the test stream output with the # 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message. 
gdal.PushErrorHandler('CPLQuietErrorHandler') result = tst.testOpen() gdal.PopErrorHandler() return result ############################################################################### # check if 2x2 file has proper geotransform # 1 pixel (in width or height) still unsupported because we can't get the pixel dimensions def test_netcdf_31(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/trmm-2x2.nc') ds.GetProjection() gt = ds.GetGeoTransform() gt1 = (-80.0, 0.25, 0.0, -19.5, 0.0, -0.25) assert gt == gt1, ('Incorrect geotransform, got ' + str(gt)) ds = None ############################################################################### # Test NC_UBYTE write/read - netcdf-4 (FORMAT=NC4) only (#5053) def test_netcdf_32(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() ifile = 'data/byte.tif' ofile = 'tmp/netcdf_32.nc' # gdal.SetConfigOption('CPL_DEBUG', 'ON') # test basic read/write netcdf_test_copy(ifile, 1, 4672, ofile, ['FORMAT=NC4']) netcdf_test_copy(ifile, 1, 4672, ofile, ['FORMAT=NC4C']) ############################################################################### # TEST NC_UBYTE metadata read - netcdf-4 (FORMAT=NC4) only (#5053) def test_netcdf_33(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/nc_vars.nc' ofile = 'tmp/netcdf_33.nc' netcdf_test_copy(ifile, 1, None, ofile, ['FORMAT=NC4']) return netcdf_check_vars('tmp/netcdf_33.nc') ############################################################################### # check support for reading large file with chunking and DEFLATE compression # if chunking is not supported within the netcdf driver, this test can take very long def test_netcdf_34(): filename = 'utm-big-chunks.nc' # this timeout is more than enough - on my system takes <1s with fix, about 25 seconds without timeout = 5 if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() if not gdaltest.run_slow_tests(): pytest.skip() try: from multiprocessing import Process except ImportError: pytest.skip('from multiprocessing import Process failed') if not gdaltest.download_file('http://download.osgeo.org/gdal/data/netcdf/' + filename, filename): pytest.skip() sys.stdout.write('.') sys.stdout.flush() tst = gdaltest.GDALTest('NetCDF', '../tmp/cache/' + filename, 1, 31621) # tst.testOpen() gdal.PushErrorHandler('CPLQuietErrorHandler') proc = Process(target=tst.testOpen) proc.start() proc.join(timeout) gdal.PopErrorHandler() # if proc is alive after timeout we must terminate it, and return fail # valgrind detects memory leaks when this occurs (although it should never happen) if proc.is_alive(): proc.terminate() pytest.fail('testOpen() for file %s has reached timeout limit of %d seconds' % (filename, timeout)) ############################################################################### # test writing a long metadata > 8196 chars (bug #5113) def test_netcdf_35(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/netcdf_fixes.nc' ofile = 'tmp/netcdf_35.nc' # copy file netcdf_test_copy(ifile, 0, None, ofile) # test long metadata is copied correctly ds = gdal.Open(ofile) assert ds is not None, 'open of copy failed' md = ds.GetMetadata('') assert 'U#bla' in md, 'U#bla metadata absent' bla = md['U#bla'] assert len(bla) == 9591, \ ('U#bla metadata is of length %d, expecting %d' % (len(bla), 9591)) assert bla[-4:] == '_bla', \ ('U#bla metadata ends with [%s], expecting [%s]' % (bla[-4:], '_bla')) 
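###############################################################################
# Illustrative sketch (added, not an original test): the copy-then-verify
# pattern used throughout this file, combining netcdf_test_copy() and
# netcdf_check_vars(). The input file name and the expected attribute values
# below are hypothetical placeholders; not collected by pytest.

def _sketch_copy_and_check_vars():
    ifile = 'data/netcdf/example_vars.nc'  # hypothetical input file
    ofile = 'tmp/netcdf_sketch.nc'
    netcdf_test_copy(ifile, 1, None, ofile)
    netcdf_check_vars(ofile,
                      vals_global={'NC_GLOBAL#title': 'example'},
                      vals_band={'units': 'm'})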
############################################################################### # test for correct geotransform (bug #5114) def test_netcdf_36(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/netcdf_fixes.nc' ds = gdal.Open(ifile) assert ds is not None, 'open failed' gt = ds.GetGeoTransform() assert gt is not None, 'got no GeoTransform' gt_expected = (-3.498749944898817, 0.0025000042385525173, 0.0, 46.61749818589952, 0.0, -0.001666598849826389) assert gt == gt_expected, \ ('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected))) ############################################################################### # test for correct geotransform with longitude wrap def test_netcdf_36_lonwrap(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/nc_lonwrap.nc' ds = gdal.Open(ifile) assert ds is not None, 'open failed' gt = ds.GetGeoTransform() assert gt is not None, 'got no GeoTransform' gt_expected = (-2.25, 2.5, 0.0, 16.25, 0.0, -2.5) assert gt == gt_expected, \ ('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected))) ############################################################################### # test for reading gaussian grid (bugs #4513 and #5118) def test_netcdf_37(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/reduce-cgcms.nc' gdal.PushErrorHandler('CPLQuietErrorHandler') ds = gdal.Open(ifile) gdal.PopErrorHandler() assert ds is not None, 'open failed' gt = ds.GetGeoTransform() assert gt is not None, 'got no GeoTransform' gt_expected = (-1.875, 3.75, 0.0, 89.01354337620016, 0.0, -3.7088976406750063) assert gt == gt_expected, \ ('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected))) md = ds.GetMetadata('GEOLOCATION2') assert md and 'Y_VALUES' in md, 'did not get 1D geolocation' y_vals = md['Y_VALUES'] assert y_vals.startswith('{-87.15909455586265,-83.47893666931698,') and y_vals.endswith(',83.47893666931698,87.15909455586265}'), \ 'got incorrect values in 1D geolocation' ############################################################################### # test for correct geotransform of projected data in km units (bug #5118) def test_netcdf_38(): if gdaltest.netcdf_drv is None: pytest.skip() ifile = 'data/netcdf/bug5118.nc' gdal.PushErrorHandler('CPLQuietErrorHandler') ds = gdal.Open(ifile) gdal.PopErrorHandler() assert ds is not None, 'open failed' gt = ds.GetGeoTransform() assert gt is not None, 'got no GeoTransform' gt_expected = (-1659.3478178136488, 13.545000861672793, 0.0, 2330.054725283668, 0.0, -13.54499744233631) assert gt == gt_expected, \ ('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected))) ############################################################################### # Test VRT and NETCDF: def test_netcdf_39(): if gdaltest.netcdf_drv is None: pytest.skip() shutil.copy('data/netcdf/two_vars_scale_offset.nc', 'tmp') src_ds = gdal.Open('NETCDF:tmp/two_vars_scale_offset.nc:z') out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds) out_ds = None src_ds = None ds = gdal.Open('tmp/netcdf_39.vrt') cs = ds.GetRasterBand(1).Checksum() ds = None gdal.Unlink('tmp/two_vars_scale_offset.nc') gdal.Unlink('tmp/netcdf_39.vrt') assert cs == 65463 shutil.copy('data/netcdf/two_vars_scale_offset.nc', 'tmp') src_ds = gdal.Open('NETCDF:"tmp/two_vars_scale_offset.nc":z') out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds) out_ds = None src_ds = None ds = gdal.Open('tmp/netcdf_39.vrt') cs = ds.GetRasterBand(1).Checksum() ds = None 
gdal.Unlink('tmp/two_vars_scale_offset.nc') gdal.Unlink('tmp/netcdf_39.vrt') assert cs == 65463 shutil.copy('data/netcdf/two_vars_scale_offset.nc', 'tmp') src_ds = gdal.Open('NETCDF:"%s/tmp/two_vars_scale_offset.nc":z' % os.getcwd()) out_ds = gdal.GetDriverByName('VRT').CreateCopy('%s/tmp/netcdf_39.vrt' % os.getcwd(), src_ds) out_ds = None src_ds = None ds = gdal.Open('tmp/netcdf_39.vrt') cs = ds.GetRasterBand(1).Checksum() ds = None gdal.Unlink('tmp/two_vars_scale_offset.nc') gdal.Unlink('tmp/netcdf_39.vrt') assert cs == 65463 src_ds = gdal.Open('NETCDF:"%s/data/netcdf/two_vars_scale_offset.nc":z' % os.getcwd()) out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds) del out_ds src_ds = None ds = gdal.Open('tmp/netcdf_39.vrt') cs = ds.GetRasterBand(1).Checksum() ds = None gdal.Unlink('tmp/netcdf_39.vrt') assert cs == 65463 ############################################################################### # Check support of reading of chunked bottom-up files. def test_netcdf_40(): if gdaltest.netcdf_drv is None or not gdaltest.netcdf_drv_has_nc4: pytest.skip() return netcdf_test_copy('data/netcdf/bug5291.nc', 0, None, 'tmp/netcdf_40.nc') ############################################################################### # Test support for georeferenced file without CF convention def test_netcdf_41(): if gdaltest.netcdf_drv is None: pytest.skip() with gdaltest.error_handler(): ds = gdal.Open('data/netcdf/byte_no_cf.nc') assert ds.GetGeoTransform() == (440720, 60, 0, 3751320, 0, -60) assert ds.GetProjectionRef().find('26711') >= 0, ds.GetGeoTransform() ############################################################################### # Test writing & reading GEOLOCATION array def test_netcdf_42(): if gdaltest.netcdf_drv is None: pytest.skip() src_ds = gdal.GetDriverByName('MEM').Create('', 60, 39, 1) src_ds.SetMetadata([ 'LINE_OFFSET=0', 'LINE_STEP=1', 'PIXEL_OFFSET=0', 'PIXEL_STEP=1', 'SRS=GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AXIS["Lat",NORTH],AXIS["Long",EAST],AUTHORITY["EPSG","4326"]]', 'X_BAND=1', 'X_DATASET=../gcore/data/sstgeo.tif', 'Y_BAND=2', 'Y_DATASET=../gcore/data/sstgeo.tif'], 'GEOLOCATION') sr = osr.SpatialReference() sr.ImportFromEPSG(32631) src_ds.SetProjection(sr.ExportToWkt()) gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_42.nc', src_ds) ds = gdal.Open('tmp/netcdf_42.nc') assert (ds.GetMetadata('GEOLOCATION') == { 'LINE_OFFSET': '0', 'X_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lon', 'PIXEL_STEP': '1', 'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]', 'PIXEL_OFFSET': '0', 'X_BAND': '1', 'LINE_STEP': '1', 'Y_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lat', 'Y_BAND': '1'}) ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lon') assert ds.GetRasterBand(1).Checksum() == 36043 ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lat') assert ds.GetRasterBand(1).Checksum() == 33501 ############################################################################### # Test reading GEOLOCATION array from geotransform (non default) def test_netcdf_43(): if gdaltest.netcdf_drv is None: pytest.skip() src_ds = gdal.Open('data/byte.tif') 
gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_43.nc', src_ds, options=['WRITE_LONLAT=YES']) ds = gdal.Open('tmp/netcdf_43.nc') assert (ds.GetMetadata('GEOLOCATION') == { 'LINE_OFFSET': '0', 'X_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lon', 'PIXEL_STEP': '1', 'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]', 'PIXEL_OFFSET': '0', 'X_BAND': '1', 'LINE_STEP': '1', 'Y_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lat', 'Y_BAND': '1'}) tmp_ds = gdal.Warp('', 'tmp/netcdf_43.nc', options = '-f MEM -geoloc') gt = tmp_ds.GetGeoTransform() assert gt[0] == pytest.approx(-117.3, abs=1), gt assert gt[3] == pytest.approx(33.9, abs=1), gt ############################################################################### # Test NC_USHORT/UINT read/write - netcdf-4 only (#6337) def test_netcdf_44(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() for f, md5 in ('data/netcdf/ushort.nc', 18), ('data/netcdf/uint.nc', 10): netcdf_test_copy(f, 1, md5, 'tmp/netcdf_44.nc', ['FORMAT=NC4']) ############################################################################### # Test reading a vector NetCDF 3 file def test_netcdf_45(): if gdaltest.netcdf_drv is None: pytest.skip() # Test that a vector cannot be opened in raster-only mode ds = gdal.OpenEx('data/netcdf/test_ogr_nc3.nc', gdal.OF_RASTER) assert ds is None # Test that a raster cannot be opened in vector-only mode ds = gdal.OpenEx('data/netcdf/cf-bug636.nc', gdal.OF_VECTOR) assert ds is None ds = gdal.OpenEx('data/netcdf/test_ogr_nc3.nc', gdal.OF_VECTOR) with gdaltest.error_handler(): gdal.VectorTranslate('/vsimem/netcdf_45.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_45.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field "POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125 "POINT (1 2)",,,,,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,,, """ assert content == expected_content fp = gdal.VSIFOpenL('/vsimem/netcdf_45.csvt', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer """ assert content == expected_content gdal.Unlink('/vsimem/netcdf_45.csv') gdal.Unlink('/vsimem/netcdf_45.csvt') gdal.Unlink('/vsimem/netcdf_45.prj') ############################################################################### # Test reading a vector NetCDF 3 file def test_netcdf_46(): if gdaltest.netcdf_drv is None: pytest.skip() if test_cli_utilities.get_test_ogrsf_path() is None: pytest.skip() ret = 
gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro data/netcdf/test_ogr_nc3.nc') assert ret.find('INFO') != -1 and ret.find('ERROR') == -1 ############################################################################### # Test reading a vector NetCDF 4 file def test_netcdf_47(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() # Test that a vector cannot be opened in raster-only mode with gdaltest.error_handler(): ds = gdal.OpenEx('data/netcdf/test_ogr_nc4.nc', gdal.OF_RASTER) assert ds is None ds = gdal.OpenEx('data/netcdf/test_ogr_nc4.nc', gdal.OF_VECTOR) with gdaltest.error_handler(): gdal.VectorTranslate('/vsimem/netcdf_47.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_47.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue "POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123, "POINT (1 2)",,,,,,,,,,,,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,,,,,,,,,, """ assert content == expected_content fp = gdal.VSIFOpenL('/vsimem/netcdf_47.csvt', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real """ assert content == expected_content gdal.Unlink('/vsimem/netcdf_47.csv') gdal.Unlink('/vsimem/netcdf_47.csvt') gdal.Unlink('/vsimem/netcdf_47.prj') ############################################################################### # Test reading a vector NetCDF 3 file without any geometry def test_netcdf_48(): if gdaltest.netcdf_drv is None: pytest.skip() with gdaltest.error_handler(): ds = gdal.OpenEx('data/netcdf/test_ogr_no_xyz_var.nc', gdal.OF_VECTOR) lyr = ds.GetLayer(0) assert lyr.GetGeomType() == ogr.wkbNone f = lyr.GetNextFeature() assert f['int32'] == 1 ############################################################################### # Test reading a vector NetCDF 3 file with X,Y,Z vars as float def test_netcdf_49(): if gdaltest.netcdf_drv is None: pytest.skip() with gdaltest.error_handler(): ds = gdal.OpenEx('data/netcdf/test_ogr_xyz_float.nc', gdal.OF_VECTOR) gdal.VectorTranslate('/vsimem/netcdf_49.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_49.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,int32 "POINT Z (1 2 3)",1 "POINT (1 2)", ,, """ assert content == expected_content gdal.Unlink('/vsimem/netcdf_49.csv') 
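###############################################################################
# Illustrative sketch (added, not an original test): minimal creation of a
# vector netCDF file with the GEOMETRY_ENCODING=WKT creation option, mirroring
# the options exercised by the creation tests below. The file and layer names
# are hypothetical; not collected by pytest.

def _sketch_create_wkt_vector_nc(fname='tmp/netcdf_sketch_vector.nc'):
    ds = ogr.GetDriverByName('netCDF').CreateDataSource(fname, options=['GEOMETRY_ENCODING=WKT'])
    lyr = ds.CreateLayer('sketch')
    lyr.CreateField(ogr.FieldDefn('txt', ogr.OFTString))
    f = ogr.Feature(lyr.GetLayerDefn())
    f['txt'] = 'example'
    f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)'))
    lyr.CreateFeature(f)
    ds = None
    gdal.Unlink(fname)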
############################################################################### # Test creating a vector NetCDF 3 file with WKT geometry field def test_netcdf_50(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR) out_ds = gdal.VectorTranslate('tmp/netcdf_50.nc', ds, format='netCDF', layerCreationOptions=['WKT_DEFAULT_WIDTH=1'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT']) src_lyr = ds.GetLayer(0) src_lyr.ResetReading() out_lyr = out_ds.GetLayer(0) out_lyr.ResetReading() src_f = src_lyr.GetNextFeature() out_f = out_lyr.GetNextFeature() src_f.SetFID(-1) out_f.SetFID(-1) src_json = src_f.ExportToJson() out_json = out_f.ExportToJson() assert src_json == out_json out_ds = None out_ds = gdal.OpenEx('tmp/netcdf_50.nc', gdal.OF_VECTOR) out_lyr = out_ds.GetLayer(0) srs = out_lyr.GetSpatialRef().ExportToWkt() assert 'PROJCS["OSGB 1936' in srs out_f = out_lyr.GetNextFeature() out_f.SetFID(-1) out_json = out_f.ExportToJson() assert src_json == out_json out_ds = None gdal.Unlink('tmp/netcdf_50.nc') ############################################################################### # Test creating a vector NetCDF 3 file with X,Y,Z fields def test_netcdf_51(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.OpenEx('data/netcdf/test_ogr_nc3.nc', gdal.OF_VECTOR) # Test autogrow of string fields gdal.VectorTranslate('tmp/netcdf_51.nc', ds, format='netCDF', layerCreationOptions=['STRING_DEFAULT_WIDTH=1'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT']) with gdaltest.error_handler(): ds = gdal.OpenEx('tmp/netcdf_51.nc', gdal.OF_VECTOR) gdal.VectorTranslate('/vsimem/netcdf_51.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT']) ds = None fp = gdal.VSIFOpenL('/vsimem/netcdf_51.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field "POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125 "POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,,, """ assert content == expected_content fp = gdal.VSIFOpenL('/vsimem/netcdf_51.csvt', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer """ assert content == expected_content ds = gdal.OpenEx('tmp/netcdf_51.nc', gdal.OF_VECTOR | gdal.OF_UPDATE) lyr = ds.GetLayer(0) lyr.CreateField(ogr.FieldDefn('extra', ogr.OFTInteger)) lyr.CreateField(ogr.FieldDefn('extra_str', ogr.OFTString)) f = lyr.GetNextFeature() assert f is not None f['extra'] = 5 f['extra_str'] = 'foobar' assert lyr.CreateFeature(f) == 0 ds = None ds = gdal.OpenEx('tmp/netcdf_51.nc', gdal.OF_VECTOR) lyr = ds.GetLayer(0) f = lyr.GetFeature(lyr.GetFeatureCount()) assert f['int32'] == 1 and f['extra'] == 5 and f['extra_str'] == 'foobar' f = None ds = None import netcdf_cf 
netcdf_cf.netcdf_cf_setup() if gdaltest.netcdf_cf_method is not None: netcdf_cf.netcdf_cf_check_file('tmp/netcdf_51.nc', 'auto', False) gdal.Unlink('tmp/netcdf_51.nc') gdal.Unlink('tmp/netcdf_51.csv') gdal.Unlink('tmp/netcdf_51.csvt') gdal.Unlink('/vsimem/netcdf_51.csv') gdal.Unlink('/vsimem/netcdf_51.csvt') gdal.Unlink('/vsimem/netcdf_51.prj') ############################################################################### # Test creating a vector NetCDF 3 file with X,Y,Z fields with WRITE_GDAL_TAGS=NO def test_netcdf_51_no_gdal_tags(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.OpenEx('data/netcdf/test_ogr_nc3.nc', gdal.OF_VECTOR) gdal.VectorTranslate('tmp/netcdf_51_no_gdal_tags.nc', ds, format='netCDF', datasetCreationOptions=['WRITE_GDAL_TAGS=NO', 'GEOMETRY_ENCODING=WKT']) with gdaltest.error_handler(): ds = gdal.OpenEx('tmp/netcdf_51_no_gdal_tags.nc', gdal.OF_VECTOR) gdal.VectorTranslate('/vsimem/netcdf_51_no_gdal_tags.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) ds = None fp = gdal.VSIFOpenL('/vsimem/netcdf_51_no_gdal_tags.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x1,byte_field "POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125 "POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,,, """ assert content == expected_content fp = gdal.VSIFOpenL('/vsimem/netcdf_51_no_gdal_tags.csvt', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String(10),Date,DateTime,DateTime,Real,Real,Integer,Integer,Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer """ assert content == expected_content gdal.Unlink('tmp/netcdf_51_no_gdal_tags.nc') gdal.Unlink('tmp/netcdf_51_no_gdal_tags.csv') gdal.Unlink('tmp/netcdf_51_no_gdal_tags.csvt') gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.csv') gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.csvt') gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.prj') ############################################################################### # Test creating a vector NetCDF 4 file with X,Y,Z fields def test_netcdf_52(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() if gdaltest.netcdf_drv_version in ('4.6.3', '4.7.0'): pytest.skip('buggy netCDF version: https://github.com/Unidata/netcdf-c/pull/1442') ds = gdal.OpenEx('data/netcdf/test_ogr_nc4.nc', gdal.OF_VECTOR) gdal.VectorTranslate('tmp/netcdf_52.nc', ds, format='netCDF', datasetCreationOptions=['FORMAT=NC4', 'GEOMETRY_ENCODING=WKT']) with gdaltest.error_handler(): ds = gdal.OpenEx('tmp/netcdf_52.nc', gdal.OF_VECTOR) gdal.VectorTranslate('/vsimem/netcdf_52.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) ds = None fp = gdal.VSIFOpenL('/vsimem/netcdf_52.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = 
"""WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue "POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123, "POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,,,,,,,,,,,, """ assert content == expected_content fp = gdal.VSIFOpenL('/vsimem/netcdf_52.csvt', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real """ assert content == expected_content ds = gdal.OpenEx('tmp/netcdf_52.nc', gdal.OF_VECTOR | gdal.OF_UPDATE) lyr = ds.GetLayer(0) lyr.CreateField(ogr.FieldDefn('extra', ogr.OFTInteger)) f = lyr.GetNextFeature() assert f is not None f['extra'] = 5 assert lyr.CreateFeature(f) == 0 ds = None ds = gdal.OpenEx('tmp/netcdf_52.nc', gdal.OF_VECTOR) lyr = ds.GetLayer(0) f = lyr.GetFeature(lyr.GetFeatureCount()) assert f['int32'] == 1 and f['extra'] == 5 f = None ds = None import netcdf_cf netcdf_cf.netcdf_cf_setup() if gdaltest.netcdf_cf_method is not None: netcdf_cf.netcdf_cf_check_file('tmp/netcdf_52.nc', 'auto', False) gdal.Unlink('tmp/netcdf_52.nc') gdal.Unlink('tmp/netcdf_52.csv') gdal.Unlink('tmp/netcdf_52.csvt') gdal.Unlink('/vsimem/netcdf_52.csv') gdal.Unlink('/vsimem/netcdf_52.csvt') gdal.Unlink('/vsimem/netcdf_52.prj') ############################################################################### # Test creating a vector NetCDF 4 file with WKT geometry field def test_netcdf_53(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR) out_ds = gdal.VectorTranslate('tmp/netcdf_53.nc', ds, format='netCDF', datasetCreationOptions=['FORMAT=NC4', 'GEOMETRY_ENCODING=WKT']) src_lyr = ds.GetLayer(0) src_lyr.ResetReading() out_lyr = out_ds.GetLayer(0) out_lyr.ResetReading() src_f = src_lyr.GetNextFeature() out_f = out_lyr.GetNextFeature() src_f.SetFID(-1) out_f.SetFID(-1) src_json = src_f.ExportToJson() out_json = out_f.ExportToJson() assert src_json == out_json out_ds = None out_ds = gdal.OpenEx('tmp/netcdf_53.nc', gdal.OF_VECTOR) out_lyr = out_ds.GetLayer(0) srs = out_lyr.GetSpatialRef().ExportToWkt() assert 'PROJCS["OSGB 1936' in srs out_f = out_lyr.GetNextFeature() out_f.SetFID(-1) out_json = out_f.ExportToJson() assert src_json == out_json out_ds = None gdal.Unlink('tmp/netcdf_53.nc') ############################################################################### # Test appending to a vector NetCDF 4 file with unusual types (ubyte, ushort...) 
def test_netcdf_54(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() if gdaltest.netcdf_drv_version in ('4.6.3', '4.7.0'): pytest.skip('buggy netCDF version: https://github.com/Unidata/netcdf-c/pull/1442') shutil.copy('data/netcdf/test_ogr_nc4.nc', 'tmp/netcdf_54.nc') ds = gdal.OpenEx('tmp/netcdf_54.nc', gdal.OF_VECTOR | gdal.OF_UPDATE) lyr = ds.GetLayer(0) f = lyr.GetNextFeature() assert f is not None f['int32'] += 1 f.SetFID(-1) f.ExportToJson() src_json = f.ExportToJson() assert lyr.CreateFeature(f) == 0 ds = None ds = gdal.OpenEx('tmp/netcdf_54.nc', gdal.OF_VECTOR) lyr = ds.GetLayer(0) f = lyr.GetFeature(lyr.GetFeatureCount()) f.SetFID(-1) out_json = f.ExportToJson() f = None ds = None gdal.Unlink('tmp/netcdf_54.nc') assert src_json == out_json ############################################################################### # Test auto-grow of bidimensional char variables in a vector NetCDF 4 file def test_netcdf_55(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() shutil.copy('data/netcdf/test_ogr_nc4.nc', 'tmp/netcdf_55.nc') ds = gdal.OpenEx('tmp/netcdf_55.nc', gdal.OF_VECTOR | gdal.OF_UPDATE) lyr = ds.GetLayer(0) f = lyr.GetNextFeature() assert f is not None f['twodimstringchar'] = 'abcd' f.SetFID(-1) f.ExportToJson() src_json = f.ExportToJson() assert lyr.CreateFeature(f) == 0 ds = None ds = gdal.OpenEx('tmp/netcdf_55.nc', gdal.OF_VECTOR) lyr = ds.GetLayer(0) f = lyr.GetFeature(lyr.GetFeatureCount()) f.SetFID(-1) out_json = f.ExportToJson() f = None ds = None gdal.Unlink('tmp/netcdf_55.nc') assert src_json == out_json ############################################################################### # Test truncation of bidimensional char variables and WKT in a vector NetCDF 3 file def test_netcdf_56(): if gdaltest.netcdf_drv is None: pytest.skip() ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_56.nc', options=['GEOMETRY_ENCODING=WKT']) # Test auto-grow of WKT field lyr = ds.CreateLayer('netcdf_56', options=['AUTOGROW_STRINGS=NO', 'STRING_DEFAULT_WIDTH=5', 'WKT_DEFAULT_WIDTH=5']) lyr.CreateField(ogr.FieldDefn('txt')) f = ogr.Feature(lyr.GetLayerDefn()) f['txt'] = '0123456789' f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)')) with gdaltest.error_handler(): ret = lyr.CreateFeature(f) assert ret == 0 ds = None ds = gdal.OpenEx('tmp/netcdf_56.nc', gdal.OF_VECTOR) lyr = ds.GetLayer(0) f = lyr.GetFeature(lyr.GetFeatureCount()) if f['txt'] != '01234' or f.GetGeometryRef() is not None: f.DumpReadable() pytest.fail() ds = None gdal.Unlink('tmp/netcdf_56.nc') ############################################################################### # Test one layer per file creation def test_netcdf_57(): if gdaltest.netcdf_drv is None: pytest.skip() try: shutil.rmtree('tmp/netcdf_57') except OSError: pass with gdaltest.error_handler(): ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options=['MULTIPLE_LAYERS=SEPARATE_FILES', 'GEOMETRY_ENCODING=WKT']) assert ds is None open('tmp/netcdf_57', 'wb').close() with gdaltest.error_handler(): ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options=['MULTIPLE_LAYERS=SEPARATE_FILES', 'GEOMETRY_ENCODING=WKT']) assert ds is None os.unlink('tmp/netcdf_57') ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_57', options=['MULTIPLE_LAYERS=SEPARATE_FILES', 'GEOMETRY_ENCODING=WKT']) for ilayer in range(2): lyr = ds.CreateLayer('lyr%d' % ilayer) 
lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTInteger)) f = ogr.Feature(lyr.GetLayerDefn()) f['lyr_id'] = ilayer lyr.CreateFeature(f) ds = None for ilayer in range(2): ds = ogr.Open('tmp/netcdf_57/lyr%d.nc' % ilayer) lyr = ds.GetLayer(0) f = lyr.GetNextFeature() assert f['lyr_id'] == ilayer ds = None shutil.rmtree('tmp/netcdf_57') ############################################################################### # Test one layer per group (NC4) def test_netcdf_58(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_58.nc', options=['FORMAT=NC4', 'MULTIPLE_LAYERS=SEPARATE_GROUPS', 'GEOMETRY_ENCODING=WKT']) for ilayer in range(2): # Make sure auto-grow will happen to test this works well with multiple groups lyr = ds.CreateLayer('lyr%d' % ilayer, geom_type=ogr.wkbNone, options=['USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1']) lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTString)) f = ogr.Feature(lyr.GetLayerDefn()) f['lyr_id'] = 'lyr_%d' % ilayer lyr.CreateFeature(f) ds = None ds = ogr.Open('tmp/netcdf_58.nc') for ilayer in range(2): lyr = ds.GetLayer(ilayer) f = lyr.GetNextFeature() assert f['lyr_id'] == 'lyr_%d' % ilayer ds = None gdal.Unlink('tmp/netcdf_58.nc') ############################################################################### # check for UnitType set/get. def test_netcdf_59(): if gdaltest.netcdf_drv is None: pytest.skip() # get ds = gdal.Open('data/netcdf/unittype.nc') unit = ds.GetRasterBand(1).GetUnitType() assert unit == 'm/s', ('Incorrect unit(%s)' % unit) ds = None # set tst = gdaltest.GDALTest('NetCDF', 'netcdf/unittype.nc', 1, 4672) return tst.testSetUnitType() ############################################################################### # Test reading a "Indexed ragged array representation of profiles" v1.6.0 H3.5 # http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_indexed_ragged_array_representation_of_profiles def test_netcdf_60(): if gdaltest.netcdf_drv is None: pytest.skip() # Test that a vector cannot be opened in raster-only mode ds = gdal.OpenEx('data/netcdf/profile.nc', gdal.OF_RASTER) assert ds is None ds = gdal.OpenEx('data/netcdf/profile.nc', gdal.OF_VECTOR) assert ds is not None with gdaltest.error_handler(): gdal.VectorTranslate('/vsimem/netcdf_60.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_60.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,profile,id,station,foo "POINT Z (2 49 100)",1,1,Palo Alto,bar "POINT Z (3 50 50)",2,2,Santa Fe,baz "POINT Z (2 49 200)",1,3,Palo Alto,baw "POINT Z (3 50 100)",2,4,Santa Fe,baz2 """ assert content == expected_content gdal.Unlink('/vsimem/netcdf_60.csv') ############################################################################### # Test appending to a "Indexed ragged array representation of profiles" v1.6.0 H3.5 def test_netcdf_61(): if gdaltest.netcdf_drv is None: pytest.skip() shutil.copy('data/netcdf/profile.nc', 'tmp/netcdf_61.nc') ds = gdal.VectorTranslate('tmp/netcdf_61.nc', 'data/netcdf/profile.nc', accessMode='append') gdal.VectorTranslate('/vsimem/netcdf_61.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_61.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') 
gdal.VSIFCloseL(fp) expected_content = """WKT,profile,id,station,foo "POINT Z (2 49 100)",1,1,Palo Alto,bar "POINT Z (3 50 50)",2,2,Santa Fe,baz "POINT Z (2 49 200)",1,3,Palo Alto,baw "POINT Z (3 50 100)",2,4,Santa Fe,baz2 "POINT Z (2 49 100)",1,1,Palo Alto,bar "POINT Z (3 50 50)",2,2,Santa Fe,baz "POINT Z (2 49 200)",1,3,Palo Alto,baw "POINT Z (3 50 100)",2,4,Santa Fe,baz2 """ assert content == expected_content gdal.Unlink('/vsimem/netcdf_61.csv') gdal.Unlink('/vsimem/netcdf_61.nc') ############################################################################### # Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5 def test_netcdf_62(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.VectorTranslate('tmp/netcdf_62.nc', 'data/netcdf/profile.nc', format='netCDF', layerCreationOptions=['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_INIT_SIZE=1', 'PROFILE_VARIABLES=station'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT']) gdal.VectorTranslate('/vsimem/netcdf_62.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_62.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,profile,id,station,foo "POINT Z (2 49 100)",1,1,Palo Alto,bar "POINT Z (3 50 50)",2,2,Santa Fe,baz "POINT Z (2 49 200)",1,3,Palo Alto,baw "POINT Z (3 50 100)",2,4,Santa Fe,baz2 """ assert content == expected_content gdal.Unlink('/vsimem/netcdf_62.csv') def test_netcdf_62_ncdump_check(): if gdaltest.netcdf_drv is None: pytest.skip() # get file header with ncdump (if available) try: (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h') except OSError: err = None if err is not None and 'netcdf library version' in err: (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h tmp/netcdf_62.nc') assert ('profile = 2' in ret and \ 'record = UNLIMITED' in ret and \ 'profile:cf_role = "profile_id"' in ret and \ 'parentIndex:instance_dimension = "profile"' in ret and \ ':featureType = "profile"' in ret and \ 'char station(profile' in ret and \ 'char foo(record' in ret) else: pytest.skip() def test_netcdf_62_cf_check(): if gdaltest.netcdf_drv is None: pytest.skip() import netcdf_cf netcdf_cf.netcdf_cf_setup() if gdaltest.netcdf_cf_method is not None: netcdf_cf.netcdf_cf_check_file('tmp/netcdf_62.nc', 'auto', False) gdal.Unlink('/vsimem/netcdf_62.nc') ############################################################################### # Test creating a NC4 "Indexed ragged array representation of profiles" v1.6.0 H3.5 def test_netcdf_63(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() shutil.copy('data/netcdf/profile.nc', 'tmp/netcdf_63.nc') ds = gdal.VectorTranslate('tmp/netcdf_63.nc', 'data/netcdf/profile.nc', format='netCDF', datasetCreationOptions=['FORMAT=NC4', 'GEOMETRY_ENCODING=WKT'], layerCreationOptions=['FEATURE_TYPE=PROFILE', \ 'USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1']) gdal.VectorTranslate('/vsimem/netcdf_63.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_63.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,profile,id,station,foo "POINT Z (2 49 100)",1,1,Palo Alto,bar "POINT Z (3 50 50)",2,2,Santa Fe,baz "POINT Z (2 49 200)",1,3,Palo Alto,baw "POINT Z (3 50 100)",2,4,Santa Fe,baz2 """ assert content 
== expected_content gdal.Unlink('/vsimem/netcdf_63.csv') def test_netcdf_63_ncdump_check(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() # get file header with ncdump (if available) try: (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h') except OSError: err = None if err is not None and 'netcdf library version' in err: (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h tmp/netcdf_63.nc') assert ('profile = UNLIMITED' in ret and \ 'record = UNLIMITED' in ret and \ 'profile:cf_role = "profile_id"' in ret and \ 'parentIndex:instance_dimension = "profile"' in ret and \ ':featureType = "profile"' in ret and \ 'char station(record' in ret) else: gdal.Unlink('/vsimem/netcdf_63.nc') pytest.skip() gdal.Unlink('/vsimem/netcdf_63.nc') ############################################################################### # Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5 # but without a profile field. def test_netcdf_64(): if gdaltest.netcdf_drv is None: pytest.skip() gdal.VectorTranslate('tmp/netcdf_64.nc', 'data/netcdf/profile.nc', format='netCDF', selectFields=['id,station,foo'], layerCreationOptions=['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_NAME=profile_dim', 'PROFILE_DIM_INIT_SIZE=1', 'LEGACY=WKT'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT']) gdal.VectorTranslate('/vsimem/netcdf_64.csv', 'tmp/netcdf_64.nc', format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_64.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,profile_dim,id,station,foo "POINT Z (2 49 100)",0,1,Palo Alto,bar "POINT Z (3 50 50)",1,2,Santa Fe,baz "POINT Z (2 49 200)",0,3,Palo Alto,baw "POINT Z (3 50 100)",1,4,Santa Fe,baz2 """ assert content == expected_content gdal.Unlink('/vsimem/netcdf_64.csv') gdal.Unlink('/vsimem/netcdf_64.nc') ############################################################################### # Test creating a NC4 file with empty string fields / WKT fields # (they must be filled as empty strings to avoid crashes in netcdf lib) def test_netcdf_65(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_65.nc', options=['FORMAT=NC4', 'GEOMETRY_ENCODING=WKT']) lyr = ds.CreateLayer('test') lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString)) f = ogr.Feature(lyr.GetLayerDefn()) lyr.CreateFeature(f) ds = None ds = ogr.Open('tmp/netcdf_65.nc') lyr = ds.GetLayer(0) f = lyr.GetNextFeature() if f['str'] != '': f.DumpReadable() pytest.fail() ds = None gdal.Unlink('tmp/netcdf_65.nc') ############################################################################### # Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5 # from a config file def test_netcdf_66(): if gdaltest.netcdf_drv is None: pytest.skip() # First trying with no so good configs with gdaltest.error_handler(): gdal.VectorTranslate('tmp/netcdf_66.nc', 'data/netcdf/profile.nc', format='netCDF', datasetCreationOptions=['CONFIG_FILE=not_existing']) with gdaltest.error_handler(): gdal.VectorTranslate('tmp/netcdf_66.nc', 'data/netcdf/profile.nc', format='netCDF', datasetCreationOptions=['CONFIG_FILE=<Configuration>']) myconfig = \ """<Configuration> <!-- comment --> <unrecognized_elt/> <DatasetCreationOption/> <DatasetCreationOption name="x"/> <DatasetCreationOption value="x"/> 
<LayerCreationOption/> <LayerCreationOption name="x"/> <LayerCreationOption value="x"/> <Attribute/> <Attribute name="foo"/> <Attribute value="foo"/> <Attribute name="foo" value="bar" type="unsupported"/> <Field/> <Field name="x"> <!-- comment --> <unrecognized_elt/> </Field> <Field name="station" main_dim="non_existing"/> <Layer/> <Layer name="x"> <!-- comment --> <unrecognized_elt/> <LayerCreationOption/> <LayerCreationOption name="x"/> <LayerCreationOption value="x"/> <Attribute/> <Attribute name="foo"/> <Attribute value="foo"/> <Attribute name="foo" value="bar" type="unsupported"/> <Field/> </Layer> </Configuration> """ with gdaltest.error_handler(): gdal.VectorTranslate('tmp/netcdf_66.nc', 'data/netcdf/profile.nc', format='netCDF', datasetCreationOptions=['CONFIG_FILE=' + myconfig, 'GEOMETRY_ENCODING=WKT']) # Now with a correct configuration myconfig = \ """<Configuration> <DatasetCreationOption name="WRITE_GDAL_TAGS" value="NO"/> <LayerCreationOption name="STRING_DEFAULT_WIDTH" value="1"/> <Attribute name="foo" value="bar"/> <Attribute name="foo2" value="bar2"/> <Field name="id"> <Attribute name="my_extra_attribute" value="5.23" type="double"/> </Field> <Field netcdf_name="lon"> <!-- edit predefined variable --> <Attribute name="my_extra_lon_attribute" value="foo"/> </Field> <Layer name="profile" netcdf_name="my_profile"> <LayerCreationOption name="FEATURE_TYPE" value="PROFILE"/> <LayerCreationOption name="RECORD_DIM_NAME" value="obs"/> <Attribute name="foo" value="123" type="integer"/> <!-- override global one --> <Field name="station" netcdf_name="my_station" main_dim="obs"> <Attribute name="long_name" value="my station attribute"/> </Field> <Field netcdf_name="lat"> <!-- edit predefined variable --> <Attribute name="long_name" value=""/> <!-- remove predefined attribute --> </Field> </Layer> </Configuration> """ gdal.VectorTranslate('tmp/netcdf_66.nc', 'data/netcdf/profile.nc', format='netCDF', datasetCreationOptions=['CONFIG_FILE=' + myconfig, 'GEOMETRY_ENCODING=WKT']) gdal.VectorTranslate('/vsimem/netcdf_66.csv', 'tmp/netcdf_66.nc', format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED']) fp = gdal.VSIFOpenL('/vsimem/netcdf_66.csv', 'rb') if fp is not None: content = gdal.VSIFReadL(1, 10000, fp).decode('ascii') gdal.VSIFCloseL(fp) expected_content = """WKT,profile,id,my_station,foo "POINT Z (2 49 100)",1,1,Palo Alto,bar "POINT Z (3 50 50)",2,2,Santa Fe,baz "POINT Z (2 49 200)",1,3,Palo Alto,baw "POINT Z (3 50 100)",2,4,Santa Fe,baz2 """ assert content == expected_content gdal.Unlink('/vsimem/netcdf_66.csv') def test_netcdf_66_ncdump_check(): if gdaltest.netcdf_drv is None: pytest.skip() # get file header with ncdump (if available) try: (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h') except OSError: err = None if err is not None and 'netcdf library version' in err: (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h tmp/netcdf_66.nc') assert ('char my_station(obs, my_station_max_width)' in ret and \ 'my_station:long_name = "my station attribute"' in ret and \ 'lon:my_extra_lon_attribute = "foo"' in ret and \ 'lat:long_name' not in ret and \ 'id:my_extra_attribute = 5.23' in ret and \ 'profile:cf_role = "profile_id"' in ret and \ 'parentIndex:instance_dimension = "profile"' in ret and \ ':featureType = "profile"' in ret) else: gdal.Unlink('/vsimem/netcdf_66.nc') pytest.skip() gdal.Unlink('/vsimem/netcdf_66.nc') ############################################################################### # ticket #5950: optimize 
# IReadBlock() and CheckData() handling of partial
# blocks in the x axis; check for partial block reading.


def test_netcdf_67():

    if gdaltest.netcdf_drv is None:
        pytest.skip()
    if not gdaltest.netcdf_drv_has_nc4:
        pytest.skip()

    try:
        import numpy
    except ImportError:
        pytest.skip()

    # disable bottom-up mode to use the real file's blocks size
    gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', 'NO')
    # for the moment the next test using check_stat does not work, seems like
    # the last pixel (9) of the image is not handled by stats...
    # tst = gdaltest.GDALTest( 'NetCDF', 'partial_block_ticket5950.nc', 1, 45 )
    # result = tst.testOpen( check_stat=(1, 9, 5, 2.582) )
    # so for the moment compare the full image
    ds = gdal.Open('data/netcdf/partial_block_ticket5950.nc', gdal.GA_ReadOnly)
    ref = numpy.arange(1, 10).reshape((3, 3))
    if not numpy.array_equal(ds.GetRasterBand(1).ReadAsArray(), ref):
        pytest.fail()
    ds = None
    gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', None)

###############################################################################
# Test reading SRS from srid attribute (#6613)


def test_netcdf_68():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/srid.nc')
    wkt = ds.GetProjectionRef()
    assert '6933' in wkt

###############################################################################
# Test opening a dataset with a 1D variable with 0 record (#6645)


def test_netcdf_69():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/test6645.nc')
    assert ds is not None

###############################################################################
# Test that we don't erroneously identify non-longitude axis as longitude (#6759)


def test_netcdf_70():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/test6759.nc')
    gt = ds.GetGeoTransform()
    expected_gt = [304250.0, 250.0, 0.0, 4952500.0, 0.0, -250.0]
    assert max(abs(gt[i] - expected_gt[i]) for i in range(6)) <= 1e-3

###############################################################################
# Test that we take into account x and y offset and scaling
# (https://github.com/OSGeo/gdal/pull/200)


def test_netcdf_71():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/test_coord_scale_offset.nc')
    gt = ds.GetGeoTransform()
    expected_gt = (-690769.999174516, 1015.8812500000931, 0.0, 2042963.9463741186, 0.0, -1015.8812499996275)
    assert gt == pytest.approx(expected_gt, abs=1e-3)

###############################################################################
# test int64 attributes / dim


def test_netcdf_72():

    if gdaltest.netcdf_drv is None:
        pytest.skip()
    if not gdaltest.netcdf_drv_has_nc4:
        pytest.skip()

    ds = gdal.Open('data/netcdf/int64dim.nc')
    mdi = ds.GetRasterBand(1).GetMetadataItem('NETCDF_DIM_TIME')
    assert mdi == '123456789012'

###############################################################################
# test geostationary with radian units (https://github.com/OSGeo/gdal/pull/220)


def test_netcdf_73():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/geos_rad.nc')
    gt = ds.GetGeoTransform()
    expected_gt = (-5979486.362104082, 1087179.4077774752, 0.0, 5979486.362104082, 0.0, -1087179.4077774752)
    assert gt == pytest.approx(expected_gt, abs=1e-3)

###############################################################################
# test geostationary with microradian units (https://github.com/OSGeo/gdal/pull/220)


def test_netcdf_74():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/geos_microradian.nc')
    gt = ds.GetGeoTransform()
    expected_gt = (-5739675.119757546, 615630.8078590936, 0.0, 5739675.119757546, 0.0, -615630.8078590936)
    assert gt == pytest.approx(expected_gt, abs=1e-3)

###############################################################################
# test opening a ncdump file


def test_netcdf_75():

    if gdaltest.netcdf_drv is None:
        pytest.skip()
    if gdaltest.netcdf_drv.GetMetadataItem("ENABLE_NCDUMP") != 'YES':
        pytest.skip()

    tst = gdaltest.GDALTest('NetCDF', 'netcdf/byte.nc.txt', 1, 4672)
    wkt = """PROJCS["NAD27 / UTM zone 11N",
    GEOGCS["NAD27",
        DATUM["North_American_Datum_1927",
            SPHEROID["Clarke 1866",6378206.4,294.9786982139006,
                AUTHORITY["EPSG","7008"]],
            AUTHORITY["EPSG","6267"]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433],
        AUTHORITY["EPSG","4267"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",0],
    PARAMETER["central_meridian",-117],
    PARAMETER["scale_factor",0.9996],
    PARAMETER["false_easting",500000],
    PARAMETER["false_northing",0],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AUTHORITY["EPSG","26711"]]"""
    return tst.testOpen(check_prj=wkt)

###############################################################################
# test opening a vector ncdump file


def test_netcdf_76():

    if gdaltest.netcdf_drv is None:
        pytest.skip()
    if gdaltest.netcdf_drv.GetMetadataItem("ENABLE_NCDUMP") != 'YES':
        pytest.skip()

    ds = ogr.Open('data/netcdf/poly.nc.txt')
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    if f is None or f.GetGeometryRef() is None:
        f.DumpReadable()
        pytest.fail()

###############################################################################
# test opening a raster file that used to be confused with a vector file (#6974)


def test_netcdf_77():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/fake_Oa01_radiance.nc')
    subdatasets = ds.GetMetadata('SUBDATASETS')
    assert len(subdatasets) == 2 * 2

    ds = gdal.Open('NETCDF:"data/netcdf/fake_Oa01_radiance.nc":Oa01_radiance')
    assert not ds.GetMetadata('GEOLOCATION')

###############################################################################
# test we handle correctly valid_range={0,255} for a byte dataset with
# negative nodata value


def test_netcdf_78():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/byte_with_valid_range.nc')
    assert ds.GetRasterBand(1).GetNoDataValue() == 240
    data = ds.GetRasterBand(1).ReadRaster()
    data = struct.unpack('B' * 4, data)
    assert data == (128, 129, 126, 127)

###############################################################################
# test we handle correctly _Unsigned="true" for a byte dataset with
# negative nodata value


def test_netcdf_79():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/byte_with_neg_fillvalue_and_unsigned_hint.nc')
    assert ds.GetRasterBand(1).GetNoDataValue() == 240
    data = ds.GetRasterBand(1).ReadRaster()
    data = struct.unpack('B' * 4, data)
    assert data == (128, 129, 126, 127)

###############################################################################
# Test creating and opening with accent


def test_netcdf_80():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    test = gdaltest.GDALTest('NETCDF', '../data/byte.tif', 1, 4672)
    return test.testCreateCopy(new_filename='test\xc3\xa9.nc', check_gt=0, check_srs=0, check_minmax=0)

###############################################################################
# netCDF file in rotated_pole projection


def test_netcdf_81():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/rotated_pole.nc')

    assert ds.RasterXSize == 137 and ds.RasterYSize == 108, \
        'Did not get expected dimensions'

    projection = ds.GetProjectionRef()
    # Before PROJ 7.0.1
    deprecated_expected_projection = """PROJCS["unnamed",GEOGCS["unknown",DATUM["unnamed",SPHEROID["Spheroid",6367470,594.313048347956]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Rotated_pole"],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],EXTENSION["PROJ4","+proj=ob_tran +o_proj=longlat +lon_0=18 +o_lon_p=0 +o_lat_p=39.25 +a=6367470 +b=6367470 +to_meter=0.0174532925199 +wktext"]]"""

    expected_projection = """GEOGCRS["unnamed",BASEGEOGCRS["unknown",DATUM["unknown",ELLIPSOID["unknown",6367470,0,LENGTHUNIT["metre",1,ID["EPSG",9001]]]],PRIMEM["Greenwich",0,ANGLEUNIT["degree",0.0174532925199433],ID["EPSG",8901]]],DERIVINGCONVERSION["unknown",METHOD["PROJ ob_tran o_proj=longlat"],PARAMETER["lon_0",18,ANGLEUNIT["degree",0.0174532925199433,ID["EPSG",9122]]],PARAMETER["o_lon_p",0,ANGLEUNIT["degree",0.0174532925199433,ID["EPSG",9122]]],PARAMETER["o_lat_p",39.25,ANGLEUNIT["degree",0.0174532925199433,ID["EPSG",9122]]]],CS[ellipsoidal,2],AXIS["longitude",east,ORDER[1],ANGLEUNIT["degree",0.0174532925199433,ID["EPSG",9122]]],AXIS["latitude",north,ORDER[2],ANGLEUNIT["degree",0.0174532925199433,ID["EPSG",9122]]]]"""

    assert projection in (expected_projection, deprecated_expected_projection)

    gt = ds.GetGeoTransform()
    expected_gt = (-35.47, 0.44, 0.0, 23.65, 0.0, -0.44)
    assert max([abs(gt[i] - expected_gt[i]) for i in range(6)]) <= 1e-3, \
        'Did not get expected geotransform'

###############################################################################
# netCDF file with extra dimensions that are oddly indexed (1D variable
# corresponding to the dimension but with a different name, no corresponding
# 1D variable, several corresponding variables)


def test_netcdf_82():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    with gdaltest.error_handler():
        ds = gdal.Open('data/netcdf/oddly_indexed_extra_dims.nc')
    md = ds.GetMetadata()
    expected_md = {
        'NETCDF_DIM_extra_dim_with_var_of_different_name_VALUES': '{100,200}',
        'NETCDF_DIM_EXTRA': '{extra_dim_with_several_variables,extra_dim_without_variable,extra_dim_with_var_of_different_name}',
        'x#standard_name': 'projection_x_coordinate',
        'NC_GLOBAL#Conventions': 'CF-1.5',
        'y#standard_name': 'projection_y_coordinate',
        'NETCDF_DIM_extra_dim_with_var_of_different_name_DEF': '{2,6}'
    }
    assert md == expected_md, 'Did not get expected metadata'

    md = ds.GetRasterBand(1).GetMetadata()
    expected_md = {
        'NETCDF_DIM_extra_dim_with_several_variables': '1',
        'NETCDF_DIM_extra_dim_with_var_of_different_name': '100',
        'NETCDF_DIM_extra_dim_without_variable': '1',
        'NETCDF_VARNAME': 'data'
    }
    assert md == expected_md, 'Did not get expected metadata'

###############################################################################
# Test complex data subsets


def test_netcdf_83():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/complex.nc')
    sds_list = ds.GetMetadata('SUBDATASETS')

    assert len(sds_list) == 6, 'Did not get expected complex subdataset count.'

    assert sds_list['SUBDATASET_1_NAME'] == 'NETCDF:"data/netcdf/complex.nc":f32' and sds_list['SUBDATASET_2_NAME'] == 'NETCDF:"data/netcdf/complex.nc":f64' and sds_list['SUBDATASET_3_NAME'] == 'NETCDF:"data/netcdf/complex.nc":/group/fmul', \
        'did not get expected subdatasets.'

    ds = None

    assert not gdaltest.is_file_open('data/netcdf/complex.nc'), 'file still opened.'
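###############################################################################
# Illustrative sketch (not part of the original test suite): how the
# SUBDATASETS metadata domain checked in test_netcdf_83 above is typically
# walked to open each complex subdataset. The helper name is hypothetical;
# gdal.Open() and GetMetadata('SUBDATASETS') are the only GDAL calls assumed.


def _list_complex_subdatasets(path='data/netcdf/complex.nc'):
    ds = gdal.Open(path)
    if ds is None:
        return []
    sds = ds.GetMetadata('SUBDATASETS')
    # Entries come in pairs: SUBDATASET_<n>_NAME / SUBDATASET_<n>_DESC, so the
    # number of subdatasets is half the number of keys.
    names = [sds['SUBDATASET_%d_NAME' % (i + 1)] for i in range(len(sds) // 2)]
    # Each name, e.g. 'NETCDF:"data/netcdf/complex.nc":f32', can itself be
    # passed to gdal.Open(), as the following checksum tests do.
    return names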
###############################################################################
# Confirm complex subset data access and checksum
# Start with Float32


def test_netcdf_84():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('NETCDF:"data/netcdf/complex.nc":f32')
    assert ds.GetRasterBand(1).DataType == gdal.GDT_CFloat32

    cs = ds.GetRasterBand(1).Checksum()
    assert cs == 523, 'did not get expected checksum'

# Repeat for Float64


def test_netcdf_85():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('NETCDF:"data/netcdf/complex.nc":f64')
    assert ds.GetRasterBand(1).DataType == gdal.GDT_CFloat64

    cs = ds.GetRasterBand(1).Checksum()
    assert cs == 511, 'did not get expected checksum'

# Check for groups support


def test_netcdf_86():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('NETCDF:"data/netcdf/complex.nc":/group/fmul')
    assert ds.GetRasterBand(1).DataType == gdal.GDT_CFloat32

    cs = ds.GetRasterBand(1).Checksum()
    assert cs == 453, 'did not get expected checksum for band 1'

    cs = ds.GetRasterBand(2).Checksum()
    assert cs == 629, 'did not get expected checksum for band 2'

    cs = ds.GetRasterBand(3).Checksum()
    assert cs == 473, 'did not get expected checksum for band 3'

###############################################################################


def test_netcdf_uffd():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    if uffd_compare('netcdf/orog_CRCM1.nc') is None:
        pytest.skip()

    netcdf_files = [
        'orog_CRCM1.nc',
        'orog_CRCM2.nc',
        'cf-bug636.nc',
        'bug636.nc',
        'rotated_pole.nc',
        'reduce-cgcms.nc'
    ]
    for netcdf_file in netcdf_files:
        assert uffd_compare('netcdf/' + netcdf_file) is True

###############################################################################
# netCDF file containing both rasters and vectors


def test_netcdf_mixed_raster_vector():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('NETCDF:data/netcdf/nc_mixed_raster_vector.nc:Band1')
    assert ds.GetRasterBand(1).Checksum() == 4672

    ds = ogr.Open('data/netcdf/nc_mixed_raster_vector.nc')
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    assert f['PRFEDEA'] == '35043411'

###############################################################################
# Test opening a file with an empty double attribute
# https://github.com/OSGeo/gdal/issues/1303


def test_netcdf_open_empty_double_attr():

    if gdaltest.netcdf_drv is None:
        pytest.skip()

    ds = gdal.Open('data/netcdf/empty_double_attr.nc')
    assert ds

###############################################################################
# Test writing and reading a file with huge block size


def test_netcdf_huge_block_size():

    if gdaltest.netcdf_drv is None:
        pytest.skip()
    if not gdaltest.run_slow_tests():
        pytest.skip()
    if sys.maxsize < 2**32:
        pytest.skip('Test not available on 32 bit')

    import psutil
    if psutil.virtual_memory().available < 2 * 50000 * 50000:
        pytest.skip("Not enough virtual memory available")

    tmpfilename = 'tmp/test_netcdf_huge_block_size.nc'
    with gdaltest.SetCacheMax(50000 * 50000 + 100000):
        with gdaltest.config_option('BLOCKYSIZE', '50000'):
            gdal.Translate(tmpfilename, '../gcore/data/byte.tif', options='-f netCDF -outsize 50000 50000 -co WRITE_BOTTOMUP=NO -co COMPRESS=DEFLATE -co FORMAT=NC4')

    ds = gdal.Open(tmpfilename)
    data = ds.ReadRaster(0, 0, ds.RasterXSize, ds.RasterYSize, buf_xsize = 20, buf_ysize = 20)
    assert data
    ref_ds = gdal.Open('../gcore/data/byte.tif')
    assert data == ref_ds.ReadRaster()
    ds = None

    gdal.Unlink(tmpfilename)

###############################################################################
# Test reading a netCDF file whose fastest
varying dimension is Latitude, and # slowest one is Longitude # https://lists.osgeo.org/pipermail/gdal-dev/2019-March/049931.html # Currently we expose it in a 'raw' way, but make sure that geotransform and # geoloc arrays reflect the georeferencing correctly def test_netcdf_swapped_x_y_dimension(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/swapedxy.nc') assert ds.RasterXSize == 4 assert ds.RasterYSize == 8 assert ds.GetGeoTransform() == (90.0, -45.0, 0, -180, 0.0, 45.0) data = ds.GetRasterBand(1).ReadRaster() data = struct.unpack('h' * 4 * 8, data) assert data == (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ,13 ,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31) md = ds.GetMetadata('GEOLOCATION') assert md == { 'LINE_OFFSET': '0', 'X_DATASET': 'NETCDF:"data/netcdf/swapedxy.nc":Latitude', 'SWAP_XY': 'YES', 'PIXEL_STEP': '1', 'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]', 'PIXEL_OFFSET': '0', 'X_BAND': '1', 'LINE_STEP': '1', 'Y_DATASET': 'NETCDF:"data/netcdf/swapedxy.nc":Longitude', 'Y_BAND': '1'}, md ds = gdal.Open(md['X_DATASET']) assert ds.RasterXSize == 4 assert ds.RasterYSize == 1 data = ds.GetRasterBand(1).ReadRaster() data = struct.unpack('f' * 4, data) assert data == (67.5, 22.5, -22.5, -67.5) ds = gdal.Open(md['Y_DATASET']) assert ds.RasterXSize == 8 assert ds.RasterYSize == 1 data = ds.GetRasterBand(1).ReadRaster() data = struct.unpack('f' * 8, data) assert data == (-157.5, -112.5, -67.5, -22.5, 22.5, 67.5, 112.5, 157.5) ds = gdal.Warp('', 'data/netcdf/swapedxy.nc', options = '-f MEM -geoloc') assert ds.RasterXSize == 8 assert ds.RasterYSize == 4 assert ds.GetGeoTransform() == (-157.5, 38.3161193233344, 0.0, 67.5, 0.0, -38.3161193233344) data = ds.GetRasterBand(1).ReadRaster() data = struct.unpack('h' * 4 * 8, data) # not exactly the transposed array, but not so far assert data == (4, 8, 8, 12, 16, 20, 20, 24, 5, 9, 9, 13, 17, 21, 21, 25, 6, 10, 10, 14, 18, 22, 22, 26, 7, 11, 11, 15, 19, 23, 23, 27) ############################################################################### # Test reading a netCDF file whose grid_mapping attribute uses an # expanded form def test_netcdf_expanded_form_of_grid_mapping(): if gdaltest.netcdf_drv is None: pytest.skip() ds = gdal.Open('data/netcdf/expanded_form_of_grid_mapping.nc') wkt = ds.GetProjectionRef() assert 'Transverse_Mercator' in wkt ############################################################################### ############################################################################### # main tests list ############################################################################### # basic file creation tests init_list = [ ('byte.tif', 4672, []), ('gtiff/byte_signed.tif', 4672, ['PIXELTYPE=SIGNEDBYTE']), ('int16.tif', 4672, []), ('int32.tif', 4672, []), ('float32.tif', 4672, []), ('float64.tif', 4672, []) ] # Some tests we don't need to do for each type. @pytest.mark.parametrize( 'testfunction', [ 'testSetGeoTransform', 'testSetProjection', # SetMetadata() not supported # 'testSetMetadata' ] ) @pytest.mark.require_driver('netcdf') def test_netcdf_functions_1(testfunction): ut = gdaltest.GDALTest('netcdf', 'byte.tif', 1, 4672, options=["GEOMETRY_ENCODING=WKT"]) getattr(ut, testfunction)() # Others we do for each pixel type. 
@pytest.mark.parametrize( 'filename,checksum,options', init_list, ids=[tup[0].split('.')[0] for tup in init_list], ) @pytest.mark.parametrize( 'testfunction', [ 'testCreateCopy', 'testCreate', 'testSetNoDataValue' ] ) @pytest.mark.require_driver('netcdf') def test_netcdf_functions_2(filename, checksum, options, testfunction): ut = gdaltest.GDALTest('netcdf', filename, 1, checksum, options=options) getattr(ut, testfunction)() ############################################################################### # simple geometry tests # basic tests def test_bad_cf1_8(): # basic resilience test, make sure it can exit "gracefully" # if not it will abort all tests bad_geometry = ogr.Open("data/netcdf-sg/no_geometry_type.nc") bad_feature = ogr.Open("data/netcdf-sg/bad_feature_test.nc") missing_node_counts_test = ogr.Open("data/netcdf-sg/missing_node_counts_test.nc") uneq_x_y = ogr.Open("data/netcdf-sg/unequal_xy.nc") corrupt_poly_1 = ogr.Open("data/netcdf-sg/corrupted_polygon_ncpncir.nc") corrupt_poly_2 = ogr.Open("data/netcdf-sg/corrupted_polygon_pnc.nc") corrupt_poly_3 = ogr.Open("data/netcdf-sg/corrupted_polygon_ir.nc") # error IS fatal assert(bad_geometry is None) assert(bad_feature is None) assert(missing_node_counts_test is None) assert(corrupt_poly_1 is None) assert(corrupt_poly_2 is None) assert(corrupt_poly_3 is None) assert(uneq_x_y is None) def test_point_read(): if gdaltest.netcdf_drv is None: pytest.skip() singleton_pt = ogr.Open("data/netcdf-sg/point_test.nc") lc = singleton_pt.GetLayerCount() assert(lc == 1) layer = singleton_pt.GetLayerByName("names_geometry") assert(layer != None) # Test each geometry directly ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (1 -1)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (2 -2)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (3 -3)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (4 -4)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (5 -5)") def test_point3D_read(): if gdaltest.netcdf_drv is None: pytest.skip() singleton_pt = ogr.Open("data/netcdf-sg/point3D_test.nc") lc = singleton_pt.GetLayerCount() assert(lc == 1) layer = singleton_pt.GetLayerByName("names_geometry") assert(layer != None) # Test each geometry directly ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (1 -1 1)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (2 -2 -2)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (3 -3 3)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (4 -4 -4)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POINT (5 -5 5)") def test_multipoint_read(): if gdaltest.netcdf_drv is None: pytest.skip() multipoints = ogr.Open("data/netcdf-sg/multipoint_test.nc") assert(multipoints != None) lc = multipoints.GetLayerCount() assert(lc == 1) layer = multipoints.GetLayerByName("names_geometry") assert(layer != None) ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (1 -1,2 -2,3 
-3,4 -4)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (5 -5,6 -6,7 -7,8 -8)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (9 -9,10 -10,-1 1,-2 2)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (-3 3,-4 4,-5 5,-6 6)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (-7 7,-8 8,-9 9,-10 10)") def test_multipoint3D_read(): if gdaltest.netcdf_drv is None: pytest.skip() multipoints = ogr.Open("data/netcdf-sg/multipoint3D_test.nc") assert(multipoints != None) lc = multipoints.GetLayerCount() assert(lc == 1) layer = multipoints.GetLayerByName("names_geometry") assert(layer != None) ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (1 -1 1,2 -2 -2,3 -3 3,4 -4 -4)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (5 -5 5,6 -6 -6,7 -7 7,8 -8 -8)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (9 -9 9,10 -10 -10,-1 1 -1,-2 2 2)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (-3 3 -3,-4 4 4,-5 5 -5,-6 6 6)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (-7 7 -7,-8 8 8,-9 9 -9,-10 10 10)") def test_line_read(): if gdaltest.netcdf_drv is None: pytest.skip() line = ogr.Open("data/netcdf-sg/line_test.nc") assert(line != None) lc = line.GetLayerCount() assert(lc == 1) layer = line.GetLayerByName("names_geometry") assert(layer != None) ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (1 -1,2 -2,3 -3,4 -4)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (5 -5,6 -6,7 -7,8 -8)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (9 -9,10 -10,-1 1,-2 2)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (-3 3,-4 4,-5 5,-6 6)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (-7 7,-8 8,-9 9,-10 10)") def test_line3D_read(): if gdaltest.netcdf_drv is None: pytest.skip() line = ogr.Open("data/netcdf-sg/line3D_test.nc") assert(line != None) lc = line.GetLayerCount() assert(lc == 1) layer = line.GetLayerByName("names_geometry") assert(layer != None) ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (1 -1 1,2 -2 -2,3 -3 3,4 -4 -4)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (5 -5 5,6 -6 -6,7 -7 7,8 -8 -8)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (9 -9 9,10 -10 -10,-1 1 1,-2 2 -2)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (-3 3 3,-4 4 -4,-5 5 5,-6 6 -6)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "LINESTRING (-7 7 7,-8 8 
-8,-9 9 9,-10 10 -10)") def test_multiline_read(): if gdaltest.netcdf_drv is None: pytest.skip() multiline = ogr.Open("data/netcdf-sg/multiline_test.nc") assert(multiline != None) lc = multiline.GetLayerCount() assert(lc == 1) layer = multiline.GetLayerByName("names_geometry") assert(layer != None) ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((1 -1),(2 -2,3 -3,4 -4))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((5 -5,6 -6,7 -7,8 -8))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((9 -9,10 -10,-1 1),(-2 2))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((-3 3,-4 4),(-5 5,-6 6))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((-7 7,-8 8,-9 9,-10 10))") def test_multiline3D_read(): if gdaltest.netcdf_drv is None: pytest.skip() multiline = ogr.Open("data/netcdf-sg/multiline3D_test.nc") assert(multiline != None) lc = multiline.GetLayerCount() assert(lc == 1) layer = multiline.GetLayerByName("names_geometry") assert(layer != None) ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((1 -1 -1),(2 -2 2,3 -3 -3,4 -4 4))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((5 -5 -5,6 -6 6,7 -7 -7,8 -8 8))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((9 -9 -9,10 -10 10,-1 1 -1),(-2 2 2))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((-3 3 -3,-4 4 4),(-5 5 -5,-6 6 6))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTILINESTRING ((-7 7 -7,-8 8 8,-9 9 -9,-10 10 10))") def test_polygon_read(): if gdaltest.netcdf_drv is None: pytest.skip() polygon = ogr.Open("data/netcdf-sg/polygon_test.nc") assert(polygon != None) lc = polygon.GetLayerCount() assert(lc == 1) layer = polygon.GetLayerByName("names_geometry") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POLYGON ((0 0,1 0,1 1,0 0))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POLYGON ((3 0,4 0,4 1,3 1,3 0))") def test_polygon3D_read(): if gdaltest.netcdf_drv is None: pytest.skip() polygon = ogr.Open("data/netcdf-sg/polygon3D_test.nc") assert(polygon != None) lc = polygon.GetLayerCount() assert(lc == 1) layer = polygon.GetLayerByName("names_geometry") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POLYGON ((0 0 1,1 0 2,1 1 2,0 0 1))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POLYGON ((3 0 1,4 0 1,4 1 1,3 1 1,3 0 1))") def test_multipolygon_read(): if gdaltest.netcdf_drv is None: pytest.skip() multipolygon = ogr.Open("data/netcdf-sg/multipolygon_test.nc") assert(multipolygon != None) lc = multipolygon.GetLayerCount() assert(lc == 1) layer = multipolygon.GetLayerByName("names_geometry") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == 
"MULTIPOLYGON (((0 0,1 0,1 1,0 0)))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))") def test_multipolygon3D_read(): if gdaltest.netcdf_drv is None: pytest.skip() multipolygon = ogr.Open("data/netcdf-sg/multipolygon3D_test.nc") assert(multipolygon != None) lc = multipolygon.GetLayerCount() assert(lc == 1) layer = multipolygon.GetLayerByName("names_geometry") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOLYGON (((0 0 0,1 0 5,1 1 5,0 0 0)))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOLYGON (((3 0 5,4 0 10,4 1 10,3 0 5)),((3 0 10,4 1 15,3 1 15,3 0 10)))") def test_serpenski_two_ring(): if gdaltest.netcdf_drv is None: pytest.skip() s = ogr.Open("data/netcdf-sg/serpenski_2nd.nc") assert(s != None) lc = s.GetLayerCount() assert(lc == 1) good_layer = s.GetLayerByName("serpenski") assert(good_layer != None) # real layer assert(good_layer.GetFeatureCount() == 1) assert(good_layer.GetGeomType() == ogr.wkbMultiPolygon) serpenski = good_layer.GetNextFeature() triangle = serpenski.GetGeometryRef() st_wkt = triangle.ExportToWkt() assert(st_wkt == \ "MULTIPOLYGON (((0 0,1 0,0.5 0.866025403784439,0 0),(0.5 0.0,0.75 0.433012701892219,0.25 0.433012701892219,0.5 0.0)))") def test_serpenski3D_two_ring(): if gdaltest.netcdf_drv is None: pytest.skip() s = ogr.Open("data/netcdf-sg/serpenski3D_2nd.nc") assert(s != None) lc = s.GetLayerCount() assert(lc == 1) good_layer = s.GetLayerByName("serpenski") assert(good_layer != None) # real layer assert(good_layer.GetFeatureCount() == 1) assert(good_layer.GetGeomType() == ogr.wkbMultiPolygon25D) serpenski = good_layer.GetNextFeature() triangle = serpenski.GetGeometryRef() st_wkt = triangle.ExportToWkt() assert(st_wkt == \ "MULTIPOLYGON (((0 0 1,1 0 1,0.5 0.866025403784439 1,0 0 1),(0.5 0.0 1,0.75 0.433012701892219 1,0.25 0.433012701892219 1,0.5 0.0 1)))") def test_flipped_axis(): if gdaltest.netcdf_drv is None: pytest.skip() # similar to simple polygon test, but with flipped axis polygon = ogr.Open("data/netcdf-sg/flipped_axes_test.nc") assert(polygon != None) layer = polygon.GetLayerByName("names_geometry") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POLYGON ((0 0,1 0,1 1,0 0))") def test_arbitrary_3Daxis_order_(): if gdaltest.netcdf_drv is None: pytest.skip() polygon = ogr.Open("data/netcdf-sg/arbitrary_axis_order_test.nc") assert(polygon != None) layer = polygon.GetLayerByName("names_geometry") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POLYGON ((0 0 1,1 0 2,1 1 2,0 0 1))") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "POLYGON ((3 0 1,4 0 1,4 1 1,3 1 1,3 0 1))") def test_multiple_layers_one_nc(): if gdaltest.netcdf_drv is None: pytest.skip() # tests whether or not an NC with multiple geometry containers can be read # each geometry container a layer s = ogr.Open("data/netcdf-sg/multiple_containers.nc") lc = s.GetLayerCount() assert(lc == 2) s_triangle = s.GetLayerByName("serpenski") s_outline = s.GetLayerByName("serpenski_outline") assert(s_triangle != None) assert(s_outline != None) triangle_ft = s_triangle.GetNextFeature() triangle = triangle_ft.GetGeometryRef() assert(triangle.GetGeometryType() == ogr.wkbMultiPolygon) st_wkt = 
triangle.ExportToWkt() assert(st_wkt == \ "MULTIPOLYGON (((0 0,1 0,0.5 0.866025403784439,0 0),(0.5 0.0,0.75 0.433012701892219,0.25 0.433012701892219,0.5 0.0)))") outline_ft = s_outline.GetNextFeature() outline = outline_ft.GetGeometryRef() assert(outline.GetGeometryType() == ogr.wkbMultiLineString) so_wkt = outline.ExportToWkt() assert(so_wkt == \ "MULTILINESTRING ((0 0,1 0,0.5 0.866025403784439,0 0),(0.5 0.0,0.75 0.433012701892219,0.25 0.433012701892219,0.5 0.0))") # advanced tests def test_yahara(): if gdaltest.netcdf_drv is None: pytest.skip() yahara = ogr.Open("data/netcdf-sg/Yahara_alb.nc") assert(yahara != None) y_layer = yahara.GetLayerByName("geometry_container") assert(y_layer != None) # Assert some basic properties assert(y_layer.GetFeatureCount() == 71) assert(y_layer.GetGeomType() == ogr.wkbMultiPolygon) # Test getting a single feature through iteration first = y_layer.GetNextFeature() # Check fields are set correctly assert(first.GetFieldAsInteger("ID") == 1) assert(first.GetFieldAsInteger("GRIDCODE") == 55) assert(first.GetFieldAsDouble("X_COORD") == 577251.43302) assert(first.GetFieldAsDouble("Y_COORD") == 319799.04918) # Check spatial ref is set correctly fSRS = y_layer.GetSpatialRef() assert(fSRS is not None) assert(fSRS.ExportToWkt() == "PROJCS[\"unnamed\",GEOGCS[\"unknown\",DATUM[\"unnamed\",SPHEROID[\"Spheroid\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]]],PROJECTION[\"Albers_Conic_Equal_Area\"],PARAMETER[\"latitude_of_center\",23],PARAMETER[\"longitude_of_center\",-96],PARAMETER[\"standard_parallel_1\",29.5],PARAMETER[\"standard_parallel_2\",45.5],PARAMETER[\"false_easting\",0],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH]]") def test_states_full_layer(): if gdaltest.netcdf_drv is None: pytest.skip() states = ogr.Open("data/netcdf-sg/cf1.8_states.nc") assert(states != None) s_layer = states.GetLayerByName("geometry_container") assert(s_layer != None) # Assert some basic properties (again) assert(s_layer.GetFeatureCount() == 49) assert(s_layer.GetGeomType() == ogr.wkbMultiPolygon) # Test getting two features first = s_layer.GetNextFeature() second = s_layer.GetNextFeature() # try resetting and then trying again s_layer.ResetReading() first_2 = s_layer.GetNextFeature() # Did reset work correctly? 
assert(first.Equal(first_2)) # Sanity check assert(first.Equal(second) != True) # Check fields are set correctly assert(second.GetFieldAsString("STATE_NAME") == "Montana") assert(second.GetFieldAsInteger("DRAWSEQ") == 3) assert(second.GetFieldAsString("STATE_FIPS") == "30") assert(second.GetFieldAsString("STATE_ABBR") == "MT") ############################################################################### # simple geometry writing tests def test_point_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/point_write_test.json", gdal.OF_VECTOR) assert(src is not None) gdal.VectorTranslate("tmp/test_point_write.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/test_point_write.nc") assert(src is not None) assert(src.GetLayerCount() == 1) # Test layer properties layer = nc_tsrc.GetLayerByName("point_collection") assert(layer is not None) assert(layer.GetFeatureCount() == 4) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (0.5 -0.5)") assert(fnam == "FishingSpot1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1 -1)") assert(fnam == "FishingSpot2") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1.5 -1.5)") assert(fnam == "FishingSpot3") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (2 -2)") assert(fnam == "FishingSpot4") def test_point3D_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/point3D_write_test.json", gdal.OF_VECTOR) assert(src is not None) gdal.VectorTranslate("tmp/test_point3D_write.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/test_point3D_write.nc") assert(src is not None) assert(src.GetLayerCount() == 1) # Test layer properties layer = nc_tsrc.GetLayerByName("point_collection") assert(layer is not None) assert(layer.GetFeatureCount() == 4) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (0.5 -0.5 -1.5)") assert(fnam == "FishingSpot1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1 -1 -0.5)") assert(fnam == "FishingSpot2") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1.5 -1.5 0.5)") assert(fnam == "FishingSpot3") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (2 -2 1.5)") assert(fnam == "FishingSpot4") def test_line_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/line_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/line_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/line_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("segv") assert(layer is not None) assert(layer.GetFeatureCount() == 3) # Test each feature manually feat = layer.GetNextFeature() fgeo = 
feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "LINESTRING (1.5 -1.5)") assert(fnam == "seg1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "LINESTRING (30.5 30.5,5 5)") assert(fnam == "seg2") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "LINESTRING (9 -9,10 -10,-1 1)") assert(fnam == "seg3") def test_line3D_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/line3D_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/line3D_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/line3D_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("path") assert(layer is not None) assert(layer.GetFeatureCount() == 3) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "LINESTRING (0.1 0.2 0.3,99 -99 0)") assert(fnam == "path1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "LINESTRING (100 101 102,25 27 29)") assert(fnam == "path2") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "LINESTRING (7 -11 -7,-11 7 11,-6 1945 1918)") assert(fnam == "path3") def test_polygon_no_ir_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/polygon_no_ir_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/polygon_no_ir_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/polygon_no_ir_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("noir_write") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually # Do to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons # But when being written out, they are OGRFeature POLYGON feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POLYGON ((0 0,1 0,1 1,0 0))") assert(fnam == "Triangle") # This second feature has an interior ring in it feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POLYGON ((3 0,4 0,4 1,3 1,3 0))") assert(fnam == "Square") def test_polygon_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/polygon_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/polygon_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/polygon_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("shapes") assert(layer is not None) assert(layer.GetFeatureCount() == 3) # Test each feature manually # Do to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons # But when being written out, they are OGRFeature POLYGON feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = 
fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))") assert(fnam == "Triangle") # This second feature has an interior ring in it feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 1,3 0),(3.5 0.25,3.75 0.25,3.75 0.5,3.5 0.5,3.5 0.25)))") assert(fnam == "Square_in_Square") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0,-1 0,-1 -1,0 0)))") assert(fnam == "Triangle_Flipped") def test_polygon3D_no_ir_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/polygon3D_no_ir_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/polygon3D_no_ir_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/polygon3D_no_ir_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("noir_write") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually # Do to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons # But when being written out, they are OGRFeature POLYGON feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fid = feat.GetFieldAsInteger("ID") assert(fWkt == "POLYGON ((0 0 0,1 0 2,1 1 0,0 0 2))") assert(fid == 0) # This second feature has an interior ring in it feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fid = feat.GetFieldAsInteger("ID") assert(fWkt == "POLYGON ((3 0 -1,4 0 -2,4 1 0,3 1 -2,3 0 -1))") assert(fid == 1) def test_polygon3D_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/polygon3D_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/polygon3D_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/polygon3D_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("shapes") assert(layer is not None) assert(layer.GetFeatureCount() == 3) # Test each feature manually # Do to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons # But when being written out, they are OGRFeature POLYGON feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0 0,1 0 -1,1 1 -2,0 0 -3)))") assert(fnam == "Trianglything") # This second feature has an interior ring in it feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0 0,4 0 0,4 1 1,3 1 1,3 0 0),(3.5 0.25 1,3.75 0.25 1,3.75 0.5 1,3.5 0.5 1,3.5 0.25 1)))") assert(fnam == "Prismthing") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0 0,-1 0 1,-1 -1 2,0 0 3)))") assert(fnam == "Trianglyflipped") def test_multipoint_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multipoint_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multipoint_write_test.nc", src, 
format="netCDF") nc_tsrc = ogr.Open("tmp/multipoint_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("peak_list") assert(layer is not None) assert(layer.GetFeatureCount() == 3) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOINT (1 -1,2 -2,4 -4)") assert(fnam == "Peaks1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOINT (5 -5,6 -6,8 -8)") assert(fnam == "Peaks2") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOINT (9 -9,10 -10,-2 2)") assert(fnam == "Peaks3") def test_multipoint3D_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multipoint3D_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multipoint3D_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/multipoint3D_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("drilling_sites") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOINT (0 -1 -5,2 -2 2)") assert(fnam == "site1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOINT (7 -2 1,4 3 2,8 -8 3)") assert(fnam == "site2") def test_multiline_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multiline_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multiline_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/multiline_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("streams") assert(layer is not None) assert(layer.GetFeatureCount() == 3) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTILINESTRING ((1 -5),(2 -4,3 -3,4 -2,5 -1))") assert(fnam == "fresh_river") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTILINESTRING ((-2 5,-3 4,-4 3,-5 2))") assert(fnam == "not_so_fresh_river") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTILINESTRING ((0 1,1 0),(2 0,-2 0))") assert(fnam == "not_fresh_river") def test_multiline3D_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multiline3D_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multiline3D_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/multiline3D_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("streams") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = 
fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTILINESTRING ((1 -5 10),(2 -4 9,3 -3 8,4 -2 7,5 -1 8))") assert(fnam == "fresh_river") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTILINESTRING ((0 1 1,1 0 2),(2 0 1,-2 0 1))") assert(fnam == "not_fresh_river") def test_multipolygon_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multipolygon_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/multipolygon_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("shapes") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually # Do to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons # But when being written out, they are OGRFeature POLYGON feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)),((0 0,-1 0,-1 -1,0 0)))") assert(fnam == "Triangles") # This second feature has an interior ring in it feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 1,3 0),(3.5 0.25,3.75 0.25,3.75 0.5,3.5 0.5,3.5 0.25)),((4 4,4 5,5 4,4 4)))") assert(fnam == "Square_in_Square_and_Triangle") def test_multipolygon3D_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon3D_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multipolygon3D_write_test.nc4", src, format="netCDF") nc_tsrc = ogr.Open("tmp/multipolygon3D_write_test.nc4") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("shapes") assert(layer is not None) assert(layer.GetFeatureCount() == 3) # Test each feature manually # Due to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons # But when being written out, they are OGRFeature POLYGON feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0 0,1 0 1,1 1 1,0 0 0)),((0 0 0,-1 0 -1,-1 -1 -1,0 0 0)))") assert(fnam == "Trianglies") # This second feature has an interior ring in it feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0 0,4 0 0,4 1 1,3 1 1,3 0 0),(3.5 0.25 0,3.75 0.25 0,3.75 0.5 0.1,3.5 0.5 0.1,3.5 0.25 0)),((4 4 100,4 5 101,5 4 101,4 4 100)))") assert(fnam == "Prismy_and_Triangly") # This third feature is just a Polygon feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((-2 0 -5,-2 1 -6,-1 1 -6,-2 0 -5)))") assert(fnam == "Single_Triangly") def test_multipolygon_with_no_ir_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon_no_ir_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multipolygon_no_ir_write_test.nc", src, 
format="netCDF") nc_tsrc = ogr.Open("tmp/multipolygon_no_ir_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("mpoly_shape") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))") assert(fnam == "Triangle") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))") assert(fnam == "DoubleTriangle") def test_multipolygon3D_with_no_ir_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon3D_no_ir_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multipolygon3D_no_ir_write_test.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/multipolygon3D_no_ir_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("mpoly_shape") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0 0,1 0 1,1 1 2,0 0 3)))") assert(fnam == "Triangle") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0 0,4 0 1,4 1 2,3 0 3)),((3 0 -1,4 1 -2,3 1 -3,3 0 -4)))") assert(fnam == "DoubleTriangle") def test_write_buffer_restrict_correctness(): if gdaltest.netcdf_drv is None: pytest.skip() # Tests whether or not having the write buffer restriction # Writes correct data. 
src = gdal.OpenEx("data/netcdf-sg/write-tests/Yahara_alb.json") assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/Yahara_alb_4K_restrict.nc", src, format="netCDF", layerCreationOptions = ['BUFFER_SIZE=4096']) gdal.VectorTranslate("tmp/Yahara_alb_default_buf.nc", src, format="netCDF") fk_ds = ogr.Open("tmp/Yahara_alb_4K_restrict.nc") db_ds = ogr.Open("tmp/Yahara_alb_default_buf.nc") fk_ds_layer = fk_ds.GetLayerByName("geometry_container") db_ds_layer = db_ds.GetLayerByName("geometry_container") assert(fk_ds_layer is not None) assert(db_ds_layer is not None) for feat in range(71): lft = fk_ds_layer.GetNextFeature() dft = db_ds_layer.GetNextFeature() lftgeo = lft.GetGeometryRef() dftgeo = dft.GetGeometryRef() assert(lftgeo.Equal(dftgeo)) def test_write_nc_from_nc(): if gdaltest.netcdf_drv is None: pytest.skip() # Tests writing a netCDF file (of different name than source) out from another netCDF source file src = gdal.OpenEx("data/netcdf-sg/multipoint_test.nc", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multipoint_test_replica.nc", src, format="netCDF") ncds = ogr.Open("tmp/multipoint_test_replica.nc") assert(src is not None) layer = ncds.GetLayerByName("names_geometry") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (1 -1,2 -2,3 -3,4 -4)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (5 -5,6 -6,7 -7,8 -8)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (9 -9,10 -10,-1 1,-2 2)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (-3 3,-4 4,-5 5,-6 6)") ft = layer.GetNextFeature() ft_geo = ft.GetGeometryRef() ft_wkt = ft_geo.ExportToWkt() assert(ft_wkt == "MULTIPOINT (-7 7,-8 8,-9 9,-10 10)") def test_multipolygon_with_no_ir_NC4_write(): if gdaltest.netcdf_drv is None: pytest.skip() # Almost identical to test_multipolygon_with_no_ir # except this time, it is writing an NC4 file src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon_no_ir_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/multipolygon_no_ir_write_test.nc4", src, format="netCDF", datasetCreationOptions=['FORMAT=NC4']) nc_tsrc = ogr.Open("tmp/multipolygon_no_ir_write_test.nc4") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("mpoly_shape") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))") assert(fnam == "Triangle") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))") assert(fnam == "DoubleTriangle") def test_multipolygon3D_NC4C_write(): if gdaltest.netcdf_drv is None: pytest.skip() src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon3D_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) # This test is identical to test_multipolygon3D_write # except it writes to NC4C gdal.VectorTranslate("tmp/multipolygon3D_write_test.nc", src, format="netCDF", 
datasetCreationOptions=['FORMAT=NC4C']) nc_tsrc = ogr.Open("tmp/multipolygon3D_write_test.nc") assert(src is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("shapes") assert(layer is not None) assert(layer.GetFeatureCount() == 3) # Test each feature manually # Due to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons # But when being written out, they are OGRFeature POLYGON feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0 0,1 0 1,1 1 1,0 0 0)),((0 0 0,-1 0 -1,-1 -1 -1,0 0 0)))") assert(fnam == "Trianglies") # This second feature has an interior ring in it feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0 0,4 0 0,4 1 1,3 1 1,3 0 0),(3.5 0.25 0,3.75 0.25 0,3.75 0.5 0.1,3.5 0.5 0.1,3.5 0.25 0)),((4 4 100,4 5 101,5 4 101,4 4 100)))") assert(fnam == "Prismy_and_Triangly") # This third feature is just a Polygon feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((-2 0 -5,-2 1 -6,-1 1 -6,-2 0 -5)))") assert(fnam == "Single_Triangly") def test_netcdf_dimension_labels_with_null(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() # Crashes with 4.1.3 of Ubuntu Precise if gdaltest.netcdf_drv_version.startswith('4.0.') or gdaltest.netcdf_drv_version.startswith('4.1.'): pytest.skip('Test crashes with this libnetcdf version') with gdaltest.error_handler(): assert gdal.Open('data/netcdf/dimension_labels_with_null.nc') def test_write_multiple_layers_one_nc(): if gdaltest.netcdf_drv is None: pytest.skip() # tests writing multiple layers in NC3 # each geometry container a layer # this also tests "update mode" for CF-1.8 src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon_no_ir_write_test.json", gdal.OF_VECTOR) assert(src is not None) gdal.VectorTranslate("tmp/mlnc.nc", src, format="netCDF") src = gdal.OpenEx("data/netcdf-sg/write-tests/point3D_write_test.json", gdal.OF_VECTOR) assert(src is not None) gdal.VectorTranslate("tmp/mlnc.nc", src, format="netCDF", accessMode='update') nc_tsrc = ogr.Open("tmp/mlnc.nc") assert(nc_tsrc.GetLayerCount() == 2) # Test layer properties layer = nc_tsrc.GetLayerByName("mpoly_shape") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))") assert(fnam == "Triangle") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))") assert(fnam == "DoubleTriangle") # Test layer properties layer = nc_tsrc.GetLayerByName("point_collection") assert(layer is not None) assert(layer.GetFeatureCount() == 4) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (0.5 -0.5 -1.5)") assert(fnam == "FishingSpot1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1 -1 -0.5)") assert(fnam == "FishingSpot2") feat = 
layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1.5 -1.5 0.5)") assert(fnam == "FishingSpot3") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (2 -2 1.5)") assert(fnam == "FishingSpot4") def test_write_multiple_layers_one_nc_NC4(): if gdaltest.netcdf_drv is None: pytest.skip() # nearly identical to previous test except that # it writes to NC4, not NC3 (changing a file from NC3 to NC4) # and it writes them all at once (non update) src = gdal.OpenEx("tmp/mlnc.nc", gdal.OF_VECTOR) assert(src is not None) gdal.VectorTranslate("tmp/mlnc4.nc4", src, format="netCDF", datasetCreationOptions=['FORMAT=NC4']) nc_tsrc = ogr.Open("tmp/mlnc4.nc4") assert(nc_tsrc.GetLayerCount() == 2) # Test layer properties layer = nc_tsrc.GetLayerByName("mpoly_shape") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))") assert(fnam == "Triangle") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))") assert(fnam == "DoubleTriangle") # Test layer properties layer = nc_tsrc.GetLayerByName("point_collection") assert(layer is not None) assert(layer.GetFeatureCount() == 4) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (0.5 -0.5 -1.5)") assert(fnam == "FishingSpot1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1 -1 -0.5)") assert(fnam == "FishingSpot2") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1.5 -1.5 0.5)") assert(fnam == "FishingSpot3") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (2 -2 1.5)") assert(fnam == "FishingSpot4") def test_write_multiple_layers_one_nc_back_to_NC3(): if gdaltest.netcdf_drv is None: pytest.skip() # nearly identical to previous test except that # it writes to from NC4 to NC3 # and it writes them all at once (non update) # test_write_multiple_layers_one_nc writes one and then another in update mode src = gdal.OpenEx("tmp/mlnc4.nc4", gdal.OF_VECTOR) assert(src is not None) gdal.VectorTranslate("tmp/mlnc_noupdate3.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/mlnc_noupdate3.nc") assert(nc_tsrc.GetLayerCount() == 2) # Test layer properties layer = nc_tsrc.GetLayerByName("mpoly_shape") assert(layer is not None) assert(layer.GetFeatureCount() == 2) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))") assert(fnam == "Triangle") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))") assert(fnam == "DoubleTriangle") # Test layer properties layer = 
nc_tsrc.GetLayerByName("point_collection") assert(layer is not None) assert(layer.GetFeatureCount() == 4) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (0.5 -0.5 -1.5)") assert(fnam == "FishingSpot1") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1 -1 -0.5)") assert(fnam == "FishingSpot2") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (1.5 -1.5 0.5)") assert(fnam == "FishingSpot3") feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") assert(fWkt == "POINT (2 -2 1.5)") assert(fnam == "FishingSpot4") def test_SG_NC3_field_write(): # Tests all the NC3 field writing capabilities with # buffering. src = gdal.OpenEx("data/netcdf-sg/write-tests/field_test_nc3.nc", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/bufft.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/bufft.nc") assert(nc_tsrc is not None) # Test layer properties layer = nc_tsrc.GetLayerByName("names_geometry") assert(layer is not None) assert(layer.GetFeatureCount() == 1) # Test each feature manually feat = layer.GetNextFeature() fgeo = feat.GetGeometryRef() fWkt = fgeo.ExportToWkt() fnam = feat.GetFieldAsString("NAMES") fid = feat.GetFieldAsInteger("IDS") fnum1 = feat.GetFieldAsInteger("NUM_1") fnum2 = feat.GetFieldAsInteger("NUM_2") fflt = feat.GetFieldAsDouble("FL") fdbl = feat.GetFieldAsDouble("DBL") assert(fWkt == "POINT (1 -1)") assert(fnam == "Guage_1") assert(fid == 0) assert(fnum1 == 1) assert(fnum2 == 2) assert(fflt == 1.5) assert(fdbl == 99.5) def test_states_full_layer_buffer_restrict_correctness(): # Tests whether or not having the write buffer restriction # Writes correct data. 
# Note: this is different than the Yahara version in that it also tests # Correctness of writing buffered NC_CHARs and NC_STRINGs (NC4) src = gdal.OpenEx("data/netcdf-sg/write-tests/cf1.8_states.json") assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/states_4K_restrict.nc", src, format="netCDF", layerCreationOptions = ['BUFFER_SIZE=4096']) gdal.VectorTranslate("tmp/states_default_buf.nc", src, format="netCDF") fk_ds = ogr.Open("tmp/states_4K_restrict.nc") db_ds = ogr.Open("tmp/states_default_buf.nc") fk_ds_layer = fk_ds.GetLayerByName("geometry_container") db_ds_layer = db_ds.GetLayerByName("geometry_container") assert(fk_ds_layer is not None) assert(db_ds_layer is not None) for feat in range(49): lft = fk_ds_layer.GetNextFeature() dft = db_ds_layer.GetNextFeature() lftgeo = lft.GetGeometryRef() dftgeo = dft.GetGeometryRef() assert(lftgeo.Equal(dftgeo)) def test_empty_polygon_read_write(): # Tests writing features to a layer of empty polygons src = gdal.OpenEx("data/netcdf-sg/write-tests/empty_polygon_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/empty_polygon.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/empty_polygon.nc") assert(nc_tsrc is not None) nc_layer = nc_tsrc.GetLayerByName("places") assert(nc_layer.GetFeatureCount() == 2) first = nc_layer.GetNextFeature() assert(first.GetFieldAsString("NAMES") == "Somewhere") assert(first.GetGeometryRef().ExportToWkt() == "POLYGON ((0 1,1 0,2 0,0 1))") second = nc_layer.GetNextFeature() assert(second.GetFieldAsString("NAMES") == "Everywhere") assert(second.GetGeometryRef().IsEmpty()) def test_empty_multiline_read_write(): # Tests writing features to a layer of empty polygons src = gdal.OpenEx("data/netcdf-sg/write-tests/empty_mline_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/empty_mline.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/empty_mline.nc") assert(nc_tsrc is not None) nc_layer = nc_tsrc.GetLayerByName("places") assert(nc_layer.GetFeatureCount() == 2) first = nc_layer.GetNextFeature() assert(first.GetFieldAsString("NAMES") == "Somewhere") assert(first.GetGeometryRef().ExportToWkt() == "MULTILINESTRING ((0 5,2 0))") second = nc_layer.GetNextFeature() assert(second.GetFieldAsString("NAMES") == "Everywhere") assert(second.GetGeometryRef().IsEmpty()) def test_empty_multipolygon_read_write(): # Tests writing features to a layer of empty polygons src = gdal.OpenEx("data/netcdf-sg/write-tests/empty_multipolygon_write_test.json", gdal.OF_VECTOR) assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/empty_multipolygon.nc", src, format="netCDF") nc_tsrc = ogr.Open("tmp/empty_multipolygon.nc") assert(nc_tsrc is not None) nc_layer = nc_tsrc.GetLayerByName("places") assert(nc_layer.GetFeatureCount() == 2) first = nc_layer.GetNextFeature() assert(first.GetFieldAsString("NAMES") == "Nowhere") assert(first.GetGeometryRef().IsEmpty()) second = nc_layer.GetNextFeature() assert(second.GetFieldAsString("NAMES") == "Somewhere") assert(second.GetGeometryRef().ExportToWkt() == "MULTIPOLYGON (((0 0,2 0,2 2,0 2,0 0)))") def test_states_full_layer_buffer_restrict_correctness_single_datum(): # Single datum regression test src = gdal.OpenEx("data/netcdf-sg/write-tests/cf1.8_states.json") assert(src is not None) assert(src.GetLayerCount() == 1) gdal.VectorTranslate("tmp/states_4K_restrict_sd.nc", src, format="netCDF", layerCreationOptions = 
['BUFFER_SIZE=4096', "GROUPLESS_WRITE_BACK=YES"]) fk_ds = ogr.Open("tmp/states_4K_restrict_sd.nc") db_ds = ogr.Open("tmp/states_4K_restrict.nc") fk_ds_layer = fk_ds.GetLayerByName("geometry_container") db_ds_layer = db_ds.GetLayerByName("geometry_container") assert(fk_ds_layer is not None) assert(db_ds_layer is not None) for feat in range(49): lft = fk_ds_layer.GetNextFeature() dft = db_ds_layer.GetNextFeature() lftgeo = lft.GetGeometryRef() dftgeo = dft.GetGeometryRef() assert(lftgeo.Equal(dftgeo)) def test_netcdf_uint16_netcdf4_without_fill(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() # This dataset was created with nc_def_var_fill(cdfid, nZId, NC_NOFILL, NULL) # Check that we don't report a nodata value ds = gdal.Open('data/netcdf/uint16_netcdf4_without_fill.nc') assert not ds.GetRasterBand(1).GetNoDataValue() def test_netcdf_sen3_sral_mwr_fake_standard_measurement(): if not gdaltest.netcdf_drv_has_nc4: pytest.skip() ds = gdal.OpenEx('data/netcdf/sen3_sral_mwr_fake_standard_measurement.nc', gdal.OF_RASTER) assert not ds ds = gdal.OpenEx('data/netcdf/sen3_sral_mwr_fake_standard_measurement.nc', gdal.OF_VECTOR) assert ds assert ds.GetLayerCount() == 3 lyr = ds.GetLayer(0) assert lyr.GetName() == 'sen3_sral_mwr_fake_standard_measurement_time_01' assert lyr.GetSpatialRef() is not None assert lyr.GetLayerDefn().GetFieldCount() == 5 assert lyr.TestCapability(ogr.OLCFastFeatureCount) == 1 assert lyr.TestCapability(ogr.OLCRandomRead) == 1 assert lyr.TestCapability(ogr.OLCRandomWrite) == 0 assert lyr.GetFeatureCount() == 2 assert lyr.GetMetadata_Dict() == { 'alt_01_comment': 'Altitude of satellite above the reference ellipsoid', 'alt_01_long_name': 'altitude of the satellite : 1 Hz', 'alt_01_standard_name': 'height_above_reference_ellipsoid', 'alt_01_units': 'm', 'orb_alt_rate_01_comment': 'The reference surface for the orbital altitude rate is the combined mean_sea_surface/geoid surface. 
It is used to compute the Doppler correction on the altimeter range', 'orb_alt_rate_01_long_name': 'orbital altitude rate : 1 Hz', 'orb_alt_rate_01_units': 'm/s', 'surf_type_01_flag_meanings': 'ocean_or_semi_enclosed_sea enclosed_sea_or_lake continental_ice land', 'surf_type_01_flag_values': '{0,1,2,3}', 'surf_type_01_long_name': 'surface type : 1 Hz', 'time_01_calendar': 'gregorian', 'time_01_long_name': 'UTC: 1 Hz', 'time_01_standard_name': 'time', 'time_01_units': 'seconds since 2000-01-01 00:00:00.0', 'total_electron_content_01_long_name': 'Altimeter-derived total electron content (TECU) : 1 Hz', 'total_electron_content_01_units': 'count' } assert lyr.GetMetadataItem('alt_01_units') == 'm' f = lyr.GetNextFeature() assert f.GetGeometryRef().GetX() == pytest.approx(2.234567, 1e-7) assert f.GetGeometryRef().GetY() == pytest.approx(49.234567, 1e-7) assert f['time_01'] == 1.25 assert not f.IsFieldSet("surf_type_01") assert not f.IsFieldSet("orb_alt_rate_01") assert not f.IsFieldSet("total_electron_content_01") f = lyr.GetNextFeature() assert f['time_01'] == 2.25 assert f['surf_type_01'] == 1 assert f['orb_alt_rate_01'] == 0.01 assert f['total_electron_content_01'] == 10000000000.0 assert lyr.GetNextFeature() is None assert lyr.GetNextFeature() is None lyr.ResetReading() assert lyr.GetNextFeature() is not None lyr.SetSpatialFilterRect(-50,-50,-50,-50) lyr.ResetReading() assert lyr.GetNextFeature() is None assert lyr.GetFeatureCount() == 0 lyr.SetSpatialFilter(None) lyr.SetAttributeFilter('0 = 1') lyr.ResetReading() assert lyr.GetNextFeature() is None assert lyr.GetFeature(0) is None assert lyr.GetFeature(1).GetFID() == 1 assert lyr.GetFeature(3) is None lyr = ds.GetLayer(1) assert lyr.GetName() == 'sen3_sral_mwr_fake_standard_measurement_time_20_ku' f = lyr.GetNextFeature() assert not f.IsFieldSet('nb_stack_20_ku') f = lyr.GetNextFeature() assert f['nb_stack_20_ku'] == 1 def test_netcdf_chunked_multiple(): if not gdaltest.netcdf_drv_has_nc4: pytest.skip() ds = gdal.Open('data/netcdf/byte_chunked_multiple.nc') assert ds.GetRasterBand(1).GetBlockSize() == [10, 10] assert ds.GetRasterBand(1).Checksum() == 4672 def test_netcdf_chunked_not_multiple(): if not gdaltest.netcdf_drv_has_nc4: pytest.skip() ds = gdal.Open('data/netcdf/byte_chunked_not_multiple.nc') assert ds.GetRasterBand(1).GetBlockSize() == [15, 6] assert ds.GetRasterBand(1).Checksum() == 4672 def test_netcdf_create(): ds = gdaltest.netcdf_drv.Create('tmp/test_create.nc', 2, 2) ds.SetGeoTransform([2, 0.1, 0, 49, 0, -0.1]) ds.GetRasterBand(1).WriteRaster(0, 0, 2, 2, b'ABCD') ds = None ds = gdal.Open('tmp/test_create.nc') assert ds.GetGeoTransform() == pytest.approx([2, 0.1, 0, 49, 0, -0.1], rel=1e-10) assert ds.GetRasterBand(1).ReadRaster() == b'ABCD' ds = None gdal.Unlink('tmp/test_create.nc') def test_netcdf_sg1_8_max_variable_with_max_width_string_field_no_warning(): gdal.VectorTranslate("tmp/poly.nc", "../ogr/data/poly.shp", format="netCDF") gdal.ErrorReset() # Check that opening in raster/vector mode doesn't emit warning ds = gdal.OpenEx("tmp/poly.nc") assert gdal.GetLastErrorType() == 0 assert ds assert ds.GetLayerCount() == 1 ds = None gdal.Unlink('tmp/poly.nc') ############################################################################### # Test opening a netCDF 4 file whose HDF5 signature is not at the beginning def test_netcdf_hdf5_signature_not_at_beginning(): if gdaltest.netcdf_drv is None: pytest.skip() if not gdaltest.netcdf_drv_has_nc4: pytest.skip() # Works at least with since netCDF 4.7 version = 
gdaltest.netcdf_drv_version.split('.')
    if int(version[0]) * 100 + int(version[1]) < 407:
        pytest.skip()

    ds = gdal.Open('data/netcdf/byte_hdf5_starting_at_offset_1024.nc')
    assert ds is not None


def test_clean_tmp():
    # [KEEP THIS AS THE LAST TEST]
    # i.e. please do not add any tests after this one. Put new ones above.
    # Not actually a test, just cleans up tmp...
    gdaltest.clean_tmp()
    pytest.skip()
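# Illustrative, non-test helper sketch: the write tests above all repeat the same
# open -> VectorTranslate -> reopen -> compare pattern. The helper name and the
# call shown below are hypothetical (not part of the autotest suite); the sketch
# only assumes the gdal/ogr imports already used in this file.
def _roundtrip_check(src_path, out_nc, layer_name, expected, name_field="NAMES"):
    # expected: list of (wkt, field_value) pairs in feature order
    src = gdal.OpenEx(src_path, gdal.OF_VECTOR)
    assert src is not None
    gdal.VectorTranslate(out_nc, src, format="netCDF")
    nc_tsrc = ogr.Open(out_nc)
    assert nc_tsrc is not None
    layer = nc_tsrc.GetLayerByName(layer_name)
    assert layer is not None
    assert layer.GetFeatureCount() == len(expected)
    for wkt, value in expected:
        feat = layer.GetNextFeature()
        assert feat.GetGeometryRef().ExportToWkt() == wkt
        assert feat.GetFieldAsString(name_field) == value

# For example, the body of test_point_write could then shrink to:
# _roundtrip_check("data/netcdf-sg/write-tests/point_write_test.json",
#                  "tmp/test_point_write.nc", "point_collection",
#                  [("POINT (0.5 -0.5)", "FishingSpot1"),
#                   ("POINT (1 -1)", "FishingSpot2"),
#                   ("POINT (1.5 -1.5)", "FishingSpot3"),
#                   ("POINT (2 -2)", "FishingSpot4")])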
road_speed_limiter.py
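# Overview: this module runs a small UDP companion service for an external road
# limit client. RoadLimitSpeedServer broadcasts "EON:ROAD_LIMIT_SERVICE:v1" on
# Port.BROADCAST_PORT (2899), receives JSON datagrams carrying 'active' and
# 'road_limit' payloads (bound to UDP port 843, falling back to
# Port.RECEIVE_PORT), and main() republishes the parsed values on the cereal
# 'roadLimitSpeed' socket. RoadSpeedLimiter subscribes to that socket and turns
# camera / section speed limits into a slow-down target scaled by
# CAMERA_SPEED_FACTOR.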
import json import os import select import threading import time import socket import fcntl import struct from threading import Thread from cereal import messaging from common.params import Params from common.numpy_fast import clip, mean from common.realtime import sec_since_boot from selfdrive.config import Conversions as CV from selfdrive.kegman_kans_conf import kegman_kans_conf kegman_kans = kegman_kans_conf() CAMERA_SPEED_FACTOR = float(kegman_kans.conf['CAMERA_SPEED_FACTOR']) class Port: BROADCAST_PORT = 2899 RECEIVE_PORT = 2843 LOCATION_PORT = 2911 class RoadLimitSpeedServer: def __init__(self): self.json_road_limit = None self.active = 0 self.last_updated = 0 self.last_updated_active = 0 self.last_exception = None self.lock = threading.Lock() self.remote_addr = None broadcast = Thread(target=self.broadcast_thread, args=[]) broadcast.setDaemon(True) broadcast.start() # gps = Thread(target=self.gps_thread, args=[]) # gps.setDaemon(True) # gps.start() def gps_thread(self): sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal']) with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: while True: try: sm.update() if self.remote_addr is not None and sm.updated['gpsLocationExternal']: location = sm['gpsLocationExternal'] json_location = json.dumps([ location.latitude, location.longitude, location.altitude, location.speed, location.bearingDeg, location.accuracy, location.timestamp, location.source, location.vNED, location.verticalAccuracy, location.bearingAccuracyDeg, location.speedAccuracy, ]) address = (self.remote_addr[0], Port.LOCATION_PORT) sock.sendto(json_location.encode(), address) else: time.sleep(1.) except Exception as e: print("exception", e) time.sleep(1.) def get_broadcast_address(self): try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ip = fcntl.ioctl( s.fileno(), 0x8919, struct.pack('256s', 'wlan0'.encode('utf-8')) )[20:24] return socket.inet_ntoa(ip) except: return None def broadcast_thread(self): broadcast_address = None frame = 0 with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) while True: try: if broadcast_address is None or frame % 10 == 0: broadcast_address = self.get_broadcast_address() print('broadcast_address', broadcast_address) if broadcast_address is not None: address = (broadcast_address, Port.BROADCAST_PORT) sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address) except: pass time.sleep(5.) frame += 1 except: pass def send_sdp(self, sock): try: sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), (self.remote_addr[0], Port.BROADCAST_PORT)) except: pass def udp_recv(self, sock): ret = False try: ready = select.select([sock], [], [], 1.) 
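      # select.select() waits at most 1 second here; ready[0] lists the sockets
      # that are readable, so a timeout leaves ret False and udp_recv() returns
      # without blocking the caller's loop.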
ret = bool(ready[0]) if ret: data, self.remote_addr = sock.recvfrom(2048) json_obj = json.loads(data.decode()) if 'cmd' in json_obj: try: os.system(json_obj['cmd']) ret = False except: pass if 'echo' in json_obj: try: echo = json.dumps(json_obj["echo"]) sock.sendto(echo.encode(), (self.remote_addr[0], Port.BROADCAST_PORT)) ret = False except: pass try: self.lock.acquire() try: if 'active' in json_obj: self.active = json_obj['active'] self.last_updated_active = sec_since_boot() except: pass if 'road_limit' in json_obj: self.json_road_limit = json_obj['road_limit'] self.last_updated = sec_since_boot() finally: self.lock.release() except: try: self.lock.acquire() self.json_road_limit = None finally: self.lock.release() return ret def check(self): now = sec_since_boot() if now - self.last_updated > 20.: try: self.lock.acquire() self.json_road_limit = None finally: self.lock.release() if now - self.last_updated_active > 10.: self.active = 0 def get_limit_val(self, key, default=None): try: if self.json_road_limit is None: return default if key in self.json_road_limit: return self.json_road_limit[key] except: pass return default def main(): server = RoadLimitSpeedServer() roadLimitSpeed = messaging.pub_sock('roadLimitSpeed') with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: try: try: sock.bind(('0.0.0.0', 843)) except: sock.bind(('0.0.0.0', Port.RECEIVE_PORT)) sock.setblocking(False) while True: if server.udp_recv(sock): dat = messaging.new_message() dat.init('roadLimitSpeed') dat.roadLimitSpeed.active = server.active dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0) dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False) dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0) dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0) dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0) dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0) dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0) roadLimitSpeed.send(dat.to_bytes()) server.send_sdp(sock) server.check() except Exception as e: server.last_exception = e class RoadSpeedLimiter: def __init__(self): self.slowing_down = False self.started_dist = 0 self.sock = messaging.sub_sock("roadLimitSpeed") self.roadLimitSpeed = None def recv(self): try: dat = messaging.recv_sock(self.sock, wait=False) if dat is not None: self.roadLimitSpeed = dat.roadLimitSpeed except: pass def get_active(self): self.recv() if self.roadLimitSpeed is not None: return self.roadLimitSpeed.active return 0 def get_max_speed(self, CS, v_cruise_speed): log = "" self.recv() if self.roadLimitSpeed is None: return 0, 0, 0, False, "" try: road_limit_speed = self.roadLimitSpeed.roadLimitSpeed is_highway = self.roadLimitSpeed.isHighway cam_type = int(self.roadLimitSpeed.camType) cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist cam_limit_speed = self.roadLimitSpeed.camLimitSpeed section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed section_left_dist = self.roadLimitSpeed.sectionLeftDist if is_highway is not None: if is_highway: MIN_LIMIT = 40 MAX_LIMIT = 120 else: MIN_LIMIT = 30 MAX_LIMIT = 100 else: MIN_LIMIT = 30 MAX_LIMIT = 120 v_ego = CS.vEgo if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0: diff_speed = v_ego * 3.6 - (cam_limit_speed * CAMERA_SPEED_FACTOR) starting_dist = v_ego * 20. 
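        # Slow-down shaping (used just below): diff_speed is how many km/h the car is
        # above the factored camera limit, starting_dist (~20 s of travel) is where the
        # slow-down is allowed to begin, and safe_dist (~5.5 s of travel) is where the
        # limit must be met. pp = (d / td) ** 0.6 then eases the returned target from
        # the current speed down to cam_limit_speed * CAMERA_SPEED_FACTOR as
        # cam_limit_speed_left_dist shrinks toward safe_dist.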
safe_dist = v_ego * 5.5 if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < starting_dist): if not self.slowing_down: self.started_dist = cam_limit_speed_left_dist self.slowing_down = True first_started = True else: first_started = False td = self.started_dist - safe_dist d = cam_limit_speed_left_dist - safe_dist if d > 0. and td > 0. and diff_speed > 0. and (section_left_dist is None or section_left_dist < 10): pp = (d / td) ** 0.6 else: pp = 0 return cam_limit_speed * CAMERA_SPEED_FACTOR + int(pp * diff_speed), \ cam_limit_speed, cam_limit_speed_left_dist, first_started, log self.slowing_down = False return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0: if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT: if not self.slowing_down: self.slowing_down = True first_started = True else: first_started = False return section_limit_speed * CAMERA_SPEED_FACTOR, section_limit_speed, section_left_dist, first_started, log self.slowing_down = False return 0, section_limit_speed, section_left_dist, False, log except Exception as e: log = "Ex: " + str(e) pass self.slowing_down = False return 0, 0, 0, False, log road_speed_limiter = None def road_speed_limiter_get_active(): global road_speed_limiter if road_speed_limiter is None: road_speed_limiter = RoadSpeedLimiter() return road_speed_limiter.get_active() def road_speed_limiter_get_max_speed(CS, v_cruise_speed): global road_speed_limiter if road_speed_limiter is None: road_speed_limiter = RoadSpeedLimiter() return road_speed_limiter.get_max_speed(CS, v_cruise_speed) def get_road_speed_limiter(): global road_speed_limiter if road_speed_limiter is None: road_speed_limiter = RoadSpeedLimiter() return road_speed_limiter if __name__ == "__main__": main()
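# Illustrative sketch only (not part of the original module): one way a longitudinal
# planner could consume get_max_speed(). `CS` and `v_cruise_kph` are hypothetical
# stand-ins for the caller's car state and cruise setpoint; the five-tuple matches
# RoadSpeedLimiter.get_max_speed() above, which works in km/h.
def apply_road_limit(CS, v_cruise_kph):
  limiter = get_road_speed_limiter()
  max_speed, limit_speed, left_dist, first_started, log = limiter.get_max_speed(CS, v_cruise_kph)
  if limit_speed > 0 and max_speed > 0:
    # clamp the cruise setpoint to the camera / section target
    return min(v_cruise_kph, max_speed)
  return v_cruise_kph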
ominibot_car_com.py
#!/usr/bin/env python # coding=UTF-8 # Copyright (c) 2020, iCShop, Inc. # All rights reserved. # # Developer: Lin Wei-Chih, kjoelovelife@gmail.com, on 2020-12-22 # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributi ons in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ## Import Libraries import numpy as np import time, struct, binascii, math, threading, sys from serial import Serial, SerialException from functools import reduce class Ominibot_Car(object): def __init__(self, port="ominibot_car", baud=115200, timeout=None, py_version=3): ## setup connected parameter self.version = self.__version__() self.param = { "port": port, "baud": baud, "timeout": timeout, "send_interval": 0.1, "imu_freq": 100, "encoder_freq": 25, "battery_freq": 1, "interrupt_time": 1.5, "motor_correct": (0, 0, 0, 0), } self._serialOK = False self._is_synced = False self._imu_new_data = False self._odom_new_data = False self._battery_new_data = False self._first_odom = True self._first_battery = True self.error_flag = False self.t_stop = threading.Event() try: print("Opening serial port: {}".format(self.param["port"])) self.connect() except SerialException as error: print(error) raise return ###### auto return value ###### self.imu = {"accel":[0, 0, 0], "gyro":[0, 0, 0]} self.imu_bfr = {"accel":[0, 0, 0], "gyro":[0, 0, 0]} self.odom = [0, 0, 0, 0] self.odom_bfr = [0, 0, 0, 0] self.battery = [0, 0, 0] self.battery_bfr = [0, 0, 0] self.imu_seq = 0 self.odom_seq = 0 self.battery_seq = 0 self.last_imu_seq = 0 self.last_odom_seq = 0 self.last_battery_seq = 0 ###### read value ###### self.system_value = { "speed_limit" : [0, 0, 0, 0], "location_limit" : [0, 0, 0, 0], "location_kp" : [0, 0, 0, 0], "location_ki" : [0, 0, 0, 0], "location_kd" : [0, 0, 0, 0], "speed_kp" : [0, 0, 0, 0], "speed_ki" : [0, 0, 0, 0], "gyro_compensate": [0, 0, 0, 0], "system_mode" : [0, 0, 0, 0], "gyro_correct" : [0, 0, 0, 0], "motor_voltage" : [0, 500], "battery_voltage" : [32767, 32767], "gyro_turn_angle": [0, 0, 0, 0], } if py_version == 3: self.respond = { "head": 0x23, "auto_head": 0xFF, "speed_limit": 0x01, "location_limit": 0x02, "location_kp": 0x03, "location_ki": 0x04, "location_kd": 0x05, "speed_kp": 
0x06, "speed_ki": 0x07, "gyro_compensate": 0x08, "system_mode": 0x09, "gyro_correct": 0x0A, "motor_voltage": 0x0B, "battery_voltage": 0x0C, "gyro_turn_angle": 0x20, "auto_gyro": 0xFA, "auto_encoder": 0xFB, "auto_battery": 0xFC, } elif py_version == 2: self.respond = { "head": '\x23', "auto_head":'\xFF', "speed_limit": '\x01', "location_limit": '\x02', "location_kp": '\x03', "location_ki": '\x04', "location_kd": '\x05', "speed_kp": '\x06', "speed_ki": '\x07', "gyro_compensate": '\x08', "system_mode": '\x09', "gyro_correct": '\x0A', "motor_voltage": '\x0B', "battery_voltage": '\x0C', "gyro_turn_angle": '\x20', "auto_gyro": '\xFA', "auto_encoder": '\xFB', "auto_battery": '\xFC', } else: print("Please check out your python version. Default use python3.") self.respond = { "head": 0x23, "auto_head": 0xFF, "speed_limit": 0x01, "location_limit": 0x02, "location_kp": 0x03, "location_ki": 0x04, "location_kd": 0x05, "speed_kp": 0x06, "speed_ki": 0x07, "gyro_compensate": 0x08, "system_mode": 0x09, "gyro_correct": 0x0A, "motor_voltage": 0x0B, "battery_voltage": 0x0C, "gyro_turn_angle": 0x20, "auto_gyro": 0xFA, "auto_encoder": 0xFB, "auto_battery": 0xFC, } def connect(self): if self._serialOK == False: self.serial = Serial(self.param["port"], self.param["baud"], timeout=self.param["timeout"]) self._serialOK = True def disconnect(self): if self._serialOK == True: print("Try to disconnect ominibot car") self.serial.close() self._serialOK == False print("Done with disconnecting ominibot car!") def serial_thread(self): # Serial initialization print("========= Serial thread ==========") while(not self.t_stop.is_set()): try: reading = self.serial.read(2) #print(binascii.hexlify(reading)) except Exception as error: self.error_flag = True break #====== imu data packet (python3) ======# if reading[0] == self.respond["auto_head"] and reading[1] == self.respond["auto_gyro"]: #ser_in = self.serial.read(13) try: ser_in = self.serial.read(13) except Exception: self.error_flag = True break self.imu_decode(ser_in, 13) self._is_synced = True #debug #to_hex = lambda x: "".join("{02X}".format(ord(c)) for c in reading) #print(to_hex(b'\x03\xac23\n')) #====== encoder data packet ======# elif reading[0] == self.respond["auto_head"] and reading[1] == self.respond["auto_encoder"]: #ser_in = self.serial.read(9) try: ser_in = self.serial.read(9) except Exception: self.error_flag = True break self.odom_decode(ser_in, 7) self._is_synced = True #====== battery data packet ======# elif reading[0] == self.respond["auto_head"] and reading[1] == self.respond["auto_battery"]: #ser_in = self.serial.read(5) try: ser_in = self.serial.read(5) except Exception: self.error_flag = True break self.battery_decode(ser_in, 5) self._is_synced = True #====== lost sync ======# else: if self._is_synced == True: if self._first_odom == True or self._first_battery == True: print("Initial syncing...") self._is_synced = False continue #print("out of sync") #to_hex = lambda x: "".join("{02X}".format(ord(c)) for c in reading) #print(to_hex(b'\x03\xac23\n')) bfr = self.serial.read(1) #to_hex = lambda x: "".join("{02X}".format(ord(c)) for c in bfr) #print(to_hex(b' ', end='')) self._is_synced = False # if loop breaks with an error flag if self.error_flag == True: print("serial read error") self.serial.close() self._serialOK = False self._is_synced = False self._odom_new_data = False self._battery_new_data = False print("thread ends") raise return # if threads ends here print("Sending stoping signal to ominibot car") self.serial.close() self._serialOK = False 
self._is_synced = False self._odom_new_data = False self._imu_new_data = False self._battery_new_data = False print("thread ends") ###### Decode imu data ###### def imu_decode(self, data, size): # reference: https://docs.python.org/3/library/struct.html self.imu_bfr["accel"][0] = struct.unpack('>h', data[0: 2])[0] self.imu_bfr["accel"][1] = struct.unpack('>h', data[2: 4])[0] self.imu_bfr["accel"][2] = struct.unpack('>h', data[4: 6])[0] self.imu_bfr["gyro"][0] = struct.unpack('>h', data[6: 8])[0] self.imu_bfr["gyro"][1] = struct.unpack('>h', data[8: 10])[0] self.imu_bfr["gyro"][2] = struct.unpack('>h', data[10: 12])[0] self.imu_seq = struct.unpack('>B', data[12: 13])[0] #debug #print("imu",self.imu_seq) self.imu = self.imu_bfr self._imu_new_data = True ###### Decode odometry data ###### def odom_decode(self, data, size): # reference: https://docs.python.org/3/library/struct.html self.odom_bfr[0] = struct.unpack('>h', data[0: 2])[0] self.odom_bfr[1] = struct.unpack('>h', data[2: 4])[0] self.odom_bfr[2] = struct.unpack('>h', data[4: 6])[0] self.odom_bfr[3] = struct.unpack('>h', data[6: 8])[0] self.odom_seq = struct.unpack('>B', data[8: 9])[0] #debug #print("odom", self.odom_seq, self.odom[0:4]) if (self.odom_seq != ((self.last_odom_seq + 1 )%256)): if not self._first_odom: print("odom seq mismatch, prev: {}, now: {}".format(self.last_odom_seq, self.odom_seq)) if self._first_odom == True: self._first_odom = False self.last_odom_seq = self.odom_seq self.odom = self.odom_bfr self._odom_new_data = True ###### Decode battery data ###### def battery_decode(self, data, size): # reference: https://docs.python.org/3/library/struct.html self.battery_bfr[0] = struct.unpack('>h', data[0: 2])[0] self.battery_bfr[1] = struct.unpack('>h', data[2: 4])[0] self.battery_seq = struct.unpack('B', data[4: 5])[0] #debug #print("battery, voltage:{}, power:{}".format(self.battery_bfr[0], self.battery_bfr[1])) if (self.battery_seq != ((self.last_battery_seq + 1 )%256)): if not self._first_battery: print("battery seq mismatch, prev:{}, now:{}".format(self.last_battery_seq,self.battery_seq)) if self._first_battery: self._first_battery = False self.last_battery_seq = self.battery_seq self.battery = self.battery_bfr self._battery_new_data = True ###### read data 1byte decode ###### def read_data_decode_1byte(self, param_name, data, size): for number in range(size): self.system_value[param_name][number] = struct.unpack('B', data[ number: (number + 1) ])[0] return self.system_value[param_name] ###### read data 2byte decode ###### def read_data_decode_2byte(self, param_name, data, size): for number in range(int(size/2)): self.system_value[param_name][number] = struct.unpack('>H', data[ int((number * 2)): int(math.pow(2, number+1)) ])[0] return self.system_value[param_name] ###### read system mode decode ###### def system_mode_decode(self, data, size): for number in range(size): self.system_value["system_mode"][number] = struct.unpack('B', data[ number: (number + 1) ])[0] ###### read motor voltage decode ###### def motor_voltage_decode(self, data, size): self.system_value["motor_voltage"][0] = struct.unpack('>H', data[0: 2])[0] self.system_value["motor_voltage"][1] = struct.unpack('>H', data[2: 4])[0] ###### read motor voltage decode ###### def cutoff_voltage_decode(self, data, size): self.system_value["cutoff_voltage"][0] = struct.unpack('>H', data[0: 2])[0] self.system_value["cutoff_voltage"][1] = struct.unpack('>H', data[2: 4])[0] ######## Module communication from outside ###### def serialOK(self): return self._serialOK 
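    ###### auto-report packet framing (handled in serial_thread above) ######
    # every frame begins with 0xFF (auto_head) followed by a type byte:
    #   0xFA -> IMU:     13 bytes = 3 x int16 accel + 3 x int16 gyro (big-endian) + 1 seq byte
    #   0xFB -> encoder:  9 bytes = 4 x int16 wheel counts (big-endian) + 1 seq byte
    #   0xFC -> battery:  5 bytes = 2 x int16 (voltage, power) + 1 seq byte
    # the sequence byte increments modulo 256 and is used to detect dropped frames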
def imu_new_data(self): return self._imu_new_data def odom_new_data(self): return self._odom_new_data def battery_new_data(self): return self._battery_new_data def get_imu_data(self): if self._imu_new_data == True: # data assign self._imu_new_data = False return self.imu else: return None def get_odom_data(self): if self._odom_new_data == True: # data assign self._odom_new_data = False return {"seq": self.odom_seq, "pos_dt": self.odom} else: return None def get_battery_data(self): if self._battery_new_data == True: self._battery_new_data = False return {"seq": self.battery_seq, "battery": self.battery} else: None def stop_thread(self): self.t_stop.set() start = time.time() if self._serialOK: while self._serialOK: if (time.time() - start) > 3: self._serialOK = False self.serial.close() ############### motor control ################## def information(self): print("Omnibot car Version: {}.".format(self.__version__())) print("Mecanum wheel configure: left_front: motor 1, left_back: motor 4, right_front: motor 2, right_back: motor 4") print("Omnibot wheel configure: right_front: motor 2, left_front: motor 3, back: motor 1") print("ROSKY wheel configure: according to the Ominibot car information.") def motor_correct(self, v1=0, v2=0, v3=0, v4=0, information=False, debug=False): self.param["motor_correct"] = (v1, v2 ,v3 ,v4) if information == True: print("Your motor correct: {}".format(self.param["motor_correct"])) ## coordinate: ROS transformer def omnibot(self, Vx=0.0, Vy=0.0, Vz=0.0, information=False, debug=False, platform="omnibot"): # set direction function = { "Vx": lambda V: 0 if V >= 0 else math.pow(2, 2), "Vy": lambda V: 0 if V >= 0 else math.pow(2, 1), "Vz": lambda V: 0 if V < 0 else math.pow(2, 0), } direction = [ function["Vx"](Vx), function["Vy"](Vy), function["Vz"](Vz), ] direction = int(reduce(lambda add_x, add_y: add_x + add_y, direction)) Vx = int(round(self.clamp( abs(Vx), 0, 65536 ))) Vy = int(round(self.clamp( abs(Vx), 0, 65536 ))) Vz = int(round(self.clamp( abs(Vx), 0, 65536 ))) cmd = bytearray(b'\xFF\xFE\x01') cmd += struct.pack('>h', Vx) # 2-bytes , velocity for x axis cmd += struct.pack('>h', Vy) # 2-bytes , velocity for y axis cmd += struct.pack('>h', Vz) # 2-bytes , velocity for z axis # 1-bytes, direction for x(bit2), y(bit1), z(bit0), and 0: normal, 1: reverse cmd += struct.pack('>b', direction) if debug == True : print("send signal about {}: {} ".format(platform, binascii.hexlify(cmd))) if self._serialOK == True: self.serial.write(cmd) time.sleep(self.param["send_interval"]) def mecanum(self, Vx=0.0, Vy=0.0, Vz=0.0): self.omnibot(Vx=Vx, Vy=Vy, Vz=Vz, platform="mecanum") def individual_wheel(self, v1=0.0, v2=0.0, v3=0.0, v4=0.0, mode=0x03, information=False, debug=False): ## mode: 0x02 -> with encoder, 0x03 -> without encoder ## setting up reverse, left motors are normal direction, right motors are reverse direction function = { "v1": lambda V: math.pow(2, 2) if V < 0 else 0, "v2": lambda V: math.pow(2, 1) if V < 0 else 0, "v3": lambda V: math.pow(2, 0) if V < 0 else 0, "v4": lambda V: math.pow(2, 3) if V < 0 else 0, } direction = [ function["v1"](v1), function["v2"](v2), function["v3"](v3), function["v4"](v4), ] direction = int(reduce(lambda add_x, add_y: add_x + add_y, direction)) if mode == 0x02: speed_max = 100 speed_min = 0 elif mode == 0x03: speed_max = 10000 speed_min = 0 else: print("Mode error! 
Please chechout your setting(just 0x02 or 0x03).") speed = { "v1":int(round(self.clamp(abs(v1) + self.param["motor_correct"][0], speed_min, speed_max))), "v2":int(round(self.clamp(abs(v2) + self.param["motor_correct"][1], speed_min, speed_max))), "v3":int(round(self.clamp(abs(v3) + self.param["motor_correct"][2], speed_min, speed_max))), "v4":int(round(self.clamp(abs(v4) + self.param["motor_correct"][3], speed_min, speed_max))), } ## setting up wheel velocity cmd = bytearray(b'\xFF\xFE') cmd.append(mode) cmd += struct.pack('>h', speed["v1"]) # 2-bytes cmd += struct.pack('>h', speed["v2"]) # 2-bytes cmd += struct.pack('>h', speed["v3"]) # 2-bytes cmd += struct.pack('>h', speed["v4"]) # 2-bytes cmd += struct.pack('>b', direction) # 1-bytes if debug == True : print("send signal about individual_wheel: {} ".format(binascii.hexlify(cmd))) if self._serialOK == True: self.serial.write(cmd) time.sleep(self.param["send_interval"]) def rosky_diff_drive(self, left=0.0, right=0.0, alpha=-1, mode=0x02, magnification=1, information=False, debug=False): # mode : 0x02 -> with encoderm 0x03 -> without encoder # V1: rf, V2: lf, V3: rb, V4: lb speed_limit = { "max": 100 if mode == 0x02 else 10000, "min":0, } left = left if mode == 0x03 else left * alpha right = right if mode == 0x03 else right * alpha ## setting up reverse, left motors are normal direction, right motors are reverse direction function = { "right": lambda V: math.pow(2, 0) + math.pow(2, 2) if V < 0 else 0, "left" : lambda V: 0 if V < 0 else math.pow(2, 1) + math.pow(2, 3), } direction = [ function["right"](right), function["left"](left) ] direction = int(reduce(lambda add_x, add_y: add_x + add_y, direction)) ## setting up wheel velocity speed = { "v1": int(round(self.clamp(abs( (right * magnification)) + self.param["motor_correct"][0], speed_limit["min"], speed_limit["max"]))), "v2": int(round(self.clamp(abs( (left * magnification)) + self.param["motor_correct"][1], speed_limit["min"], speed_limit["max"]))), "v3": int(round(self.clamp(abs( (right * magnification)) + self.param["motor_correct"][2], speed_limit["min"], speed_limit["max"]))), "v4": int(round(self.clamp(abs( (left * magnification)) + self.param["motor_correct"][3], speed_limit["min"], speed_limit["max"]))), } cmd = bytearray(b'\xFF\xFE') cmd.append(mode) cmd += struct.pack('>h',speed["v1"]) # 2-bytes cmd += struct.pack('>h',speed["v2"]) # 2-bytes cmd += struct.pack('>h',speed["v3"]) # 2-bytes cmd += struct.pack('>h',speed["v4"]) # 2-bytes cmd += struct.pack('>b',direction) # 1-bytes if debug == True : print("send signal about rosky_diff_drive: {} ".format(binascii.hexlify(cmd))) if self._serialOK == True: self.serial.write(cmd) time.sleep(self.param["send_interval"]) def clamp(self,value=0.0, _min=0.0, _max=0.0): return max(min(_max, value), _min) def set_mode_A(self, param_name, number=50, information=False, debug=False): item = { "load_setup": 0x01, "initialize": 0x02, "write_setting": 0x03, "gyro_compensate_off": 0x04, "gyro_compensate_on": 0x05, "gyro_compensate_restart": 0x06, } if param_name in item: Tx8 = item.get(param_name) else: print("Please check out your param name in\n {}".format(list(item.keys()))) return # send signal Tx0 Tx1 Tx2 Tx3 Tx4 Tx5 Tx6 Tx7 cmd = bytearray(b'\xFF\xFE\x80\x80\x00\x80\x00\x00') cmd.append(Tx8) cmd.append(0x00) if debug == True : print("send signal about {}: {} ".format(param_name, binascii.hexlify(cmd))) if self._serialOK == True: for index in range(number): self.serial.write(cmd) time.sleep(self.param["send_interval"]) return def 
load_setup(self, number=50, information=False, debug=False): self.set_mode_A(param_name="load_setup", number=number, information=information, debug=debug) def initialize(self, number=50, information=False, debug=False): self.set_mode_A(param_name="initialize", number=number, information=information, debug=debug) def write_setting(self, number=50 ,information=False, debug=False): self.set_mode_A(param_name="write_setting", number=number, information=information, debug=debug) def gyro_compensate(self, switch="off", number=50, information=False, debug=False): param_name = "" if switch == "off": param_name = "gyro_compensate_off" elif switch == "on": param_name = "gyro_compensate_on" elif switch == "restart": param_name = "gyro_compensate_restart" else: print("Error. Please restart your code and ominibot car.") self.error_flag = True self.set_mode_A(param_name=param_name, number=number, information=information, debug=debug) def set_mode_B(self, param_name, value_1=0, value_2=0, number=50, information=False, debug=False): item = { "speed_limit": 0x01, "location_limit": 0x02, "location_kp": 0x03, "location_ki": 0x04, "location_kd": 0x05, "speed_kp": 0x06, "speed_ki": 0x07, "gyro_compensate": 0x08, "system_mode": 0x09, "gyro_correct": 0x0A, "motor_voltage": 0x0B, "battery_voltage": 0x0C, } item_value_byte_2 = ["speed_limit", "location_limit", "gyro_correct", "motor_voltage"] item_value_byte_4 = ["location_kp", "location_ki", "location_kd", "speed_kp", "speed_ki", "gyro_compensate", "system_mode", "location_kp"] item_value_2 = ["battery_voltage"] if param_name in item: Tx4 = item.get(param_name) else: print("Please check out your param name in\n {}".format(list(item.keys()))) return # send signal Tx0 Tx1 Tx2 Tx3 cmd = bytearray(b'\xFF\xFE\x80\x80') cmd.append(Tx4) if param_name in item_value_byte_2: cmd.append(0x00) # Tx5 cmd.append(0x00) # Tx6 cmd += struct.pack('>H',int(value_1)) # Tx7, Tx8 elif param_name in item_value_byte_4: cmd += struct.pack('>I',int(value_1)) # Tx5, Tx6, Tx7, Tx8 elif param_name in item_value_2: cmd += struct.pack('>H',int(value_1)) # Tx5, Tx6 cmd += struct.pack('>H',int(value_2)) # Tx7, Tx8 else: print("Error") cmd.append(0x00) # Tx9 if debug == True : print("send signal about {}: {} ".format(param_name, binascii.hexlify(cmd))) if self._serialOK == True: print("Setting {}...".format(param_name)) for index in range(number): self.serial.write(cmd) time.sleep(self.param["send_interval"]) if information == True: self.read_data(param_name=param_name, information=True) return def set_speed_limit(self, speed, information=False, debug=False): self.set_mode_B(param_name="speed_limit", value_1=int(speed), information=information, debug=debug) def set_location_limit(self, location, information=False, debug=False): self.set_mode_B(param_name="location_limit", value_1=int(location), information=information, debug=debug) def set_location_PID(self, controller, gain, information=False, debug=False): _controller = ["kp", "ki", "kd"] if controller in _controller: if controller == "kp": param_name = "location_kp" elif controller == "ki": param_name = "location_ki" elif controller == "kd": param_name = "location_kd" else: print("Error! 
Please check out your controller, just can type: {}.".format(_controller)) return self.set_mode_B(param_name=param_name, value_1=int(gain), information=information, debug=debug) def set_speed_PI(self, controller, gain, information=False, debug=False): _controller = ["kp", "ki"] if controller in _controller: if controller == "kp": param_name = "speed_kp" elif controller == "ki": param_name = "speed_ki" else: print("Error! Please check out your controller, just can type: {}.".format(_controller)) return self.set_mode_B(param_name=param_name, value_1=int(gain), information=information, debug=debug) def set_gyro_compensate_param(self, value, information=False, debug=False): self.set_mode_B(param_name="gyro_compensate", value_1=int(value), information=information, debug=debug) #================ set system node =============== # vehicle (Bit0) : 0 -> omnibot, 1-> Mecanum, 2-> individual with encoder, 3-> individual without encoder # imu (Bit3) : 0 -> not to do , 1 -> do it # imu_axis (Bit4) : 0 -> not to do , 1 -> do it # motor_direct (Bit8) : 0 -> normal , 1 -> reverse # encoder_direct(Bit9) : 0 -> normal , 1 -> reverse # turn_direct (Bit10) : 0 -> normal , 1 -> reverse # imu_reverse (Bit11) : 0 -> normal , 1 -> reverse #================================================ def set_system_mode(self, information=False, debug=False, platform=None, vehicle=0, imu_correct=False, imu_axis_correct=False, motor_reverse=False, encoder_reverse=False, turn_reverse=False, imu_reverse=False): _platform = { "omnibot": 0, "mecanum": 1, "four_wheel": 2, } if platform in _platform.keys(): vehicle = _platform.get(platform, 0 ) else: if platform == None: print("Please choose platform: {} ".format(list(_platform))) return else: print("We don't have platform [{}]. Please choose platform below: ".format(platform)) print(list(_platform.keys())) return calculate={ "vehicle" : lambda setting : setting, "imu" : lambda setting : 0 if setting == False else math.pow(2, 3), "imu_axis" : lambda setting : 0 if setting == False else math.pow(2, 4), "motor_direct" : lambda setting : 0 if setting == False else math.pow(2, 8), "encoder_direct": lambda setting : 0 if setting == False else math.pow(2, 9), "turn_direct" : lambda setting : 0 if setting == False else math.pow(2, 10), "imu_direct" : lambda setting : 0 if setting == False else math.pow(2, 11), } mode = [ calculate["vehicle"](vehicle), calculate["imu"](imu_correct), calculate["imu_axis"](imu_axis_correct), calculate["motor_direct"](motor_reverse), calculate["encoder_direct"](encoder_reverse), calculate["turn_direct"](turn_reverse), calculate["imu_direct"](imu_reverse), ] mode = int(reduce(lambda add_x, add_y: add_x + add_y, mode)) cmd = bytearray(b'\xFF\xFE\x80\x80\x09\x00\x00') # Tx[0]~Tx[6] cmd += struct.pack('>h', mode) # Tx[7] ,Tx[8] cmd.append(0x00) # Tx[9] if debug == True : print("send signal about set system mode: {} ".format(binascii.hexlify(cmd))) if self._serialOK == True: for index in range(5): self.serial.write(cmd) time.sleep(0.01) if information == True: print("Your platform now setting: {} ".format(platform)) return def set_gyro_correct(self, value, information=False, debug=False): self.set_mode_B(param_name="gyro_correct", value_1=int(value), information=information, debug=debug) def set_motor_voltage(self, voltage=5, information=False, debug=False): voltage = int(voltage * 100) self.set_mode_B(param_name="motor_voltage", value_1=voltage, information=information, debug=debug) def set_battery_voltage(self, full=12.6, cut=11.1, information=False, debug=False): 
full = int(full * 100) cut = int(cut * 100) self.set_mode_B(param_name="battery_voltage", value_1=full, value_2=cut, information=information, debug=debug) def read_data(self, param_name, information=False, debug=False): if param_name in self.system_value: if not param_name == "head" or param_name == "auto_head": Tx4 = { "speed_limit": 0x11, "location_limit": 0x12, "location_kp": 0x13, "location_ki": 0x14, "location_kd": 0x15, "speed_kp": 0x16, "speed_ki": 0x17, "gyro_compensate": 0x18, "system_mode": 0x19, "gyro_correct": 0x1A, "motor_voltage": 0x1B, "battery_voltage": 0x1C, "gyro_turn_angle": 0x20, }.get(param_name, 0) else: print("Please check out your param name in\n {}".format(list(self.system_value.keys()))) return else: print("Please check out your param name in\n {}".format(list(self.system_value.keys()))) return # send signal Tx0 Tx1 Tx2 Tx3 cmd = bytearray(b'\xFF\xFE\x80\x80') cmd.append(Tx4) # Tx4 for index in range(5, 10, 1): # Tx5 ~ Tx9 cmd.append(0x00) if debug == True : print("send signal about {}: {} ".format(param_name, binascii.hexlify(cmd))) if self._serialOK == True: start = time.time() interval = time.time() - start _read = False while(interval < self.param["interrupt_time"]): try: self.serial.write(cmd) reading = self.serial.read(2) if reading[0] == self.respond["head"] and reading[1] == self.respond[param_name]: _read = True break else: interval = time.time() - start time.sleep(0.01) except Exception: self.error_flag = True break if _read == True: try: ser_in = self.serial.read(4) except Exception as error: self.error_flag = True if param_name == "motor_voltage" or param_name == "battery_voltage": data_decode = self.read_data_decode_2byte(param_name, ser_in, 4) else: data_decode = self.read_data_decode_1byte(param_name, ser_in, 4) if information == True: print("{}: {}".format(param_name, data_decode)) else: return data_decode else: print("Warn! Can not get {}. 
Please disconnect and try again.".format(param_name)) return def read_speed_limit(self, information=False, debug=False): self.read_data(param_name="speed_limit", information=information, debug=debug) def read_location_limit(self, information=False, debug=False): self.read_data(param_name="location_limit", information=information, debug=debug) def read_location_kp(self, information=False, debug=False): self.read_data(param_name="location_kp", information=information, debug=debug) def read_location_ki(self, information=False, debug=False): self.read_data(param_name="location_ki", information=information, debug=debug) def read_location_kd(self, information=False, debug=False): self.read_data(param_name="location_kd", information=information, debug=debug) def read_speed_kp(self, information=False, debug=False): self.read_data(param_name="speed_kp", information=information, debug=debug) def read_speed_ki(self, information=False, debug=False): self.read_data(param_name="speed_ki", information=information, debug=debug) def read_gyro_compensate(self, information=False, debug=False): self.read_data(param_name="gyro_compensate", information=information, debug=debug) def read_system_mode(self, information=False, debug=False): self.read_data(param_name="system_mode", information=information, debug=debug) def read_gyro_correct(self, information=False, debug=False): self.read_data(param_name="gyro_correct", information=information, debug=debug) def read_motor_voltage(self, information=False, debug=False): self.read_data(param_name="motor_voltage", information=information, debug=debug) def read_battery_voltage(self, information=False, debug=False): self.read_data(param_name="battery_voltage", information=information, debug=debug) def __version__(self, information=False): if information == True: print("Firmware date : 2020.12.10.") print("Firmware version : V0.08.") print("Firmware system item: {}.".format(list(self.system_value.keys()))) print("Driver developer : Wei-Chih Lin, github: https://github.com/kjoelovelife") print("More information : https://github.com/CIRCUSPi/OminiBotHV") return else: return "V0.08" if __name__ == '__main__': #### test code #### _port = "/dev/ominibot_car" _baud = 115200 ominibot = Ominibot_Car(_port,_baud, py_version=3) #ominibot.set_system_mode(platform="omnibot") #ominibot.__version__(information=True) #ominibot.set_battery_voltage(cut=11.1, information=True) #ominibot.read_battery_voltage(information=True) #motor_driver.set_cutoff_voltage(11.1) #motor_driver.set_motor_voltage(7.4) #ominibot.gyro_compensate(switch="off", information=True, debug=True) ###### auto read information example and control motor ###### ''' try: thread = threading.Thread(target=motor_driver.serial_thread) thread.start() except: print("error") motor_driver.stop_thread() sys.exit(0) start = time.time() end = time.time() interval = end - start while(interval<3): battery = motor_driver.get_battery_data() imu = motor_driver.get_imu_data() odom = motor_driver.get_odom_data() print(battery) print(imu) print(odom) motor_driver.rosky_diff_drive(left=40,right=40, mode=0x02) time.sleep(1) end = time.time() interval = end - start motor_driver.stop_thread() ''' ###### motor control example ###### #ominibot.set_system_mode(platform="omnibot") start = time.time() end = time.time() interval = end - start while(interval< 10): # left: left side, right: right side # mode=0x02: with encode, mode=0x03: without encode # ominibot.mecanum(-30,0,0) #ominibot.individual_wheel(30,0,0) ominibot.motor_correct(2000, 2000, 3000, 
3000) ominibot.rosky_diff_drive(500, 500, mode=0x03) end = time.time() interval = end - start
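
# A minimal usage sketch (not part of the original driver): it shows one way to
# exercise the public methods defined above -- the serial_thread reader,
# get_odom_data()/get_battery_data(), rosky_diff_drive() and stop_thread().
# The port path and baud rate reuse the test values above; the function name,
# speeds and timings here are illustrative and may need tuning for real hardware.
def example_drive(port="/dev/ominibot_car", baud=115200, seconds=3.0):
    import threading  # may duplicate a module-level import; kept local so the sketch is self-contained
    import time

    car = Ominibot_Car(port, baud, py_version=3)
    reader = threading.Thread(target=car.serial_thread)
    reader.daemon = True
    reader.start()

    t_end = time.time() + seconds
    while time.time() < t_end:
        odom = car.get_odom_data()        # None until a fresh encoder packet has arrived
        battery = car.get_battery_data()  # None until a fresh battery packet has arrived
        if odom is not None:
            print("encoder values:", odom["pos_dt"], "seq:", odom["seq"])
        if battery is not None:
            print("battery:", battery["battery"])
        car.rosky_diff_drive(left=500, right=500, mode=0x03)  # drive straight, open loop
        time.sleep(0.1)

    car.rosky_diff_drive(left=0, right=0, mode=0x03)  # stop the wheels
    car.stop_thread()                                 # stop the reader and close the serial port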
async_pool_executor.py
import atexit
import os
import asyncio
import time
import traceback
from threading import Thread, Event

import nb_log  # noqa
from function_scheduling_distributed_framework.utils.develop_log import develop_logger  # noqa

if os.name == 'posix':
    import uvloop

    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

"""
# An async pool could also be built on janus's thread-safe queue.  That queue's
# performance is no better than the producer/consumer implemented in this module,
# so it was not re-implemented with that package.

import janus
import asyncio
import time
import threading
import nb_log

queue = janus.Queue(maxsize=6000)


async def consume():
    while 1:
        # time.sleep(1)
        val = await queue.async_q.get()  # this is the async side, don't mix them up
        print(val)


def push():
    for i in range(50000):
        # time.sleep(0.2)
        # print(i)
        queue.sync_q.put(i)  # this is the sync side, don't mix them up


if __name__ == '__main__':
    threading.Thread(target=push).start()
    loop = asyncio.get_event_loop()
    loop.create_task(consume())
    loop.run_forever()
"""


class AsyncPoolExecutor2:
    def __init__(self, size, loop=None):
        self._size = size
        self.loop = loop or asyncio.new_event_loop()
        self._sem = asyncio.Semaphore(self._size, loop=self.loop)
        # atexit.register(self.shutdown)
        Thread(target=self._start_loop_in_new_thread).start()

    def submit(self, func, *args, **kwargs):
        while self._sem.locked():
            time.sleep(0.01)
        asyncio.run_coroutine_threadsafe(self._run_func(func, *args, **kwargs), self.loop)

    async def _run_func(self, func, *args, **kwargs):
        async with self._sem:
            result = await func(*args, **kwargs)
            return result

    def _start_loop_in_new_thread(self, ):
        self.loop.run_forever()

    def shutdown(self):
        self.loop.stop()
        self.loop.close()


class AsyncPoolExecutor:
    """
    Gives the same API as a thread pool.  The best-performing approach would be to
    make submit an async def too, with producer and consumer running in the same
    thread on the same loop, but that would break compatibility of the call chain,
    so the calling style would no longer be interchangeable with a thread pool.
    """

    def __init__(self, size, loop=None):
        """
        :param size: number of coroutine tasks allowed to run concurrently.
        :param loop:
        """
        self._size = size
        self.loop = loop or asyncio.new_event_loop()
        self._sem = asyncio.Semaphore(self._size, loop=self.loop)
        self._queue = asyncio.Queue(maxsize=size, loop=self.loop)
        t = Thread(target=self._start_loop_in_new_thread)
        t.setDaemon(True)  # daemon thread, so atexit can fire and the program ends on its own without calling shutdown manually
        t.start()
        self._can_be_closed_flag = False
        atexit.register(self.shutdown)

    def submit2(self, func, *args, **kwargs):
        # future = asyncio.run_coroutine_threadsafe(self._produce(func, *args, **kwargs), self.loop)  # this approach also has a drawback: it is very expensive.
        # future.result()  # blocks submit once the queue size is exceeded, preventing items from being pushed too fast.
        # asyncio.ensure_future(self._produce(func, *args, **kwargs), loop=self.loop)  # fast, but cannot block, so items pile up too quickly.
        # this way of submitting performs better than the run_coroutine_threadsafe variant
        while self._queue.full():
            time.sleep(0.0001)
        asyncio.ensure_future(self._produce(func, *args, **kwargs), loop=self.loop)

    def submit(self, func, *args, **kwargs):
        future = asyncio.run_coroutine_threadsafe(self._produce(func, *args, **kwargs), self.loop)  # this run_coroutine_threadsafe approach also has a drawback: it is very expensive.
        future.result()  # blocks submit once the queue size is exceeded, preventing items from being pushed too fast.

    async def _produce(self, func, *args, **kwargs):
        await self._queue.put((func, args, kwargs))

    async def _consume(self):
        while True:
            func, args, kwargs = await self._queue.get()
            if func == 'stop':
                break
            try:
                await func(*args, **kwargs)
            except Exception as e:
                traceback.print_exc()

    async def __run(self):
        for _ in range(self._size):
            asyncio.ensure_future(self._consume())

    def _start_loop_in_new_thread(self, ):
        # self._loop.run_until_complete(self.__run())  # this also works.
        # self._loop.run_forever()
        # asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(asyncio.wait([self._consume() for _ in range(self._size)], loop=self.loop))
        self._can_be_closed_flag = True

    def shutdown(self):
        for _ in range(self._size):
            self.submit('stop', )
        while not self._can_be_closed_flag:
            time.sleep(0.1)
        self.loop.close()
        print('event loop closed')


class AsyncProducerConsumer:
    """
    Based on the official docs at https://asyncio.readthedocs.io/en/latest/producer_consumer.html .
    A simple producer/consumer example, using an asyncio.Queue:
    """
    """
    Produces and consumes at the same time.  This framework does not use this class:
    it requires the producer and consumer to live in the same thread, which makes it
    awkward to retrofit onto the existing synchronous framework code.
    """

    def __init__(self, items, concurrent_num=200, consume_fun_specify=None):
        """
        :param items: list of arguments to be consumed
        :param time_interval_produce: time interval between adding tasks
        :param consume_fun_specify: the async consumer function to call; if not given,
               subclass this class and override consume_fun instead.
        """
        self.queue = asyncio.Queue()
        self.items = items
        self._concurrent_num = concurrent_num
        self.consume_fun_specify = consume_fun_specify

    async def produce(self):
        for item in self.items:
            await self.queue.put(item)

    async def consume(self):
        while True:
            # wait for an item from the producer
            item = await self.queue.get()
            # process the item
            # print('consuming {}...'.format(item))
            # simulate i/o operation using sleep
            try:
                if self.consume_fun_specify:
                    await self.consume_fun_specify(item)
                else:
                    await self.consume_fun(item)
            except Exception as e:
                print(e)
            # Notify the queue that the item has been processed
            self.queue.task_done()

    @staticmethod
    async def consume_fun(item):
        """
        Either subclass this class and override this method, or pass an async function
        as consume_fun_specify when constructing the class.
        :param item:
        :return:
        """
        print(item, 'please override the consume_fun method')
        await asyncio.sleep(1)

    async def __run(self):
        # schedule the consumer
        tasks = []
        for _ in range(self._concurrent_num):
            task = asyncio.ensure_future(self.consume())
            tasks.append(task)
        # run the producer and wait for completion
        await self.produce()
        # wait until the consumer has processed all items
        await self.queue.join()
        # the consumer is still awaiting for an item, cancel it
        for task in tasks:
            task.cancel()

    def start_run(self):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.__run())
        # loop.close()


if __name__ == '__main__':
    def test_async_pool_executor():
        from function_scheduling_distributed_framework.concurrent_pool import CustomThreadPoolExecutor as ThreadPoolExecutor

        async def f(x):
            print('printing', x)
            # await asyncio.sleep(1)
            # raise Exception('aaa')

        def f2(x):
            print('printing', x)

        print(1111)
        pool = AsyncPoolExecutor(200)
        # pool = ThreadPoolExecutor(200)  # a thread pool cannot run coroutines: the print would never execute, because calling an async function f(x) only yields a coroutine object, which still has to be wrapped as a task and run on an event loop.
        for i in range(1, 50001):
            print('submitting', i)
            pool.submit(f, i)
        # time.sleep(5)
        pool.submit(f, 'hi')
        pool.submit(f, 'hi2')
        pool.submit(f, 'hi3')
        print(2222)
        # pool.shutdown()

    async def _my_fun(item):
        print('hehe', item)
        # await asyncio.sleep(1)

    def test_async_producer_consumer():
        AsyncProducerConsumer([i for i in range(100000)], concurrent_num=200, consume_fun_specify=_my_fun).start_run()
        print('over')

    test_async_pool_executor()
    # test_async_producer_consumer()
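
# A small sketch (not from the original module) of the second usage pattern that
# the consume_fun docstring describes: subclassing AsyncProducerConsumer and
# overriding consume_fun instead of passing consume_fun_specify.  The class name
# and sleep interval are made up for illustration; the call is left commented out
# so importing this module does not run it.
class _SleepyConsumer(AsyncProducerConsumer):
    @staticmethod
    async def consume_fun(item):
        # pretend to do some i/o-bound work per item
        await asyncio.sleep(0.01)
        print('consumed', item)

# _SleepyConsumer(range(100), concurrent_num=10).start_run()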
ultimate.py
# -*- coding: utf-8 -*- import schedule import time import sys import os import random import glob # ->added to make pics upload -> see job8 import threading # ->added to make multithreadening possible -> see fn run_threaded sys.path.append(os.path.join(sys.path[0], '../../')) from instabot import Bot import config bot = Bot(comments_file=config.COMMENTS_FILE, blacklist=config.BLACKLIST_FILE, whitelist=config.WHITELIST_FILE) bot.login() bot.logger.info("ULTIMATE script. 24hours save") random_user_file = bot.read_list_from_file(config.USERS_FILE) random_hashtag_file = bot.read_list_from_file(config.HASHTAGS_FILE) photo_captions = bot.read_list_from_file(config.PHOTO_CAPTIONS_FILE) # to get pics and autopost it posted_pic_list = [] try: with open(config.POSTED_PICS_FILE, 'r') as f: posted_pic_list = f.read().splitlines() except Exception: posted_pic_list = [] # Get the filenames of the photos in the path -> pics = [os.path.basename(x) for x in glob.glob(config.PICS_PATH + "/*.jpg")] pics = sorted(pics) # Return a random value from a list, used in various jobs below def get_random(from_list): _random = random.choice(from_list) return _random def stats(): bot.save_user_stats(bot.user_id) def job1(): bot.like_hashtag(get_random(random_hashtag_file), amount=int(700 / 24)) def job2(): bot.like_timeline(amount=int(300 / 24)) def job3(): bot.like_followers(get_random(random_user_file), nlikes=3) def job4(): bot.follow_followers(get_random(random_user_file), nfollows=config.NUMBER_OF_FOLLOWERS_TO_FOLLOW) def job5(): bot.comment_medias(bot.get_timeline_medias()) def job6(): bot.unfollow_non_followers(n_to_unfollows=config.NUMBER_OF_NON_FOLLOWERS_TO_UNFOLLOW) def job7(): bot.follow_users(bot.get_hashtag_users(get_random(random_hashtag_file))) def job8(): # Comment posts with an hashtag in HASHTAGS_FILE hashtag = get_random(random_hashtag_file) bot.logger.info("Commenting on hashtag: " + hashtag) bot.comment_hashtag(hashtag) def job9(): # Automatically post a pic in 'pics' folder try: for pic in pics: if pic in posted_pic_list: continue caption = get_random(photo_captions) full_caption = caption + "\n" + config.FOLLOW_MESSAGE bot.logger.info("Uploading pic with caption: " + caption) bot.uploadPhoto(config.PICS_PATH + pic, caption=full_caption) if bot.LastResponse.status_code != 200: bot.logger.error("Something went wrong, read the following ->\n") bot.logger.error(bot.LastResponse) break if pic not in posted_pic_list: # After posting a pic, comment it with all the hashtags specified # In config.PICS_HASHTAGS posted_pic_list.append(pic) with open('pics.txt', 'a') as f: f.write(pic + "\n") bot.logger.info("Succesfully uploaded: " + pic) bot.logger.info("Commenting uploaded photo with hashtags...") medias = bot.get_your_medias() last_photo = medias[0] # Get the last photo posted bot.comment(last_photo, config.PICS_HASHTAGS) break except Exception as e: bot.logger.error("Couldn't upload pic") bot.logger.error(str(e)) def job10(): # put non followers on blacklist try: bot.logger.info("Creating non-followers list") followings = bot.get_user_following(bot.user_id) # getting following followers = bot.get_user_followers(bot.user_id) # getting followers friends_file = bot.read_list_from_file("friends.txt") # same whitelist (just user ids) nonfollowerslist = list((set(followings) - set(followers)) - set(friends_file)) with open(config.BLACKLIST_FILE, 'a') as file: # writing to the blacklist for user_id in nonfollowerslist: file.write(str(user_id) + "\n") bot.logger.info("Removing duplicates...") lines = 
open(config.BLACKLIST_FILE, 'r').readlines() lines_set = set(lines) out = open(config.BLACKLIST_FILE, 'w') for line in lines_set: out.write(line) bot.logger.info("Done.") except Exception as e: bot.logger.error("Couldn't update blacklist") bot.logger.error(str(e)) # function to make threads -> details here http://bit.ly/faq_schedule def run_threaded(job_fn): job_thread = threading.Thread(target=job_fn) job_thread.start() schedule.every(1).hour.do(run_threaded, stats) # get stats schedule.every(8).hours.do(run_threaded, job1) # like hashtag schedule.every(2).hours.do(run_threaded, job2) # like timeline schedule.every(1).days.at("16:00").do(run_threaded, job3) # like followers of users from file schedule.every(2).days.at("11:00").do(run_threaded, job4) # follow followers schedule.every(16).hours.do(run_threaded, job5) # comment medias schedule.every(1).days.at("08:00").do(run_threaded, job6) # unfollow non-followers schedule.every(12).hours.do(run_threaded, job7) # follow users from hashtag from file schedule.every(6).hours.do(run_threaded, job8) # comment hashtag schedule.every(1).days.at("21:28").do(run_threaded, job9) # upload pics schedule.every(4).days.at("07:50").do(run_threaded, job10) # non-followers blacklist while True: schedule.run_pending() time.sleep(1)
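
# Note on the scheduling pattern above (illustrative, not part of the script):
# every job is registered through run_threaded, so one slow job cannot delay the
# others.  Adding another task only needs a job function plus one registration
# line, e.g. (hypothetical job name and interval):
#
#   def job11():
#       bot.like_timeline(amount=10)   # same bot method used by job2
#
#   schedule.every(3).hours.do(run_threaded, job11)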
preprocess.py
import numpy as np from random import shuffle import os import scipy.io as io from sklearn.preprocessing import OneHotEncoder import argparse from helper import * import threading import time import itertools import sys parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default='Indian_pines') parser.add_argument('--patch_size', type=int, default=3) parser.add_argument('--mode', type=str, default='Default') opt = parser.parse_args() #Somehow this is necessary, even I cannot tell why -_- if opt.data in ('KSC', 'Botswana'): filename = opt.data else: filename = opt.data.lower() print("Dataset: " + filename ) #Try loading data from the folder... Otherwise download from online try: print("Using images from Data folder...") input_mat = io.loadmat('./data/' + opt.data + '.mat')[filename] target_mat = io.loadmat('./data/' + opt.data + '_gt.mat')[filename + '_gt'] except: print("Data not found, downloading input images and labelled images!\n\n") if opt.data == "Indian_pines": opt.url1 = "http://www.ehu.eus/ccwintco/uploads/2/22/Indian_pines.mat" opt.url2 = "http://www.ehu.eus/ccwintco/uploads/c/c4/Indian_pines_gt.mat" elif opt.data == "Salinas": opt.url1 = "http://www.ehu.eus/ccwintco/uploads/f/f1/Salinas.mat" opt.url2 = "http://www.ehu.eus/ccwintco/uploads/f/fa/Salinas_gt.mat" elif opt.data == "KSC": opt.url1 = "http://www.ehu.eus/ccwintco/uploads/2/26/KSC.mat" opt.url2 = "http://www.ehu.eus/ccwintco/uploads/a/a6/KSC_gt.mat" elif opt.data == "Botswana": opt.url1 = "http://www.ehu.eus/ccwintco/uploads/7/72/Botswana.mat" opt.url2 = "http://www.ehu.eus/ccwintco/uploads/5/58/Botswana_gt.mat" else: raise Exception("Available datasets are:: Indian_pines, Salinas, KSC, Botswana") os.system('wget -P' + ' ' + './data/' + ' ' + opt.url1) os.system('wget -P' + ' ' + './data/' + ' ' + opt.url2) input_mat = io.loadmat('./data/' + opt.data + '.mat')[filename] target_mat = io.loadmat('./data/' + opt.data + '_gt.mat')[filename + '_gt'] PATCH_SIZE = opt.patch_size HEIGHT = input_mat.shape[0] WIDTH = input_mat.shape[1] BAND = input_mat.shape[2] CLASSES = [] COUNT = 200 #Number of patches of each class OUTPUT_CLASSES = np.max(target_mat) mode = opt.mode print("MODE : " + mode ) ''' For debug use, uncomment to see image information print("+-------------------------------------+") print("MODE : " + mode ) print("Patch size", PATCH_SIZE) print("Number of output classes: " +str(OUTPUT_CLASSES)) print("Lower number of classes: " +str(np.min(target_mat)) ) print("Input Height: " + str(HEIGHT)) print("Input Width: " + str(WIDTH)) print("Frequency dimension (Band) " +str(BAND)) print('Target_mat shape = (' + str(target_mat.shape[0]) + ',' + str(target_mat.shape[1])+')') print("+-------------------------------------+\n") ''' # Normalise image data input_mat = input_mat.astype(float) statlie_image = input_mat input_mat -= np.min(input_mat) input_mat /= np.max(input_mat) statlie_image_2 = input_mat # List label = list of class # train_idx = list of numbers, each number represent number of samples in each class. 
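# For example (values taken from the branches below): for Indian_pines in 'small'
# mode, list_labels keeps the 9 classes that have enough samples and train_idx
# assigns 178 training patches to each of them (about 1600 patches in total); in
# the default mode the per-class counts are unbalanced, e.g. 800 for class 2 and
# 275 for class 5.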
if mode == 'small': if opt.data == 'Indian_pines': list_labels = [2,3,5,6,8,10,11,12,14] train_idx = [178, 178, 178, 178, 178, 178, 178, 178, 178] #Sum = 1600 elif opt.data == 'Salinas': list_labels = range(1,OUTPUT_CLASSES+1) train_idx = [170]*OUTPUT_CLASSES #was 175 else: raise Exception("KSC or Botswana does not offer 'small' mode, try removing '--size small' from command line") else: if opt.data == "Indian_pines": # There's some classes with lack of samples so we only using the 9 that has sufficient samples list_labels = [2,3,5,6,8,10,11,12,14] train_idx = [800, 600, 275, 350, 275, 450, 850, 430, 750] #Average elif opt.data == "Salinas": list_labels = range(1,OUTPUT_CLASSES+1) train_idx = [750]*OUTPUT_CLASSES #was 175 # train_idx = [1500,2500, 1300,800, 1800, 2800, 2200, 6000, 3000, 2300, 500,1300, 500] elif opt.data == 'KSC': print('Please be patent..........') list_labels = [1,2,3,4,6,8,9,10,11,12,13] train_idx = [300,200,200,200,190,350,400,340,340,420,600] elif opt.data == 'Botswana': print('Please be patent..........') list_labels = [1,3,4,5,6,7,8,9,10,11,13] train_idx = [200,200,165,190,190,190,150,225,190,220,110,200] else: print("Impossible!") def Patch(height_index,width_index): """ Returns a mean-normalized patch, the top left corner of which is at (height_index, width_index) Inputs: height_index - row index of the top left corner of the image patch width_index - column index of the top left corner of the image patch Outputs: mean_normalized_patch - mean normalized patch of size (PATCH_SIZE, PATCH_SIZE) whose top left corner is at (height_index, width_index) """ # transpose_array = np.transpose(input_mat,(2,0,1)) transpose_array = input_mat height_slice = slice(height_index, height_index+PATCH_SIZE) width_slice = slice(width_index, width_index+PATCH_SIZE) patch = transpose_array[:, height_slice, width_slice] mean_normalized_patch = [] for i in range(patch.shape[0]): mean_normalized_patch.append(patch[i] - MEAN_ARRAY[i]) return np.array(mean_normalized_patch) # For showing a animation only end_loading = False def animate(): global end_loading for c in itertools.cycle(['|', '/', '-', '\\']): if end_loading: break sys.stdout.write('\rExtracting '+ opt.data + ' dataset features...' 
+ c) sys.stdout.flush() time.sleep(0.1) sys.stdout.write('\rFinished!\t') print("+-------------------------------------+") print('Input_mat shape: ' + str(input_mat.shape) ) MEAN_ARRAY = np.ndarray(shape=(BAND,),dtype=float) new_input_mat = [] input_mat = np.transpose(input_mat,(2,0,1)) statlie_image_3 = input_mat print('Input mat after transpose shape: ' + str(input_mat.shape) ) calib_value_for_padding = int( (PATCH_SIZE-1)/2) for i in range(BAND): MEAN_ARRAY[i] = np.mean(input_mat[i,:,:]) new_input_mat.append(np.pad(input_mat[i,:,:],calib_value_for_padding,'constant',constant_values = 0)) print('Input_mat shape after padding: ' + str( np.array(new_input_mat).shape) ) print("+-------------------------------------+") input_mat = np.array(new_input_mat) class_label_counter = [0] * OUTPUT_CLASSES #Class that for i in range(OUTPUT_CLASSES): CLASSES.append([]) t = threading.Thread(target=animate).start() start = time.time() calib_value = int((PATCH_SIZE-1)/2) count = 0 image = [] image_label = [] problem_data_set = [] for i in range(HEIGHT-1): for j in range(WIDTH-1): curr_inp = Patch(i,j) curr_tar = target_mat[i , j] if(curr_tar!=0): # Ignore patches with unknown landcover type for the central pixel CLASSES[curr_tar-1].append(curr_inp) class_label_counter[curr_tar-1] += 1 count += 1 end_loading = True end = time.time() print("Total excution time..." + str(end-start)+'seconds') print('Total number of K (things that can be identified): ' + str(count)) showClassTable(class_label_counter) TRAIN_PATCH,TRAIN_LABELS,TEST_PATCH,TEST_LABELS,VAL_PATCH, VAL_LABELS = [],[],[],[],[],[] # FULL_TRAIN_PATCH = [] # FULL_TRAIN_LABELS = [] count = 0 # Ringo's version counter = 0 #Represent train_index position for i, data in enumerate(CLASSES): if i+1 in list_labels: shuffle(data) print('Class '+ str(i+1)+ ' is accepted') #Size of validation set = 15% of training set val_size = int(train_idx[counter]*0.15) #index position between validation and test test_cutoff = train_idx[counter] + val_size TRAIN_PATCH += data[:train_idx[counter]] TRAIN_LABELS += [counter] * train_idx[counter] #print('Check equal', len(TRAIN_PATCH), len(TRAIN_LABELS) ) VAL_PATCH += data[train_idx[counter]:test_cutoff] VAL_LABELS += [counter] * val_size TEST_PATCH += data[test_cutoff:] tail_length = len(data)-test_cutoff TEST_LABELS += [counter] * tail_length counter += 1 else: print('-Class '+ str(i+1)+ ' is rejected') #FULL_TRAIN_LABELS = TRAIN_LABELS + VAL_LABELS #FULL_TRAIN_PATCH = TRAIN_PATCH + VAL_PATCH TRAIN_LABELS = np.array(TRAIN_LABELS) TRAIN_PATCH = np.array(TRAIN_PATCH) TEST_PATCH = np.array(TEST_PATCH) TEST_LABELS = np.array(TEST_LABELS) VAL_PATCH = np.array(VAL_PATCH) VAL_LABELS = np.array(VAL_LABELS) #FULL_TRAIN_LABELS = np.array(FULL_TRAIN_LABELS) #FULL_TRAIN_PATCH = np.array(FULL_TRAIN_PATCH) #print('Train, Test, Validation, Full_train', (len(TRAIN_PATCH)), (len(TEST_PATCH)), (len(VAL_PATCH)), (len(FULL_TRAIN_PATCH))); #TODO: print size print("+-------------------------------------+") print("Size of Training data: " + str(len(TRAIN_PATCH)) ) print("Size of Validation data: " + str(len(VAL_PATCH)) ) print("Size of Testing data: " + str(len(TEST_PATCH)) ) print("+-------------------------------------+") train_idx = list(range(len(TRAIN_PATCH))) shuffle(train_idx) TRAIN_PATCH = TRAIN_PATCH[train_idx] TRAIN_LABELS = TRAIN_LABELS[train_idx] test_idx = list(range(len(TEST_PATCH))) shuffle(test_idx) TEST_PATCH = TEST_PATCH[test_idx] TEST_LABELS = TEST_LABELS[test_idx] val_idx = list(range(len(VAL_PATCH))) shuffle(val_idx) VAL_PATCH = 
VAL_PATCH[val_idx] VAL_LABELS = VAL_LABELS[val_idx] ''' full_train_idx = shuffle(range(len(FULL_TRAIN_PATCH))) FULL_TRAIN_PATCH = FULL_TRAIN_PATCH[full_train_idx] FULL_TRAIN_LABELS = FULL_TRAIN_LABELS[full_train_idx] ''' onehot_encoder = OneHotEncoder(sparse=False) TRAIN_LABELS = np.reshape(TRAIN_LABELS, (len(TRAIN_LABELS),1) ) TRAIN_LABELS = onehot_encoder.fit_transform(TRAIN_LABELS).astype(np.uint8) TRAIN_PATCH = np.transpose(TRAIN_PATCH,(0,2,3,1)).astype(np.float32) train = {} train["train_patch"] = TRAIN_PATCH train["train_labels"] = TRAIN_LABELS io.savemat("./data/" + opt.data + "_Train_patch_" + str(PATCH_SIZE) + ".mat", train) TEST_LABELS = np.reshape(TEST_LABELS, (len(TEST_LABELS),1) ) TEST_LABELS = onehot_encoder.fit_transform(TEST_LABELS).astype(np.uint8) TEST_PATCH = np.transpose(TEST_PATCH,(0,2,3,1)).astype(np.float32) test = {} test["test_patch"] = TEST_PATCH test["test_labels"] = TEST_LABELS io.savemat("./data/" + opt.data + "_Test_patch_" + str(PATCH_SIZE) + ".mat", test) VAL_LABELS = np.reshape(VAL_LABELS, (len(VAL_LABELS),1) ) VAL_LABELS = onehot_encoder.fit_transform(VAL_LABELS).astype(np.uint8) VAL_PATCH = np.transpose(VAL_PATCH,(0,2,3,1)).astype(np.float32) val = {} val["val_patch"] = VAL_PATCH val["val_labels"] = VAL_LABELS io.savemat("./data/" + opt.data + "_Val_patch_" + str(PATCH_SIZE) + ".mat", val) ''' FULL_TRAIN_LABELS = FULL_TRAIN_LABELS.T FULL_TRAIN_LABELS = np.reshape(FULL_TRAIN_LABELS, (len(FULL_TRAIN_LABELS),1) ) FULL_TRAIN_LABELS = onehot_encoder.fit_transform(FULL_TRAIN_LABELS).astype(np.uint8) full_train = {} full_train["train_patch"] = FULL_TRAIN_PATCH full_train["train_labels"] = FULL_TRAIN_LABELS ''' print("+-------------------------------------+") print("Summary") print('Train_patch.shape: '+ str(TRAIN_PATCH.shape) ) print('Train_label.shape: '+ str(TRAIN_LABELS.shape) ) print('Test_patch.shape: ' + str(TEST_PATCH.shape)) print('Test_label.shape: ' + str(TEST_LABELS.shape)) print("Validation batch Shape: " + str(VAL_PATCH.shape) ) print("Validation label Shape: " + str(VAL_LABELS.shape) ) print("+-------------------------------------+") print("\nFinished processing.......\n Looking at some sample images") ''' Below Code written by Ringo to Visualise lamdba distribution ''' plot_random_spec_img(TRAIN_PATCH, TRAIN_LABELS) plot_random_spec_img(TEST_PATCH, TEST_LABELS) plot_random_spec_img(VAL_PATCH, VAL_LABELS) # Show origin statlie image plotStatlieImage(statlie_image) # Show normalised statlie image plotStatlieImage(statlie_image_2) # Show transposed statlie image (reflection along x=y asix) plotStatlieImage(statlie_image_3, bird=True)
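
# A minimal sketch (not part of the original preprocessing script) of how a
# downstream training script could load the splits written above.  The file
# names and the "train_patch"/"train_labels"-style keys mirror the io.savemat
# calls earlier in this file; the helper name is hypothetical.
def load_split(data_name, patch_size, split="Train"):
    """Return (patches, one-hot labels) for split 'Train', 'Test' or 'Val'."""
    mat = io.loadmat("./data/{}_{}_patch_{}.mat".format(data_name, split, patch_size))
    return mat["{}_patch".format(split.lower())], mat["{}_labels".format(split.lower())]

# Example (hypothetical call): patches, labels = load_split("Indian_pines", 3)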
manager.py
#!/usr/bin/env python3.7 import os import time import sys import fcntl import errno import signal import shutil import subprocess import datetime import textwrap from selfdrive.swaglog import cloudlog, add_logentries_handler from common.basedir import BASEDIR, PARAMS from common.android import ANDROID WEBCAM = os.getenv("WEBCAM") is not None sys.path.append(os.path.join(BASEDIR, "pyextra")) os.environ['BASEDIR'] = BASEDIR TOTAL_SCONS_NODES = 1140 prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt')) # Create folders needed for msgq try: os.mkdir("/dev/shm") except FileExistsError: pass except PermissionError: print("WARNING: failed to make /dev/shm") if ANDROID: os.chmod("/dev/shm", 0o777) def unblock_stdout(): # get a non-blocking stdout child_pid, child_pty = os.forkpty() if child_pid != 0: # parent # child is in its own process group, manually pass kill signals signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT)) signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM)) fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) while True: try: dat = os.read(child_pty, 4096) except OSError as e: if e.errno == errno.EIO: break continue if not dat: break try: sys.stdout.write(dat.decode('utf8')) except (OSError, IOError, UnicodeDecodeError): pass # os.wait() returns a tuple with the pid and a 16 bit value # whose low byte is the signal number and whose high byte is the exit satus exit_status = os.wait()[1] >> 8 os._exit(exit_status) if __name__ == "__main__": unblock_stdout() if __name__ == "__main__" and ANDROID: from common.spinner import Spinner from common.text_window import TextWindow else: from common.spinner import FakeSpinner as Spinner from common.text_window import FakeTextWindow as TextWindow import importlib import traceback from multiprocessing import Process # Run scons spinner = Spinner() spinner.update("0") if not prebuilt: for retry in [True, False]: # run scons env = os.environ.copy() env['SCONS_PROGRESS'] = "1" env['SCONS_CACHE'] = "1" nproc = os.cpu_count() j_flag = "" if nproc is None else "-j%d" % (nproc - 1) # scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE) scons = subprocess.Popen(["scons", "-j 4","--max-drift=1" ,"--implicit-deps-unchanged"], cwd=BASEDIR, env=env, stderr=subprocess.PIPE) compile_output = [] # Read progress from stderr and update spinner while scons.poll() is None: try: line = scons.stderr.readline() if line is None: continue line = line.rstrip() prefix = b'progress: ' if line.startswith(prefix): i = int(line[len(prefix):]) if spinner is not None: spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES))) elif len(line): compile_output.append(line) print(line.decode('utf8', 'replace')) except Exception: pass if scons.returncode != 0: # Read remaining output r = scons.stderr.read().split(b'\n') compile_output += r if retry: print("scons build failed, cleaning in") for i in range(3,-1,-1): print("....%d" % i) time.sleep(1) subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env) shutil.rmtree("/tmp/scons_cache") else: # Build failed log errors errors = [line.decode('utf8', 'replace') for line in compile_output if any([err in line for err in [b'error: ', b'not found, needed by target']])] error_s = "\n".join(errors) add_logentries_handler(cloudlog) cloudlog.error("scons build failed\n" + error_s) # Show TextWindow error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors]) with 
TextWindow("Openpilot failed to build\n \n" + error_s) as t: t.wait_for_exit() exit(1) else: break import cereal import cereal.messaging as messaging from common.params import Params import selfdrive.crash as crash from selfdrive.registration import register from selfdrive.version import version, dirty from selfdrive.loggerd.config import ROOT from selfdrive.launcher import launcher from common import android from common.apk import update_apks, pm_apply_packages, start_offroad from common.manager_helpers import print_cpu_usage ThermalStatus = cereal.log.ThermalData.ThermalStatus # comment out anything you don't want to run managed_processes = { "thermald": "selfdrive.thermald.thermald", #"uploader": "selfdrive.loggerd.uploader", #"deleter": "selfdrive.loggerd.deleter", "controlsd": "selfdrive.controls.controlsd", "plannerd": "selfdrive.controls.plannerd", "radard": "selfdrive.controls.radard", "dmonitoringd": "selfdrive.controls.dmonitoringd", "ubloxd": ("selfdrive/locationd", ["./ubloxd"]), #"loggerd": ("selfdrive/loggerd", ["./loggerd"]), #"logmessaged": "selfdrive.logmessaged", "locationd": "selfdrive.locationd.locationd", #"tombstoned": "selfdrive.tombstoned", # "logcatd": ("selfdrive/logcatd", ["./logcatd"]), "proclogd": ("selfdrive/proclogd", ["./proclogd"]), "boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly "pandad": "selfdrive.pandad", "ui": ("selfdrive/ui", ["./ui"]), "calibrationd": "selfdrive.locationd.calibrationd", "paramsd": ("selfdrive/locationd", ["./paramsd"]), "camerad": ("selfdrive/camerad", ["./camerad"]), "sensord": ("selfdrive/sensord", ["./sensord"]), "clocksd": ("selfdrive/clocksd", ["./clocksd"]), "gpsd": ("selfdrive/sensord", ["./gpsd"]), # "updated": "selfdrive.updated", "dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]), "modeld": ("selfdrive/modeld", ["./modeld"]), "driverview": "selfdrive.controls.lib.driverview", } daemon_processes = { "manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"), } running = {} def get_running(): return running # due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption unkillable_processes = ['camerad'] # processes to end with SIGINT instead of SIGTERM interrupt_processes = [] # processes to end with SIGKILL instead of SIGTERM kill_processes = ['sensord', 'paramsd'] # processes to end if thermal conditions exceed Green parameters green_temp_processes = ['uploader'] persistent_processes = [ 'thermald', 'logmessaged', 'ui', 'uploader', ] if ANDROID: persistent_processes += [ 'logcatd', 'tombstoned', 'updated', ] car_started_processes = [ 'controlsd', 'plannerd', 'loggerd', 'radard', 'dmonitoringd', 'calibrationd', 'paramsd', 'camerad', 'modeld', 'proclogd', 'ubloxd', 'locationd', ] if WEBCAM: car_started_processes += [ 'dmonitoringmodeld', ] if ANDROID: car_started_processes += [ 'sensord', 'clocksd', 'gpsd', 'dmonitoringmodeld', 'deleter', ] def register_managed_process(name, desc, car_started=False): global managed_processes, car_started_processes, persistent_processes print("registering %s" % name) managed_processes[name] = desc if car_started: car_started_processes.append(name) else: persistent_processes.append(name) # ****************** process management functions ****************** def nativelauncher(pargs, cwd): # exec the process os.chdir(cwd) # because when extracted from pex zips permissions get lost -_- os.chmod(pargs[0], 0o700) os.execvp(pargs[0], pargs) def start_managed_process(name): if name in running or name not in managed_processes: return 
proc = managed_processes[name] if isinstance(proc, str): cloudlog.info("starting python %s" % proc) running[name] = Process(name=name, target=launcher, args=(proc,)) else: pdir, pargs = proc cwd = os.path.join(BASEDIR, pdir) cloudlog.info("starting process %s" % name) running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd)) running[name].start() def start_daemon_process(name): params = Params() proc, pid_param = daemon_processes[name] pid = params.get(pid_param, encoding='utf-8') if pid is not None: try: os.kill(int(pid), 0) with open(f'/proc/{pid}/cmdline') as f: if proc in f.read(): # daemon is running return except (OSError, FileNotFoundError): # process is dead pass cloudlog.info("starting daemon %s" % name) proc = subprocess.Popen(['python', '-m', proc], stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), preexec_fn=os.setpgrp) params.put(pid_param, str(proc.pid)) def prepare_managed_process(p): proc = managed_processes[p] if isinstance(proc, str): # import this python cloudlog.info("preimporting %s" % proc) importlib.import_module(proc) elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")): # build this process cloudlog.info("building %s" % (proc,)) try: subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0])) except subprocess.CalledProcessError: # make clean if the build failed cloudlog.warning("building %s failed, make clean" % (proc, )) subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0])) subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0])) def join_process(process, timeout): # Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382 # We have to poll the exitcode instead t = time.time() while time.time() - t < timeout and process.exitcode is None: time.sleep(0.001) def kill_managed_process(name): if name not in running or name not in managed_processes: return cloudlog.info("killing %s" % name) if running[name].exitcode is None: if name in interrupt_processes: os.kill(running[name].pid, signal.SIGINT) elif name in kill_processes: os.kill(running[name].pid, signal.SIGKILL) else: running[name].terminate() join_process(running[name], 5) if running[name].exitcode is None: if name in unkillable_processes: cloudlog.critical("unkillable process %s failed to exit! 
rebooting in 15 if it doesn't die" % name) join_process(running[name], 15) if running[name].exitcode is None: cloudlog.critical("FORCE REBOOTING PHONE!") os.system("date >> /sdcard/unkillable_reboot") os.system("reboot") raise RuntimeError else: cloudlog.info("killing %s with SIGKILL" % name) os.kill(running[name].pid, signal.SIGKILL) running[name].join() cloudlog.info("%s is dead with %d" % (name, running[name].exitcode)) del running[name] def cleanup_all_processes(signal, frame): cloudlog.info("caught ctrl-c %s %s" % (signal, frame)) if ANDROID: pm_apply_packages('disable') for name in list(running.keys()): kill_managed_process(name) cloudlog.info("everything is dead") # ****************** run loop ****************** def manager_init(should_register=True): if should_register: reg_res = register() if reg_res: dongle_id = reg_res else: raise Exception("server registration failed") else: dongle_id = "c"*16 # set dongle id cloudlog.info("dongle id is " + dongle_id) os.environ['DONGLE_ID'] = dongle_id cloudlog.info("dirty is %d" % dirty) if not dirty: os.environ['CLEAN'] = '1' cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True) crash.bind_user(id=dongle_id) crash.bind_extra(version=version, dirty=dirty, is_eon=True) os.umask(0) try: os.mkdir(ROOT, 0o777) except OSError: pass # ensure shared libraries are readable by apks if ANDROID: os.chmod(BASEDIR, 0o755) os.chmod(os.path.join(BASEDIR, "cereal"), 0o755) os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755) def manager_thread(): # now loop thermal_sock = messaging.sub_sock('thermal') if os.getenv("GET_CPU_USAGE"): proc_sock = messaging.sub_sock('procLog', conflate=True) cloudlog.info("manager start") cloudlog.info({"environ": os.environ}) # save boot log subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd")) params = Params() # start daemon processes for p in daemon_processes: start_daemon_process(p) # start persistent processes for p in persistent_processes: start_managed_process(p) # start offroad if ANDROID: pm_apply_packages('enable') start_offroad() if os.getenv("NOBOARD") is None: start_managed_process("pandad") if os.getenv("BLOCK") is not None: for k in os.getenv("BLOCK").split(","): del managed_processes[k] logger_dead = False start_t = time.time() first_proc = None while 1: msg = messaging.recv_sock(thermal_sock, wait=True) # heavyweight batch processes are gated on favorable thermal conditions if msg.thermal.thermalStatus >= ThermalStatus.yellow: for p in green_temp_processes: if p in persistent_processes: kill_managed_process(p) else: for p in green_temp_processes: if p in persistent_processes: start_managed_process(p) if msg.thermal.freeSpace < 0.05: logger_dead = True if msg.thermal.started and "driverview" not in running: for p in car_started_processes: if (p == "loggerd" and logger_dead) or p == "uploader": kill_managed_process(p) else: start_managed_process(p) else: logger_dead = False for p in reversed(car_started_processes): kill_managed_process(p) # this is ugly if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1": start_managed_process("driverview") elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0": kill_managed_process("driverview") # check the status of all processes, did any of them die? 
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running] cloudlog.debug(' '.join(running_list)) # Exit main loop when uninstall is needed if params.get("DoUninstall", encoding='utf8') == "1": break if os.getenv("GET_CPU_USAGE"): dt = time.time() - start_t # Get first sample if dt > 30 and first_proc is None: first_proc = messaging.recv_sock(proc_sock) # Get last sample and exit if dt > 90: last_proc = messaging.recv_sock(proc_sock, wait=True) cleanup_all_processes(None, None) sys.exit(print_cpu_usage(first_proc, last_proc)) def manager_prepare(spinner=None): # build all processes os.chdir(os.path.dirname(os.path.abspath(__file__))) # Spinner has to start from 70 here total = 100.0 if prebuilt else 30.0 for i, p in enumerate(managed_processes): if spinner is not None: spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),)) prepare_managed_process(p) def uninstall(): cloudlog.warning("uninstalling") with open('/cache/recovery/command', 'w') as f: f.write('--wipe_data\n') # IPowerManager.reboot(confirm=false, reason="recovery", wait=true) android.reboot(reason="recovery") def main(): os.environ['PARAMS_PATH'] = PARAMS # the flippening! os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1') # disable bluetooth os.system('service call bluetooth_manager 8') params = Params() params.manager_start() default_params = [ ("CommunityFeaturesToggle", "0"), ("CompletedTrainingVersion", "0"), ("IsRHD", "0"), ("IsMetric", "0"), ("RecordFront", "0"), ("HasAcceptedTerms", "0"), ("HasCompletedSetup", "0"), ("IsUploadRawEnabled", "1"), ("IsLdwEnabled", "1"), ("IsGeofenceEnabled", "-1"), ("SpeedLimitOffset", "0"), ("LongitudinalControl", "0"), ("LimitSetSpeed", "0"), ("LimitSetSpeedNeural", "0"), ("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')), ("OpenpilotEnabledToggle", "1"), ("LaneChangeEnabled", "1"), ("IsDriverViewEnabled", "0"), ] # set unset params for k, v in default_params: if params.get(k) is None: params.put(k, v) # is this chffrplus? if os.getenv("PASSIVE") is not None: params.put("Passive", str(int(os.getenv("PASSIVE")))) if params.get("Passive") is None: raise Exception("Passive must be set to continue") if ANDROID: update_apks() manager_init() manager_prepare(spinner) spinner.close() if os.getenv("PREPAREONLY") is not None: return # SystemExit on sigterm signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1)) try: manager_thread() except SystemExit: raise except Exception: traceback.print_exc() crash.capture_exception() finally: cleanup_all_processes(None, None) if params.get("DoUninstall", encoding='utf8') == "1": uninstall() if __name__ == "__main__": try: main() except Exception: add_logentries_handler(cloudlog) cloudlog.exception("Manager failed to start") # Show last 3 lines of traceback error = traceback.format_exc(3) error = "Manager failed to start\n \n" + error with TextWindow(error) as t: t.wait_for_exit() raise # manual exit because we are forked sys.exit(0)
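
# Illustrative note (not part of openpilot): register_managed_process above is the
# hook for adding an extra daemon to this manager without editing the process
# tables directly.  A hypothetical example, using the same name -> (directory,
# args) convention as managed_processes:
#
#   register_managed_process("mylogger", ("selfdrive/mylogger", ["./mylogger"]), car_started=True)
#
# car_started=True places it in car_started_processes (started and stopped with
# the car); the default False makes it a persistent process.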
utils.py
# -*- coding: utf-8 -*- # Copyright 2020 the HERA Project # Licensed under the MIT License import matplotlib import matplotlib.pyplot as plt from matplotlib.ticker import FormatStrFormatter import matplotlib.patches as mpatches import matplotlib.gridspec as gridspec import numpy as np from pyuvdata import UVCal, UVData, UVFlag, utils import os import sys import glob import uvtools as uvt from astropy.time import Time from astropy.coordinates import EarthLocation, AltAz, Angle from astropy.coordinates import SkyCoord as sc import pandas import warnings import copy from hera_mc import cm_hookup, geo_sysdef import math from uvtools import dspec import hera_qm from hera_mc import cm_active from matplotlib.lines import Line2D from matplotlib import colors import json from hera_notebook_templates.data import DATA_PATH from astropy.io import fits import csv from astropy import units as u from astropy_healpix import HEALPix from astropy.coordinates import Galactic import healpy from multiprocessing import Process, Queue from bokeh.layouts import row, column from bokeh.models import CustomJS, Select, RadioButtonGroup, Range1d from bokeh.plotting import figure, output_file, show, ColumnDataSource from bokeh.io import output_notebook import scipy warnings.filterwarnings('ignore') # useful global variables status_colors = { 'dish_maintenance' : 'salmon', 'dish_ok' : 'red', 'RF_maintenance' : 'lightskyblue', 'RF_ok' : 'royalblue', 'digital_maintenance' : 'plum', 'digital_ok' : 'mediumpurple', 'calibration_maintenance' : 'lightgreen', 'calibration_ok' : 'green', 'calibration_triage' : 'lime'} status_abbreviations = { 'dish_maintenance' : 'dish-M', 'dish_ok' : 'dish-OK', 'RF_maintenance' : 'RF-M', 'RF_ok' : 'RF-OK', 'digital_maintenance' : 'dig-M', 'digital_ok' : 'dig-OK', 'calibration_maintenance' : 'cal-M', 'calibration_ok' : 'cal-OK', 'calibration_triage' : 'cal-Tri'} def get_use_ants(uvd,statuses,jd): statuses = statuses.split(',') ants = np.unique(np.concatenate((uvd.ant_1_array, uvd.ant_2_array))) use_ants = [] h = cm_active.ActiveData(at_date=jd) h.load_apriori() for ant_name in h.apriori: ant = int("".join(filter(str.isdigit, ant_name))) if ant in ants: status = h.apriori[ant_name].status if status in statuses: use_ants.append(ant) return use_ants def read_template(pol='XX'): if pol == 'XX': polstr = 'north' elif pol == 'YY': polstr = 'east' temp_path = f'{DATA_PATH}/templates/{polstr}_template.json' with open(temp_path) as f: data = json.load(f) return data def flag_by_template(uvd,HHfiles,jd,use_ants='auto',pols=['XX','YY'],polDirs=['NN','EE'],temp_norm=True,plotMap=False): use_files, use_lsts, use_file_inds = get_hourly_files(uvd,HHfiles,jd) temp = {} ant_dfs = {} for pol in pols: temp[pol] = read_template(pol) ant_dfs[pol] = {} if use_ants == 'auto': use_ants = uvd.get_ants() flaggedAnts = {polDirs[0]: [], polDirs[1]: []} for i,lst in enumerate(use_lsts): # print(lst) hdat = UVData() hdat.read(use_files[i],antenna_nums=use_ants) for p,pol in enumerate(pols): ant_dfs[pol][lst] = {} ranges = np.asarray(temp[pol]['lst_ranges'][0]) if len(np.argwhere(ranges[:,0]<lst)) > 0: ind = np.argwhere(ranges[:,0]<lst)[-1][0] else: if p == 0: print(f'No template for lst={lst} - skipping') continue dat = np.abs(temp[pol][str(ind)]) if temp_norm is True: medpower = np.nanmedian(np.log10(np.abs(hdat.data_array))) medtemp = np.nanmedian(dat) norm = np.divide(medpower,medtemp) dat = np.multiply(dat,norm) for ant in use_ants: d = np.log10(np.abs(hdat.get_data((ant,ant,pol)))) d = np.average(d,axis=0) df = 
np.abs(np.subtract(dat,d)) ant_dfs[pol][lst][ant] = np.nanmedian(df) if plotMap is True: fig = plt.figure(figsize=(18,10)) cmap = plt.get_cmap('inferno') sm = plt.cm.ScalarMappable(cmap=cmap,norm=plt.Normalize(vmin=0,vmax=1)) sm._A = [] ampmin=100000000000000 ampmax=0 for ant in use_ants: amp = ant_dfs[pol][lst][ant] if amp > ampmax: ampmax=amp elif amp < ampmin: ampmin=amp rang = ampmax-ampmin for ant in use_ants: idx = np.argwhere(hdat.antenna_numbers == ant)[0][0] antPos = hdat.antenna_positions[idx] amp = ant_dfs[pol][lst][ant] if math.isnan(amp): marker="v" color="r" markersize=30 else: cind = float((amp-ampmin)/rang) if plotMap is True: coloramp = cmap(cind) color=coloramp marker="h" markersize=40 if cind > 0.15 and ant not in flaggedAnts[polDirs[p]]: flaggedAnts[polDirs[p]].append(ant) if plotMap is True: plt.plot(antPos[1],antPos[2],marker=marker,markersize=markersize,color=color) if math.isnan(amp) or coloramp[0]>0.6: plt.text(antPos[1]-3,antPos[2],str(ant),color='black') else: plt.text(antPos[1]-3,antPos[2],str(ant),color='white') if plotMap is True: plt.title(f'{polDirs[p]} pol, {lst} hours') cbar = fig.colorbar(sm) cbar.set_ticks([]) return ant_dfs, flaggedAnts def load_data(data_path,JD): HHfiles = sorted(glob.glob("{0}/zen.{1}.*.sum.uvh5".format(data_path,JD))) difffiles = sorted(glob.glob("{0}/zen.{1}.*.diff.uvh5".format(data_path,JD))) HHautos = sorted(glob.glob("{0}/zen.{1}.*.sum.autos.uvh5".format(data_path,JD))) diffautos = sorted(glob.glob("{0}/zen.{1}.*.diff.autos.uvh5".format(data_path,JD))) Nfiles = len(HHfiles) hhfile_bases = map(os.path.basename, HHfiles) hhdifffile_bases = map(os.path.basename, difffiles) sep = '.' x = sep.join(HHfiles[0].split('.')[-4:-2]) y = sep.join(HHfiles[-1].split('.')[-4:-2]) print(f'{len(HHfiles)} sum files found between JDs {x} and {y}') x = sep.join(difffiles[0].split('.')[-4:-2]) y = sep.join(difffiles[-1].split('.')[-4:-2]) print(f'{len(difffiles)} diff files found between JDs {x} and {y}') x = sep.join(HHautos[0].split('.')[-5:-3]) y = sep.join(HHautos[-1].split('.')[-5:-3]) print(f'{len(HHautos)} sum auto files found between JDs {x} and {y}') x = sep.join(diffautos[0].split('.')[-5:-3]) y = sep.join(diffautos[-1].split('.')[-5:-3]) print(f'{len(diffautos)} diff auto files found between JDs {x} and {y}') # choose one for single-file plots hhfile1 = HHfiles[len(HHfiles)//2] difffile1 = difffiles[len(difffiles)//2] if len(HHfiles) != len(difffiles): print('############################################################') print('######### DIFFERENT NUMBER OF SUM AND DIFF FILES ###########') print('############################################################') # Load data uvd_hh = UVData() unread = True while unread is True: try: uvd_hh.read(hhfile1, skip_bad_files=True) except: hhfile += 1 continue unread = False uvd_xx1 = uvd_hh.select(polarizations = -5, inplace = False) uvd_xx1.ants = np.unique(np.concatenate([uvd_xx1.ant_1_array, uvd_xx1.ant_2_array])) # -5: 'xx', -6: 'yy', -7: 'xy', -8: 'yx' uvd_yy1 = uvd_hh.select(polarizations = -6, inplace = False) uvd_yy1.ants = np.unique(np.concatenate([uvd_yy1.ant_1_array, uvd_yy1.ant_2_array])) return HHfiles, difffiles, HHautos, diffautos, uvd_xx1, uvd_yy1 def load_data_ds(data_path,JD): HHfiles = sorted(glob.glob("{0}/zen.{1}.*.sum.uvh5".format(data_path,JD))) difffiles = [HHfile.split('sum')[0]+'diff.uvh5' for HHfile in HHfiles] Nfiles = len(HHfiles) hhfile_bases = map(os.path.basename, HHfiles) hhdifffile_bases = map(os.path.basename, difffiles) sep = '.' 
x = sep.join(HHfiles[0].split('.')[-4:-2]) y = sep.join(HHfiles[-1].split('.')[-4:-2]) print(f'{len(HHfiles)} sum files found between JDs {x} and {y}') x = sep.join(difffiles[0].split('.')[-4:-2]) y = sep.join(difffiles[-1].split('.')[-4:-2]) print(f'{len(difffiles)} diff files found between JDs {x} and {y}') # choose one for single-file plots hhfile1 = HHfiles[len(HHfiles)//2] difffile1 = difffiles[len(difffiles)//2] if len(HHfiles) != len(difffiles): print('############################################################') print('######### DIFFERENT NUMBER OF SUM AND DIFF FILES ###########') print('############################################################') # Load data uvd_hh = UVData() unread = True while unread is True: try: uvd_hh.read(hhfile1, skip_bad_files=True) except: hhfile += 1 continue unread = False uvd_xx1 = uvd_hh.select(polarizations = -5, inplace = False) uvd_xx1.ants = np.unique(np.concatenate([uvd_xx1.ant_1_array, uvd_xx1.ant_2_array])) # -5: 'xx', -6: 'yy', -7: 'xy', -8: 'yx' uvd_yy1 = uvd_hh.select(polarizations = -6, inplace = False) uvd_yy1.ants = np.unique(np.concatenate([uvd_yy1.ant_1_array, uvd_yy1.ant_2_array])) return HHfiles, difffiles, uvd_xx1, uvd_yy1 def plot_sky_map(uvd,ra_pad=20,dec_pad=30,clip=True,fwhm=11,nx=300,ny=200,sources=[]): map_path = f'{DATA_PATH}/haslam408_dsds_Remazeilles2014.fits' hdulist = fits.open(map_path) # Set up the HEALPix projection nside = hdulist[1].header['NSIDE'] order = hdulist[1].header['ORDERING'] hp = HEALPix(nside=nside, order=order, frame=Galactic()) #Get RA/DEC coords of observation loc = EarthLocation.from_geocentric(*uvd.telescope_location, unit='m') time_array = uvd.time_array obstime_start = Time(time_array[0],format='jd',location=loc) obstime_end = Time(time_array[-1],format='jd',location=loc) zenith_start = sc(Angle(0, unit='deg'),Angle(90,unit='deg'),frame='altaz',obstime=obstime_start,location=loc) zenith_start = zenith_start.transform_to('icrs') zenith_end = sc(Angle(0, unit='deg'),Angle(90,unit='deg'),frame='altaz',obstime=obstime_end,location=loc) zenith_end = zenith_end.transform_to('icrs') lst_start = obstime_start.sidereal_time('mean').hour lst_end = obstime_end.sidereal_time('mean').hour start_coords = [zenith_start.ra.degree,zenith_start.dec.degree] if start_coords[0] > 180: start_coords[0] = start_coords[0] - 360 end_coords = [zenith_end.ra.degree,zenith_end.dec.degree] if end_coords[0] > 180: end_coords[0] = end_coords[0] - 360 # Sample a 300x200 grid in RA/Dec ra_range = [zenith_start.ra.degree-ra_pad, zenith_end.ra.degree+ra_pad] if ra_range[0]>180: ra_range[0] = ra_range[0]-360 dec_range = [zenith_start.dec.degree-dec_pad, zenith_end.dec.degree+dec_pad] if clip == True: ra = np.linspace(ra_range[0],ra_range[1], nx) dec = np.linspace(dec_range[0],dec_range[1], ny) else: ra = np.linspace(-180,180,nx) dec = np.linspace(-90,zenith_start.dec.degree+90,ny) ra_grid, dec_grid = np.meshgrid(ra * u.deg, dec * u.deg) #Create alpha grid alphas = np.ones(ra_grid.shape) alphas = np.multiply(alphas,0.5) ra_min = np.argmin(np.abs(np.subtract(ra,start_coords[0]-fwhm/2))) ra_max = np.argmin(np.abs(np.subtract(ra,end_coords[0]+fwhm/2))) dec_min = np.argmin(np.abs(np.subtract(dec,start_coords[1]-fwhm/2))) dec_max = np.argmin(np.abs(np.subtract(dec,end_coords[1]+fwhm/2))) alphas[dec_min:dec_max, ra_min:ra_max] = 1 # Set up Astropy coordinate objects coords = sc(ra_grid.ravel(), dec_grid.ravel(), frame='icrs') # Interpolate values temperature = healpy.read_map(map_path) tmap = hp.interpolate_bilinear_skycoord(coords, 
temperature) tmap = tmap.reshape((ny, nx)) tmap = np.flip(tmap,axis=1) alphas = np.flip(alphas,axis=1) # Make a plot of the interpolated temperatures plt.figure(figsize=(12, 7)) im = plt.imshow(tmap, extent=[ra[-1], ra[0], dec[0], dec[-1]], cmap=plt.cm.viridis, aspect='auto', vmin=10,vmax=40,alpha=alphas,origin='lower') # im = plt.imshow(tmap, # cmap=plt.cm.viridis, aspect='auto', vmin=10,vmax=40,alpha=alphas,origin='lower') plt.xlabel('RA (ICRS)') plt.ylabel('DEC (ICRS)') lsts = uvd.lst_array*3.819719 inds = np.unique(lsts,return_index=True)[1] lsts = [lsts[ind] for ind in sorted(inds)] lsts_use = lsts[0::52] xcoords = np.linspace(start_coords[0],end_coords[0],len(lsts))[0::52] plt.xlabel('RA (ICRS)') plt.ylabel('DEC (ICRS)') plt.hlines(y=start_coords[1]-fwhm/2,xmin=ra[-1],xmax=ra[0],linestyles='dashed') plt.hlines(y=start_coords[1]+fwhm/2,xmin=ra[-1],xmax=ra[0],linestyles='dashed') # plt.vlines(x=start_coords[0],ymin=start_coords[1],ymax=dec[-1],linestyles='dashed') plt.vlines(x=end_coords[0],ymin=start_coords[1],ymax=dec[-1],linestyles='dashed') # plt.annotate(np.around(lst_start,2),xy=(start_coords[0],dec[-1]),xytext=(0,8), # fontsize=10,xycoords='data',textcoords='offset points',horizontalalignment='center') plt.annotate(np.around(lst_end,1),xy=(end_coords[0],dec[-1]),xytext=(0,8), fontsize=10,xycoords='data',textcoords='offset points',horizontalalignment='center') for i,lst in enumerate(lsts_use): plt.annotate(np.around(lst,1),xy=(xcoords[i],dec[-1]),xytext=(0,8), fontsize=10,xycoords='data',textcoords='offset points',horizontalalignment='center') plt.vlines(x=xcoords[i],ymin=start_coords[1],ymax=dec[-1],linestyles='dashed') plt.annotate('LST (hours)',xy=(np.average([start_coords[0],end_coords[0]]),dec[-1]), xytext=(0,22),fontsize=10,xycoords='data',textcoords='offset points',horizontalalignment='center') for s in sources: if s[1] > dec[0] and s[1] < dec[-1]: if s[0] > 180: s = (s[0]-360,s[1],s[2]) if s[0]>ra[0] and s[0]<ra[-1]: if s[2] == 'LMC' or s[2] == 'SMC': plt.annotate(s[2],xy=(s[0],s[1]),xycoords='data',fontsize=8,xytext=(20,-20), textcoords='offset points',arrowprops=dict(facecolor='black', shrink=2,width=1, headwidth=4)) else: plt.scatter(s[0],s[1],c='k',s=6) if len(s[2]) > 0: plt.annotate(s[2],xy=(s[0]+3,s[1]-4),xycoords='data',fontsize=6) plt.show() plt.close() hdulist.close() def plot_inspect_ants(uvd1,jd,badAnts=[],flaggedAnts={},tempAnts={},crossedAnts=[],use_ants='auto'): status_use = ['RF_ok','digital_ok','calibration_maintenance','calibration_ok','calibration_triage'] if use_ants == 'auto': use_ants = uvd1.get_ants() h = cm_active.ActiveData(at_date=jd) h.load_apriori() inspectAnts = [] for ant in use_ants: status = h.apriori[f'HH{ant}:A'].status if ant in badAnts or ant in flaggedAnts.keys() or ant in crossedAnts: if status in status_use: inspectAnts.append(ant) for k in tempAnts.keys(): if ant in tempAnts[k] and status in status_use: inspectAnts.append(ant) inspectAnts = np.unique(inspectAnts) inspectTitles = {} for ant in inspectAnts: inspectTitles[ant] = 'Flagged by: ' if ant in badAnts: inspectTitles[ant] = f'{inspectTitles[ant]} correlation matrix,' if ant in flaggedAnts.keys(): inspectTitles[ant] = f'{inspectTitles[ant]} ant_metrics,' if ant in crossedAnts: inspectTitles[ant] = f'{inspectTitles[ant]} cross matrix,' try: for k in tempAnts.keys(): if ant in tempAnts[k]: inspectTitles[ant] = f'{inspectTitles[ant]} template - {k},' except: continue if inspectTitles[ant][-1] == ',': inspectTitles[ant] = inspectTitles[ant][:-1] print('Antennas that require further 
inspection are:') print(inspectAnts) for ant in inspectAnts: auto_waterfall_lineplot(uvd1,ant,jd,title=inspectTitles[ant]) return inspectAnts def auto_waterfall_lineplot(uv, ant, jd, pols=['xx','yy'], colorbar_min=1e6, colorbar_max=1e8, title=''): h = cm_active.ActiveData(at_date=jd) h.load_apriori() status = h.apriori[f'HH{ant}:A'].status freq = uv.freq_array[0]*1e-6 fig = plt.figure(figsize=(12,8)) gs = gridspec.GridSpec(3, 2, height_ratios=[2,0.7,1]) it = 0 pol_dirs = ['NN','EE'] for p,pol in enumerate(pols): waterfall= plt.subplot(gs[it]) jd_ax=plt.gca() times= np.unique(uv.time_array) d = np.abs(uv.get_data((ant,ant, pol))) if len(np.nonzero(d)[0])==0: print('#########################################') print(f'Data for antenna {ant} is entirely zeros') print('#########################################') plt.close() return im = plt.imshow(d,norm=colors.LogNorm(), aspect='auto') abb = status_abbreviations[status] waterfall.set_title(f'{pol_dirs[p]} pol') freqs = uv.freq_array[0, :] / 1000000 xticks = np.arange(0, len(freqs), 120) plt.xticks(xticks, labels =np.around(freqs[xticks],2)) if p == 0: jd_ax.set_ylabel('JD') jd_yticks = [int(i) for i in np.linspace(0,len(times)-1,8)] jd_labels = np.around(times[jd_yticks],2) jd_ax.set_yticks(jd_yticks) jd_ax.set_yticklabels(jd_labels) jd_ax.autoscale(False) if p == 1: lst_ax = jd_ax.twinx() lst_ax.set_ylabel('LST (hours)') lsts = uv.lst_array*3.819719 inds = np.unique(lsts,return_index=True)[1] lsts = [lsts[ind] for ind in sorted(inds)] lst_yticks = [int(i) for i in np.linspace(0,len(lsts)-1,8)] lst_labels = np.around([lsts[i] for i in lst_yticks],2) lst_ax.set_yticks(lst_yticks) lst_ax.set_yticklabels(lst_labels) lst_ax.set_ylim(jd_ax.get_ylim()) lst_ax.autoscale(False) jd_ax.set_yticks([]) line= plt.subplot(gs[it+2]) averaged_data= np.abs(np.average(uv.get_data((ant,ant,pol)),0)) plt.plot(freq,averaged_data) line.set_yscale('log') if p == 0: line.set_ylabel('Night Average') else: line.set_yticks([]) line.set_xlim(freq[0],freq[-1]) line.set_xticks([]) line2 = plt.subplot(gs[it+4]) dat = uv.get_data((ant,ant,pol)) dat = np.abs(dat[len(dat)//2,:]) plt.plot(freq,dat) line2.set_yscale('log') line2.set_xlabel('Frequency (MHz)') if p == 0: line2.set_ylabel('Single Slice') else: line2.set_yticks([]) line2.set_xlim(freq[0],freq[-1]) plt.setp(waterfall.get_xticklabels(), visible=False) plt.subplots_adjust(hspace=.0) cbar = plt.colorbar(im, pad= 0.2, orientation = 'horizontal') cbar.set_label('Power') it=1 fig.suptitle(f'{ant} ({abb})', fontsize=10, backgroundcolor=status_colors[status],y=0.96) plt.annotate(title, xy=(0.5,0.94), ha='center',xycoords='figure fraction') plt.show() plt.close() def plot_autos(uvdx, uvdy): nodes, antDict, inclNodes = generate_nodeDict(uvdx) ants = uvdx.get_ants() sorted_ants = sort_antennas(uvdx) freqs = (uvdx.freq_array[0])*10**(-6) times = uvdx.time_array lsts = uvdx.lst_array maxants = 0 for node in nodes: n = len(nodes[node]['ants']) if n>maxants: maxants = n Nants = len(ants) Nside = maxants Yside = len(inclNodes) t_index = 0 jd = times[t_index] utc = Time(jd, format='jd').datetime h = cm_active.ActiveData(at_date=jd) h.load_apriori() xlim = (np.min(freqs), np.max(freqs)) ylim = (55, 85) fig, axes = plt.subplots(Yside, Nside, figsize=(16,Yside*3)) ptitle = 1.92/(Yside*3) fig.suptitle("JD = {0}, time = {1} UTC".format(jd, utc), fontsize=10,y=1+ptitle) fig.tight_layout(rect=(0, 0, 1, 0.95)) fig.subplots_adjust(left=.1, bottom=.1, right=.9, top=1, wspace=0.05, hspace=0.3) k = 0 for i,n in enumerate(inclNodes): ants = 
nodes[n]['ants'] j = 0 for _,a in enumerate(sorted_ants): if a not in ants: continue status = h.apriori[f'HH{a}:A'].status ax = axes[i,j] ax.set_xlim(xlim) ax.set_ylim(ylim) px, = ax.plot(freqs, 10*np.log10(np.abs(uvdx.get_data((a, a))[t_index])), color='r', alpha=0.75, linewidth=1) py, = ax.plot(freqs, 10*np.log10(np.abs(uvdy.get_data((a, a))[t_index])), color='b', alpha=0.75, linewidth=1) ax.grid(False, which='both') abb = status_abbreviations[status] ax.set_title(f'{a} ({abb})', fontsize=10, backgroundcolor=status_colors[status]) if k == 0: ax.legend([px, py], ['NN', 'EE']) if i == len(inclNodes)-1: [t.set_fontsize(10) for t in ax.get_xticklabels()] ax.set_xlabel('freq (MHz)', fontsize=10) else: ax.set_xticklabels([]) if j!=0: ax.set_yticklabels([]) else: [t.set_fontsize(10) for t in ax.get_yticklabels()] ax.set_ylabel(r'$10\cdot\log$(amp)', fontsize=10) j += 1 k += 1 for k in range(j,maxants): axes[i,k].axis('off') axes[i,maxants-1].annotate(f'Node {n}', (1.1,.3),xycoords='axes fraction',rotation=270) plt.show() plt.close() def plot_wfs(uvd, pol, mean_sub=False, save=False, jd=''): amps = np.abs(uvd.data_array[:, :, :, pol].reshape(uvd.Ntimes, uvd.Nants_data, uvd.Nfreqs, 1)) nodes, antDict, inclNodes = generate_nodeDict(uvd) ants = uvd.get_ants() sorted_ants = sort_antennas(uvd) freqs = (uvd.freq_array[0])*10**(-6) times = uvd.time_array lsts = uvd.lst_array*3.819719 inds = np.unique(lsts,return_index=True)[1] lsts = [lsts[ind] for ind in sorted(inds)] maxants = 0 polnames = ['xx','yy'] for node in nodes: n = len(nodes[node]['ants']) if n>maxants: maxants = n Nants = len(ants) Nside = maxants Yside = len(inclNodes) t_index = 0 jd = times[t_index] utc = Time(jd, format='jd').datetime h = cm_active.ActiveData(at_date=jd) h.load_apriori() ptitle = 1.92/(Yside*3) fig, axes = plt.subplots(Yside, Nside, figsize=(16,Yside*3)) if pol == 0: fig.suptitle("North Polarization", fontsize=14, y=1+ptitle) else: fig.suptitle("East Polarization", fontsize=14, y=1+ptitle) fig.tight_layout(rect=(0, 0, 1, 0.95)) fig.subplots_adjust(left=0, bottom=.1, right=.9, top=1, wspace=0.1, hspace=0.3) vmin = 6.5 vmax = 8 for i,n in enumerate(inclNodes): ants = nodes[n]['ants'] j = 0 for _,a in enumerate(sorted_ants): if a not in ants: continue status = h.apriori[f'HH{a}:A'].status abb = status_abbreviations[status] ax = axes[i,j] dat = np.log10(np.abs(uvd.get_data(a,a,polnames[pol]))) if mean_sub == True: ms = np.subtract(dat, np.nanmean(dat,axis=0)) im = ax.imshow(ms, vmin = -0.07, vmax = 0.07, aspect='auto',interpolation='nearest') else: im = ax.imshow(dat, vmin = vmin, vmax = vmax, aspect='auto',interpolation='nearest') ax.set_title(f'{a} ({abb})', fontsize=10,backgroundcolor=status_colors[status]) if i == len(inclNodes)-1: xticks = [int(i) for i in np.linspace(0,len(freqs)-1,3)] xticklabels = np.around(freqs[xticks],0) ax.set_xticks(xticks) ax.set_xticklabels(xticklabels) ax.set_xlabel('Freq (MHz)', fontsize=10) [t.set_rotation(70) for t in ax.get_xticklabels()] else: ax.set_xticklabels([]) if j != 0: ax.set_yticklabels([]) else: yticks = [int(i) for i in np.linspace(0,len(lsts)-1,6)] yticklabels = [np.around(lsts[ytick],1) for ytick in yticks] [t.set_fontsize(12) for t in ax.get_yticklabels()] ax.set_ylabel('Time(LST)', fontsize=10) ax.set_yticks(yticks) ax.set_yticklabels(yticklabels) ax.set_ylabel('Time(LST)', fontsize=10) j += 1 for k in range(j,maxants): axes[i,k].axis('off') pos = ax.get_position() cbar_ax=fig.add_axes([0.91,pos.y0,0.01,pos.height]) cbar = fig.colorbar(im, cax=cbar_ax) 
cbar.set_label(f'Node {n}',rotation=270, labelpad=15) if save is True: plt.savefig(f'{jd}_mean_subtracted_per_node_{pol}.png',bbox_inches='tight',dpi=300) plt.show() plt.close() def plot_mean_subtracted_wfs(uvd, use_ants, jd, pols=['xx','yy']): freqs = (uvd.freq_array[0])*1e-6 times = uvd.time_array lsts = uvd.lst_array*3.819719 inds = np.unique(lsts,return_index=True)[1] lsts = [lsts[ind] for ind in sorted(inds)] ants = sorted(use_ants) Nants = len(ants) pol_labels = ['NN','EE'] h = cm_active.ActiveData(at_date=jd) h.load_apriori() fig, axes = plt.subplots(Nants, 2, figsize=(7,Nants*2.2)) fig.suptitle('Mean Subtracted Waterfalls') fig.tight_layout(rect=(0, 0, 1, 0.95)) fig.subplots_adjust(left=.1, bottom=.1, right=.85, top=.975, wspace=0.05, hspace=0.2) for i,ant in enumerate(ants): status = h.apriori[f'HH{ant}:A'].status abb = status_abbreviations[status] color = status_colors[status] for j,pol in enumerate(pols): ax = axes[i,j] dat = np.log10(np.abs(uvd.get_data(ant,ant,pol))) ms = np.subtract(dat, np.nanmean(dat,axis=0)) im = ax.imshow(ms, vmin = -0.07, vmax = 0.07, aspect='auto',interpolation='nearest') ax.set_title(f'{ant} - {pol_labels[j]} ({abb})', fontsize=10, backgroundcolor=color) if j != 0: ax.set_yticklabels([]) else: yticks = [int(i) for i in np.linspace(0,len(lsts)-1,6)] yticklabels = [np.around(lsts[ytick],1) for ytick in yticks] [t.set_fontsize(12) for t in ax.get_yticklabels()] ax.set_ylabel('Time(LST)', fontsize=10) ax.set_yticks(yticks) ax.set_yticklabels(yticklabels) if i != Nants-1: ax.set_xticklabels([]) else: xticks = [int(i) for i in np.linspace(0,len(freqs)-1,8)] xticklabels = np.around(freqs[xticks],0) ax.set_xticks(xticks) ax.set_xticklabels(xticklabels) ax.set_xlabel('Frequency (MHz)', fontsize=10) if j == 1: pos = ax.get_position() cbar_ax=fig.add_axes([0.88,pos.y0,0.02,pos.height]) fig.colorbar(im, cax=cbar_ax) fig.show() def plot_closure(uvd, triad_length, pol): """Plot closure phase for an example triad. Parameters ---------- files : list of strings List of data filenames triad_length : float {14., 29.} Length of the triangle segment length. Must be 14 or 29. pol : str {xx, yy} Polarization to plot. 
Returns ------- None """ if triad_length == 14.: triad_list = [[0, 11, 12], [0, 1, 12], [1, 12, 13], [1, 2, 13], [2, 13, 14], [11, 23, 24], [11, 12, 24], [12, 24, 25], [12, 13, 25], [13, 25, 26], [13, 14, 26], [14, 26, 27], [23, 36, 37], [23, 24, 37], [24, 37, 38], [24, 25, 38], [25, 38, 39], [25, 26, 39], [26, 39, 40], [26, 27, 40], [27, 40, 41], [36, 37, 51], [37, 51, 52], [37, 38, 52], [38, 52, 53], [38, 39, 53], [39, 53, 54], [39, 40, 54], [40, 54, 55], [40, 41, 55], [51, 66, 67], [51, 52, 67], [53, 54, 69], [54, 69, 70], [54, 55, 70], [55, 70, 71], [65, 66, 82], [66, 82, 83], [66, 67, 83], [67, 83, 84], [70, 71, 87], [120, 121, 140], [121, 140, 141], [121, 122, 141], [122, 141, 142], [122, 123, 142], [123, 142, 143], [123, 124, 143]] else: triad_list = [[0, 23, 25], [0, 2, 25], [1, 24, 26], [2, 25, 27], [11, 36, 38], [11, 13, 38], [12, 37, 39], [12, 14, 39], [13, 38, 40], [14, 39, 41], [23, 25, 52], [24, 51, 53], [24, 26, 53], [25, 52, 54], [25, 27, 54], [26, 53, 55], [36, 65, 67], [36, 38, 67], [38, 67, 69], [38, 40, 69], [39, 41, 70], [40, 69, 71], [51, 82, 84], [51, 53, 84], [52, 83, 85], [52, 54, 85], [54, 85, 87], [83, 85, 120], [85, 120, 122], [85, 87, 122], [87, 122, 124]] # Look for a triad that exists in the data for triad in triad_list: bls = [[triad[0], triad[1]], [triad[1], triad[2]], [triad[2], triad[0]]] triad_in = True for bl in bls: inds = uvd.antpair2ind(bl[0], bl[1], ordered=False) if len(inds) == 0: triad_in = False break if triad_in: break if not triad_in: raise ValueError('Could not find triad in data.') closure_ph = np.angle(uvd.get_data(triad[0], triad[1], pol) * uvd.get_data(triad[1], triad[2], pol) * uvd.get_data(triad[2], triad[0], pol)) plt.imshow(closure_ph, aspect='auto', rasterized=True, interpolation='nearest', cmap = 'twilight') def plotNodeAveragedSummary(uv,HHfiles,jd,use_ants,pols=['xx','yy'],mat_pols=['xx','yy'], baseline_groups=[],removeBadAnts=False,plotRatios=False,plotSummary=True): """ Plots a summary of baseline correlations throughout a night for each baseline group specified, separated into inter-node and intra-node baselines, for each polarization specified. Parameters ---------- uv: UVData object UVData object containing any file from the desired night of observation. HHfiles: List A list of all files to be looked at for the desired night of observation. jd: String The JD of the night of observation pols: List A list containing the desired polarizations to look at. Options are any polarization strings accepted by pyuvdata. baseline_groups: [] A list containing the baseline types to look at, formatted as (length, N-S separation, label (str)). removeBadAnts: Bool Option to flag seemingly dead antennas and remove them from the per-baseline-group averaging. Returns ------- badAnts: List A list specifying the antennas flagged as dead or non-correlating. 
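Example
-------
Illustrative call only (the JD string is a placeholder; `uv`, `HHfiles`, and
`use_ants` are assumed to come from load_data() and get_use_ants() for the
same night):

    badAnts = plotNodeAveragedSummary(uv, HHfiles, '2459122', use_ants,
                                      removeBadAnts=True)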
""" if baseline_groups == []: baseline_groups = [(14,0,'14m E-W'),(14,-11,'14m NW-SE'),(14,11,'14m SW-NE'),(29,0,'29m E-W'),(29,22,'29m SW-NE'), (44,0,'44m E-W'),(58.5,0,'58m E-W'),(73,0,'73m E-W'),(87.6,0,'88m E-W'), (102.3,0,'102m E-W')] nodeMedians,lsts,badAnts=get_correlation_baseline_evolutions(uv,HHfiles,jd,use_ants,pols=pols,mat_pols=mat_pols, bl_type=baseline_groups,removeBadAnts=removeBadAnts, plotRatios=plotRatios) pols = mat_pols if plotSummary is False: return badAnts if len(lsts)>1: fig,axs = plt.subplots(len(pols),2,figsize=(16,16)) maxLength = 0 cmap = plt.get_cmap('Blues') for group in baseline_groups: if group[0] > maxLength: maxLength = group[0] for group in baseline_groups: length = group[0] data = nodeMedians[group[2]] colorInd = float(length/maxLength) if len(data['inter']['xx']) == 0: continue for i in range(len(pols)): pol = pols[i] axs[i][0].plot(data['inter'][pol], color=cmap(colorInd), label=group[2]) axs[i][1].plot(data['intra'][pol], color=cmap(colorInd), label=group[2]) axs[i][0].set_ylabel('Median Correlation Metric') axs[i][0].set_title('Internode, Polarization %s' % pol) axs[i][1].set_title('Intranode, Polarization %s' % pol) xticks = np.arange(0,len(lsts),1) axs[i][0].set_xticks(xticks) axs[i][0].set_xticklabels([str(lst) for lst in lsts]) axs[i][1].set_xticks(xticks) axs[i][1].set_xticklabels([str(lst) for lst in lsts]) axs[1][1].legend() axs[1][0].set_xlabel('LST (hours)') axs[1][1].set_xlabel('LST (hours)') fig.tight_layout(pad=2) else: print('#############################################################################') print('Not enough LST coverage to show metric evolution - that plot is being skipped') print('#############################################################################') return badAnts def plotVisibilitySpectra(file,jd,use_ants='auto',badAnts=[],pols=['xx','yy']): """ Plots visibility amplitude spectra for a set of redundant baselines, labeled by inter vs. intranode baselines. Parameters --------- file: String File to calculate the spectra from jd: String JD of the night 'file' was observed on badAnts: List A list of antennas not to include in the plot pols: List Polarizations to plot. Can include any polarization strings accepted by pyuvdata. 
""" pol_labels = ['NS','EW'] plt.subplots_adjust(wspace=0.25) uv = UVData() uv.read_uvh5(file) h = cm_hookup.Hookup() x = h.get_hookup('HH') baseline_groups = get_baseline_groups(uv,use_ants="auto") freqs = uv.freq_array[0]/1000000 loc = EarthLocation.from_geocentric(*uv.telescope_location, unit='m') obstime_start = Time(uv.time_array[0],format='jd',location=loc) startTime = obstime_start.sidereal_time('mean').hour JD = int(obstime_start.jd) j = 0 fig, axs = plt.subplots(len(baseline_groups),2,figsize=(12,4*len(baseline_groups))) for orientation in baseline_groups: bls = baseline_groups[orientation] usable = 0 for i in range(len(bls)): ants = uv.baseline_to_antnums(bls[i]) if ants[0] in badAnts or ants[1] in badAnts: continue if ants[0] in use_ants and ants[1] in use_ants: usable += 1 if usable <=4: use_all = True print(f'Note: not enough baselines of orientation {orientation} - using all available baselines') elif usable <= 10: print(f'Note: only a small number of baselines of orientation {orientation} are available') use_all = False else: use_all = False for p in range(len(pols)): inter=False intra=False pol = pols[p] for i in range(len(bls)): ants = uv.baseline_to_antnums(bls[i]) ant1 = ants[0] ant2 = ants[1] if (ant1 in use_ants and ant2 in use_ants) or use_all == True: key1 = 'HH%i:A' % (ant1) n1 = x[key1].get_part_from_type('node')['E<ground'][1:] key2 = 'HH%i:A' % (ant2) n2 = x[key2].get_part_from_type('node')['E<ground'][1:] dat = np.mean(np.abs(uv.get_data(ant1,ant2,pol)),0) auto1 = np.mean(np.abs(uv.get_data(ant1,ant1,pol)),0) auto2 = np.mean(np.abs(uv.get_data(ant2,ant2,pol)),0) norm = np.sqrt(np.multiply(auto1,auto2)) dat = np.divide(dat,norm) if ant1 in badAnts or ant2 in badAnts: continue if n1 == n2: if intra is False: axs[j][p].plot(freqs,dat,color='blue',label='intranode') intra=True else: axs[j][p].plot(freqs,dat,color='blue') else: if inter is False: axs[j][p].plot(freqs,dat,color='red',label='internode') inter=True else: axs[j][p].plot(freqs,dat,color='red') axs[j][p].set_yscale('log') axs[j][p].set_title('%s: %s pol' % (orientation,pol_labels[p])) if j == 0: axs[len(baseline_groups)-1][p].set_xlabel('Frequency (MHz)') if p == 0: axs[j][p].legend() axs[j][0].set_ylabel('log(|Vij|)') axs[j][1].set_yticks([]) j += 1 fig.suptitle('Visibility spectra (JD: %i)' % (JD)) fig.subplots_adjust(top=.94,wspace=0.05) plt.show() plt.close() def plot_antenna_positions(uv, badAnts={},flaggedAnts={},use_ants='auto'): """ Plots the positions of all antennas that have data, colored by node. Parameters ---------- uv: UVData object Observation to extract antenna numbers and positions from badAnts: List A list of flagged or bad antennas. These will be outlined in black in the plot. 
flaggedAnts: Dict A dict of antennas flagged by ant_metrics with value corresponding to color in ant_metrics plot """ plt.figure(figsize=(12,10)) nodes, antDict, inclNodes = generate_nodeDict(uv) N = len(inclNodes) cmap = plt.get_cmap('tab20') i = 0 nodePos = geo_sysdef.read_nodes() antPos = geo_sysdef.read_antennas() ants = geo_sysdef.read_antennas() nodes = geo_sysdef.read_nodes() firstNode = True for n, info in nodes.items(): firstAnt = True if n > 9: n = str(n) else: n = f'0{n}' if n in inclNodes: color = cmap(round(20/N*i)) i += 1 for a in info['ants']: width = 0 widthf = 0 if a in badAnts: width = 2 if a in flaggedAnts.keys(): widthf = 6 station = 'HH{}'.format(a) try: this_ant = ants[station] except KeyError: continue x = this_ant['E'] y = this_ant['N'] if a in use_ants: falpha = 0.5 else: falpha = 0.1 if firstAnt: if a in badAnts or a in flaggedAnts.keys(): if falpha == 0.1: plt.plot(x,y,marker="h",markersize=40,color=color,alpha=falpha, markeredgecolor='black',markeredgewidth=0) plt.annotate(a, [x-1, y]) continue plt.plot(x,y,marker="h",markersize=40,color=color,alpha=falpha,label=str(n), markeredgecolor='black',markeredgewidth=0) if a in flaggedAnts.keys(): plt.plot(x,y,marker="h",markersize=40,color=color, markeredgecolor=flaggedAnts[a],markeredgewidth=widthf, markerfacecolor="None") if a in badAnts: plt.plot(x,y,marker="h",markersize=40,color=color, markeredgecolor='black',markeredgewidth=width, markerfacecolor="None") else: if falpha == 0.1: plt.plot(x,y,marker="h",markersize=40,color=color,alpha=falpha, markeredgecolor='black',markeredgewidth=0) plt.annotate(a, [x-1, y]) continue plt.plot(x,y,marker="h",markersize=40,color=color,alpha=falpha,label=str(n), markeredgecolor='black',markeredgewidth=width) firstAnt = False else: plt.plot(x,y,marker="h",markersize=40,color=color,alpha=falpha, markeredgecolor='black',markeredgewidth=0) if a in flaggedAnts.keys() and a in use_ants: plt.plot(x,y,marker="h",markersize=40,color=color, markeredgecolor=flaggedAnts[a],markeredgewidth=widthf, markerfacecolor="None") if a in badAnts and a in use_ants: plt.plot(x,y,marker="h",markersize=40,color=color, markeredgecolor='black',markeredgewidth=width, markerfacecolor="None") plt.annotate(a, [x-1, y]) if firstNode: plt.plot(info['E'], info['N'], '*', color='gold',markersize=20,label='Node Box', markeredgecolor='k',markeredgewidth=1) firstNode = False else: plt.plot(info['E'], info['N'], '*', color='gold',markersize=20,markeredgecolor='k',markeredgewidth=1) plt.legend(title='Node Number',bbox_to_anchor=(1.15,0.9),markerscale=0.5,labelspacing=1.5) plt.xlabel('East') plt.ylabel('North') plt.show() plt.close() def plot_lst_coverage(uvd): """ Plots the LST and JD coverage for a particular night. Parameters ---------- uvd: UVData Object Object containing a whole night of data, used to extract the time array. 
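Example
-------
Illustrative call only; `uvd` is assumed to contain all times from the night
(for instance the concatenated autos read elsewhere in this module):

    plot_lst_coverage(uvd)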
""" lsts = uvd.lst_array*3.819719 jds = np.unique(uvd.time_array) alltimes = np.arange(np.floor(jds[0]),np.ceil(jds[0]),jds[2]-jds[1]) df = jds[2]-jds[1] truetimes = [np.min(np.abs(jds-jd))<=df*0.6 for jd in alltimes] usetimes = np.tile(np.asarray(truetimes),(20,1)) fig = plt.figure(figsize=(20,2)) ax = fig.add_subplot() im = ax.imshow(usetimes, aspect='auto',cmap='RdYlGn',vmin=0,vmax=1,interpolation='nearest') fig.colorbar(im) ax.set_yticklabels([]) ax.set_yticks([]) if len(alltimes) <= 15: xticks = [int(i) for i in np.linspace(0,len(alltimes)-1,len(alltimes))] else: xticks = [int(i) for i in np.linspace(0,len(alltimes)-1,14)] ax.set_xticks(xticks) ax.set_xticklabels(np.around(alltimes[xticks],2)) ax.set_xlabel('JD') ax.set_title('LST (hours)') ax2 = ax.twiny() ax2.set_xticks(xticks) jds = alltimes[xticks] lstlabels = [] loc = EarthLocation.from_geocentric(*uvd.telescope_location, unit='m') for jd in jds: t = Time(jd,format='jd',location=loc) lstlabels.append(t.sidereal_time('mean').hour) ax2.set_xticklabels(np.around(lstlabels,2)) ax2.set_label('LST (hours)') ax2.tick_params(labelsize=12) plt.show() plt.close() def plotEvenOddWaterfalls(uvd_sum, uvd_diff): """Plot Even/Odd visibility ratio waterfall. Parameters ---------- uvd_sum : UVData Object Object containing autos from sum files uvd_diff : UVData Object Object containing autos from diff files Returns ------- None """ nants = len(uvd_sum.get_ants()) freqs = uvd_sum.freq_array[0]*1e-6 nfreqs = len(freqs) lsts = np.unique(uvd_sum.lst_array*3.819719) sm = np.abs(uvd_sum.data_array[:,0,:,0]) df = np.abs(uvd_diff.data_array[:,0,:,0]) sm = np.r_[sm, np.nan + np.zeros((-len(sm) % nants,len(freqs)))] sm = np.nanmean(sm.reshape(-1,nants,nfreqs), axis=1) df = np.r_[df, np.nan + np.zeros((-len(df) % nants,len(freqs)))] df = np.nanmean(df.reshape(-1,nants,nfreqs), axis=1) evens = (sm + df)/2 odds = (sm - df)/2 rat = np.divide(evens,odds) rat = np.nan_to_num(rat) fig = plt.figure(figsize=(14,3)) ax = fig.add_subplot() my_cmap = copy.deepcopy(matplotlib.cm.get_cmap('viridis')) my_cmap.set_under('r') my_cmap.set_over('r') im = plt.imshow(rat,aspect='auto',vmin=0.5,vmax=2,cmap=my_cmap,interpolation='nearest') fig.colorbar(im) ax.set_title('Even/odd Visibility Ratio') ax.set_xlabel('Frequency (MHz)') ax.set_ylabel('Time (LST)') yticks = [int(i) for i in np.linspace(len(lsts)-1,0, 4)] ax.set_yticks(yticks) ax.set_yticklabels(np.around(lsts[yticks], 1)) xticks = [int(i) for i in np.linspace(0,len(freqs)-1, 10)] ax.set_xticks(xticks) ax.set_xticklabels(np.around(freqs[xticks], 0)) i = 192 while i < len(freqs): ax.axvline(i,color='w') i += 192 plt.show() plt.close() return rat def calcEvenOddAmpMatrix(sm,df,pols=['xx','yy'],nodes='auto', badThresh=0.35, plotRatios=False): """ Calculates a matrix of phase correlations between antennas, where each pixel is calculated as (even/abs(even)) * (conj(odd)/abs(odd)), and then averaged across time and frequency. Paramters: --------- sm: UVData Object Sum observation. df: UVData Object Diff observation. Must be the same time of observation as sm. pols: List Polarizations to plot. Can include any polarization strings accepted by pyuvdata. nodes: String or List Nodes to include in matrix. Default is 'auto', which generates a list of all nodes included in the provided data files. badThresh: Float Threshold correlation metric value to use for flagging bad antennas. Returns: ------- data: Dict Dictionary containing calculated values, formatted as data[polarization][ant1,ant2]. 
badAnts: List List of antennas that were flagged as bad based on badThresh. """ if sm.time_array[0] != df.time_array[0]: print('FATAL ERROR: Sum and diff files are not from the same observation!') return None if nodes=='auto': nodeDict, antDict, inclNodes = generate_nodeDict(sm) nants = len(sm.get_ants()) data = {} antnumsAll = sort_antennas(sm) badAnts = [] for p in range(len(pols)): pol = pols[p] data[pol] = np.empty((nants,nants)) for i in range(len(antnumsAll)): thisAnt = [] for j in range(len(antnumsAll)): ant1 = antnumsAll[i] ant2 = antnumsAll[j] s = sm.get_data(ant1,ant2,pol) d = df.get_data(ant1,ant2,pol) even = (s + d)/2 even = np.divide(even,np.abs(even)) odd = (s - d)/2 odd = np.divide(odd,np.abs(odd)) product = np.multiply(even,np.conj(odd)) data[pol][i,j] = np.abs(np.nanmean(product)) thisAnt.append(np.abs(np.nanmean(product))) pgood = np.count_nonzero(~np.isnan(thisAnt))/len(thisAnt) if (np.nanmedian(thisAnt) < badThresh or pgood<0.2) and antnumsAll[i] not in badAnts: if pol[0]==pol[1]: #Don't assign bad ants based on cross pols badAnts.append(antnumsAll[i]) if plotRatios is True: if len(pols) == 4: data['xx-xy'] = np.subtract(data['xx'],data['xy']) data['xx-yx'] = np.subtract(data['xx'],data['yx']) data['yy-xy'] = np.subtract(data['yy'],data['xy']) data['yy-yx'] = np.subtract(data['yy'],data['yx']) else: print('Can only calculate differences if cross pols were specified') polAnts = {} badAnts = [] subs = ['xx-xy','xx-yx','yy-xy','yy-yx'] for k in subs: for i,ant in enumerate(antnumsAll): dat = data[k][i,:] if np.nanmedian(dat) < 0: if ant in polAnts.keys(): polAnts[ant] = polAnts[ant] + 1 else: polAnts[ant] = 1 if polAnts[ant] == 4: badAnts.append(ant) return data, badAnts def plotCorrMatrix(uv,data,pols=['xx','yy'],vminIn=0,vmaxIn=1,nodes='auto',logScale=False,plotRatios=False): """ Plots a matrix representing the phase correlation of each baseline. Parameters: ---------- uv: UVData Object Observation used for calculating the correlation metric data: Dict Dictionary containing the correlation metric for each baseline and each polarization. Formatted as data[polarization] [ant1,ant2] pols: List Polarizations to plot. Can include any polarization strings accepted by pyuvdata. vminIn: float Lower limit of colorbar. Default is 0. vmaxIn: float Upper limit of colorbar. Default is 1. nodes: Dict Dictionary containing the nodes (and their constituent antennas) to include in the matrix. Formatted as nodes[Node #][Ant List, Snap # List, Snap Location List]. logScale: Bool Option to put colormap on a logarithmic scale. Default is False. 
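Example
-------
Illustrative call only; `sm` and `df` are sum and diff UVData objects from the
same observation, and the matrix comes from calcEvenOddAmpMatrix(), mirroring
how get_correlation_baseline_evolutions() uses this function:

    matrix, badAnts = calcEvenOddAmpMatrix(sm, df, pols=['xx','yy','xy','yx'])
    plotCorrMatrix(sm, matrix, pols=['xx','yy','xy','yx'])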
""" if nodes=='auto': nodeDict, antDict, inclNodes = generate_nodeDict(uv) nantsTotal = len(uv.get_ants()) power = np.empty((nantsTotal,nantsTotal)) fig, axs = plt.subplots(2,2,figsize=(16,16)) dirs = ['NN','EE','NE','EN'] cmap='plasma' if plotRatios is True: pols = ['xx-xy','yy-xy','xx-yx','yy-yx'] dirs=pols vminIn=-1 cmap='seismic' loc = EarthLocation.from_geocentric(*uv.telescope_location, unit='m') jd = uv.time_array[0] t = Time(jd,format='jd',location=loc) lst = round(t.sidereal_time('mean').hour,2) t.format='fits' antnumsAll = sort_antennas(uv) i = 0 for p in range(len(pols)): if p >= 2: i=1 pol = pols[p] nants = len(antnumsAll) if logScale is True: im = axs[i][p%2].imshow(data[pol],cmap=cmap,origin='upper',extent=[0.5,nantsTotal+.5,0.5,nantsTotal+0.5],norm=LogNorm(vmin=vminIn, vmax=vmaxIn)) else: im = axs[i][p%2].imshow(data[pol],cmap=cmap,origin='upper',extent=[0.5,nantsTotal+.5,0.5,nantsTotal+0.5],vmin=vminIn, vmax=vmaxIn) axs[i][p%2].set_xticks(np.arange(0,nantsTotal)+1) axs[i][p%2].set_xticklabels(antnumsAll,rotation=90,fontsize=6) axs[i][p%2].xaxis.set_ticks_position('top') axs[i][p%2].set_title('polarization: ' + dirs[p] + '\n') n=0 for node in sorted(inclNodes): n += len(nodeDict[node]['ants']) axs[i][p%2].axhline(len(antnumsAll)-n+.5,lw=4) axs[i][p%2].axvline(n+.5,lw=4) axs[i][p%2].text(n-len(nodeDict[node]['ants'])/2,-.5,node) axs[i][p%2].text(.42,-.05,'Node Number',transform=axs[i][p%2].transAxes) n=0 for node in sorted(inclNodes): n += len(nodeDict[node]['ants']) axs[0][1].text(nantsTotal+1,nantsTotal-n+len(nodeDict[node]['ants'])/2,node) axs[1][1].text(nantsTotal+1,nantsTotal-n+len(nodeDict[node]['ants'])/2,node) axs[0][1].text(1.05,0.4,'Node Number',rotation=270,transform=axs[0][1].transAxes) axs[0][1].set_yticklabels([]) axs[0][1].set_yticks([]) axs[0][0].set_yticks(np.arange(nantsTotal,0,-1)) axs[0][0].set_yticklabels(antnumsAll,fontsize=6) axs[0][0].set_ylabel('Antenna Number') axs[1][1].text(1.05,0.4,'Node Number',rotation=270,transform=axs[1][1].transAxes) axs[1][1].set_yticklabels([]) axs[1][1].set_yticks([]) axs[1][0].set_yticks(np.arange(nantsTotal,0,-1)) axs[1][0].set_yticklabels(antnumsAll,fontsize=6) axs[1][0].set_ylabel('Antenna Number') cbar_ax = fig.add_axes([0.98,0.18,0.015,0.6]) cbar_ax.set_xlabel('|V|', rotation=0) cbar = fig.colorbar(im, cax=cbar_ax) fig.suptitle('Correlation Matrix - JD: %s, LST: %.0fh' % (str(jd),np.round(lst,0))) fig.subplots_adjust(top=1.28,wspace=0.05,hspace=1.1) fig.tight_layout(pad=2) plt.show() plt.close() def plot_single_matrix(uv,data,vminIn=0,vmaxIn=1,nodes='auto',logScale=False): if nodes=='auto': nodeDict, antDict, inclNodes = generate_nodeDict(uv) nantsTotal = len(uv.get_ants()) power = np.empty((nantsTotal,nantsTotal)) fig, axs = plt.subplots(1,1,figsize=(16,16)) loc = EarthLocation.from_geocentric(*uv.telescope_location, unit='m') jd = uv.time_array[0] t = Time(jd,format='jd',location=loc) lst = round(t.sidereal_time('mean').hour,2) t.format='fits' antnumsAll = sort_antennas(uv) nants = len(antnumsAll) if logScale is True: im = axs[0][0].imshow(data[pol],cmap='plasma',origin='upper', extent=[0.5,nantsTotal+.5,0.5,nantsTotal+0.5],norm=LogNorm(vmin=vminIn, vmax=vmaxIn)) else: im = axs[0][0].imshow(data[pol],cmap='plasma',origin='upper',extent= [0.5,nantsTotal+.5,0.5,nantsTotal+0.5],vmin=vminIn, vmax=vmaxIn) axs[0][0].set_xticks(np.arange(0,nantsTotal)+1) axs[0][0].set_xticklabels(antnumsAll,rotation=90,fontsize=6) axs[0][0].xaxis.set_ticks_position('top') axs[0][0].set_title('polarization: ' + dirs[p] + '\n') n=0 for node 
in sorted(inclNodes): n += len(nodeDict[node]['ants']) axs[0][0].axhline(len(antnumsAll)-n+.5,lw=4) axs[0][0].axvline(n+.5,lw=4) axs[0][0].text(n-len(nodeDict[node]['ants'])/2,-.5,node) axs[0][0].text(.42,-.05,'Node Number',transform=axs[0][0].transAxes) n=0 for node in sorted(inclNodes): n += len(nodeDict[node]['ants']) axs[0][0].text(nantsTotal+1,nantsTotal-n+len(nodeDict[node]['ants'])/2,node) axs[0][0].text(1.05,0.4,'Node Number',rotation=270,transform=axs[0][0].transAxes) axs[0][0].set_yticks(np.arange(nantsTotal,0,-1)) axs[0][0].set_yticklabels(antnumsAll,fontsize=6) axs[0][0].set_ylabel('Antenna Number') axs[0][0].text(1.05,0.4,'Node Number',rotation=270,transform=axs[0][0].transAxes) cbar_ax = fig.add_axes([0.98,0.18,0.015,0.6]) cbar_ax.set_xlabel('|V|', rotation=0) cbar = fig.colorbar(im, cax=cbar_ax) fig.suptitle('Correlation Matrix - JD: %s, LST: %.0fh' % (str(jd),np.round(lst,0))) fig.subplots_adjust(top=1.28,wspace=0.05,hspace=1.1) fig.tight_layout(pad=2) plt.show() plt.close() def get_hourly_files(uv, HHfiles, jd): """ Generates a list of files spaced one hour apart throughout a night of observation, and the times those files were observed. Parameters: ---------- uv: UVData Object Sample observation from the given night, used only for grabbing the telescope location HHFiles: List List of all files from the desired night of observation jd: String JD of the night of observation Returns: ------- use_files: List List of files separated by one hour use_lsts: List List of LSTs of observations in use_files """ use_lsts = [] use_files = [] use_file_inds = [] loc = EarthLocation.from_geocentric(*uv.telescope_location, unit='m') for i,file in enumerate(HHfiles): try: dat = UVData() dat.read(file, read_data=False) except KeyError: continue jd = dat.time_array[0] t = Time(jd,format='jd',location=loc) lst = round(t.sidereal_time('mean').hour,2) if np.round(lst,0) == 24: continue if np.abs((lst-np.round(lst,0)))<0.05: if len(use_lsts)>0 and np.abs(use_lsts[-1]-lst)<0.5: if np.abs((lst-np.round(lst,0))) < abs((use_lsts[-1]-np.round(lst,0))): use_lsts[-1] = lst use_files[-1] = file use_file_inds[-1] = i else: use_lsts.append(lst) use_files.append(file) use_file_inds.append(i) return use_files, use_lsts, use_file_inds def get_baseline_groups(uv, bl_groups=[(14,0,'14m E-W'),(29,0,'29m E-W'),(14,-11,'14m NW-SE'),(14,11,'14m SW-NE')], use_ants='auto'): """ Generate dictionary containing baseline groups. 
Parameters: ---------- uv: UVData Object Observation to extract antenna position information from bl_groups: List Desired baseline types to extract, formatted as (length (float), N-S separation (float), label (string)) Returns: -------- bls: Dict Dictionary containing list of lists of redundant baseline numbers, formatted as bls[group label] """ bls={} baseline_groups,vec_bin_centers,lengths = uv.get_redundancies(use_antpos=False,include_autos=False) for i in range(len(baseline_groups)): bl = baseline_groups[i] for group in bl_groups: if np.abs(lengths[i]-group[0])<1: ant1 = uv.baseline_to_antnums(bl[0])[0] ant2 = uv.baseline_to_antnums(bl[0])[1] if use_ants == 'auto' or (ant1 in use_ants and ant2 in use_ants): antPos1 = uv.antenna_positions[np.argwhere(uv.antenna_numbers == ant1)] antPos2 = uv.antenna_positions[np.argwhere(uv.antenna_numbers == ant2)] disp = (antPos2-antPos1)[0][0] if np.abs(disp[2]-group[1])<0.5: bls[group[2]] = bl return bls def get_correlation_baseline_evolutions(uv,HHfiles,jd,use_ants='auto',badThresh=0.35,pols=['xx','yy'],bl_type=(14,0,'14m E-W'), removeBadAnts=False, plotMatrix=True,mat_pols=['xx','yy','xy','yx'],plotRatios=False): """ Calculates the average correlation metric for a set of redundant baseline groups at one hour intervals throughout a night of observation. Parameters: ---------- uv: UVData Object Sample observation from the desired night, used only for getting telescope location information. HHfiles: List List of all files for a night of observation jd: String JD of the given night of observation badThresh: Float Threshold correlation metric value to use for flagging bad antennas. Default is 0.35. pols: List Polarizations to plot. Can include any polarization strings accepted by pyuvdata. bl_type: Tuple Redundant baseline group to calculate correlation metric for. Default is 14m E-W baselines removeBadAnts: Bool Option to exclude antennas marked as bad from calculation. Default is False. plotMatrix: Bool Option to plot the correlation matrix for observations once each hour. Default is True. Returns: ------- result: Dict Per hour correlation metric, formatted as result[baseline type]['inter' or 'intra'][polarization] lsts: List LSTs that metric was calculated for, spaced 1 hour apart. bad_antennas: List Antenna numbers flagged as bad based on badThresh parameter. 
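Example
-------
Illustrative call only (the JD string is a placeholder; bl_type is passed as a
list of (length, N-S separation, label) tuples, as plotNodeAveragedSummary()
does when it calls this function):

    result, lsts, badAnts = get_correlation_baseline_evolutions(
        uv, HHfiles, '2459122', use_ants=use_ants,
        bl_type=[(14, 0, '14m E-W'), (29, 0, '29m E-W')])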
""" files, lsts, inds = get_hourly_files(uv, HHfiles, jd) if use_ants == 'auto': use_ants = uv.get_ants() if plotRatios is True: files = [files[len(files)//2]] nTimes=1 else: nTimes = len(files) if nTimes > 3: plotTimes = [0,nTimes//2,nTimes-1] else: plotTimes = np.arange(0,nTimes,1) nodeDict, antDict, inclNodes = generate_nodeDict(uv) JD = math.floor(uv.time_array[0]) bad_antennas = [] pols = mat_pols corrSummary = generateDataTable(uv,pols=pols) result = {} for f in range(nTimes): file = files[f] ind = inds[f] sm = UVData() df = UVData() try: # print(f'Trying to read {file}') sm.read(file, skip_bad_files=True, antenna_nums=use_ants) dffile = '%sdiff%s' % (file[0:-8],file[-5:]) df.read(dffile, skip_bad_files=True, antenna_nums=use_ants) except: i = -5 read = False while i<5 and read==False: try: file = HHfiles[ind+i] # print(f'trying to read {file}') sm.read(file, skip_bad_files=True, antenna_nums=use_ants) dffile = '%sdiff%s' % (file[0:-8],file[-5:]) df.read(dffile, skip_bad_files=True, antenna_nums=use_ants) read = True except: i += 1 if read == False: print(f'WARNING: unable to read {file}') continue matrix, badAnts = calcEvenOddAmpMatrix(sm,df,nodes='auto',pols=mat_pols,badThresh=badThresh,plotRatios=plotRatios) bad_antennas=badAnts if plotMatrix is True and f in plotTimes: plotCorrMatrix(sm, matrix, pols=mat_pols, nodes='auto',plotRatios=plotRatios) for group in bl_type: medians = { 'inter' : {}, 'intra' : {} } for pol in pols: medians['inter'][pol] = [] medians['intra'][pol] = [] if group[2] not in result.keys(): result[group[2]] = { 'inter' : {}, 'intra' : {} } for pol in pols: result[group[2]]['inter'][pol] = [] result[group[2]]['intra'][pol] = [] bls = get_baseline_type(uv,bl_type=group,use_ants=use_ants) if bls == None: # print(f'No baselines of type {group}') continue baselines = [uv.baseline_to_antnums(bl) for bl in bls] if removeBadAnts is True: nodeInfo = { 'inter' : getInternodeMedians(sm,matrix,badAnts=bad_antennas, baselines=baselines,pols=pols), 'intra' : getIntranodeMedians(sm,matrix,badAnts=bad_antennas, baselines=baselines,pols=pols) } else: nodeInfo = { 'inter' : getInternodeMedians(sm,matrix, baselines=baselines,pols=pols), 'intra' : getIntranodeMedians(sm,matrix,baselines=baselines,pols=pols) } for node in nodeDict: for pol in pols: corrSummary[node][pol]['inter'].append(nodeInfo['inter'][node][pol]) corrSummary[node][pol]['intra'].append(nodeInfo['intra'][node][pol]) medians['inter'][pol].append(nodeInfo['inter'][node][pol]) medians['intra'][pol].append(nodeInfo['intra'][node][pol]) for pol in pols: result[group[2]]['inter'][pol].append(np.nanmedian(medians['inter'][pol])) result[group[2]]['intra'][pol].append(np.nanmedian(medians['intra'][pol])) return result,lsts,bad_antennas def generateDataTable(uv,pols=['xx','yy']): """ Simple helper function to generate an empty dictionary of the format desired for get_correlation_baseline_evolutions() Parameters: ---------- uv: UVData Object Sample observation to extract node and antenna information from. pols: List Polarizations to plot. Can include any polarization strings accepted by pyuvdata. Default is ['xx','yy']. 
Returns: ------- dataObject: Dict Empty dict formatted as dataObject[node #][polarization]['inter' or 'intra'] """ nodeDict, antDict, inclNodes = generate_nodeDict(uv) dataObject = {} for node in nodeDict: dataObject[node] = {} for pol in pols: dataObject[node][pol] = { 'inter' : [], 'intra' : [] } return dataObject def getInternodeMedians(uv,data,pols=['xx','yy'],badAnts=[],baselines='all'): """ Identifies internode baseliens and performs averaging of correlation metric. Parameters: ---------- uv: UVData Object Sample observation to extract node and antenna information from. data: Dict Dictionary containing correlation metric information, formatted as data[polarization][ant1,ant2]. pols: List Polarizations to plot. Can include any polarization strings accepted by pyuvdata. Default is ['xx','yy']. badAnts: List List of antennas that have been flagged as bad - if provided, they will be excluded from averaging. baselines: List List of baseline types to include in calculation. Returns: ------- nodeMeans: Dict Per-node averaged correlation metrics, formatted as nodeMeans[node #][polarization]. """ nodeDict, antDict, inclNodes = generate_nodeDict(uv) antnumsAll=sort_antennas(uv) nants = len(antnumsAll) nodeMeans = {} nodeCorrs = {} for node in nodeDict: nodeCorrs[node] = {} nodeMeans[node] = {} for pol in pols: nodeCorrs[node][pol] = [] start=0 h = cm_hookup.Hookup() x = h.get_hookup('HH') for pol in pols: for i in range(nants): for j in range(nants): ant1 = antnumsAll[i] ant2 = antnumsAll[j] if ant1 not in badAnts and ant2 not in badAnts and ant1 != ant2: if baselines=='all' or (ant1,ant2) in baselines: key1 = 'HH%i:A' % (ant1) n1 = x[key1].get_part_from_type('node')['E<ground'][1:] key2 = 'HH%i:A' % (ant2) n2 = x[key2].get_part_from_type('node')['E<ground'][1:] dat = data[pol][i,j] if n1 != n2: nodeCorrs[n1][pol].append(dat) nodeCorrs[n2][pol].append(dat) for node in nodeDict: for pol in pols: nodeMeans[node][pol] = np.nanmedian(nodeCorrs[node][pol]) return nodeMeans def getIntranodeMedians(uv, data, pols=['xx','yy'],badAnts=[],baselines='all'): """ Identifies intranode baseliens and performs averaging of correlation metric. Parameters: ---------- uv: UVData Object Sample observation to extract node and antenna information from. data: Dict Dictionary containing correlation metric information, formatted as data[polarization][ant1,ant2]. pols: List Polarizations to plot. Can include any polarization strings accepted by pyuvdata. Default is ['xx','yy']. badAnts: List List of antennas that have been flagged as bad - if provided, they will be excluded from averaging. baselines: List List of baseline types to include in calculation. Returns: ------- nodeMeans: Dict Per-node averaged correlation metrics, formatted as nodeMeans[node #][polarization]. """ nodeDict, antDict, inclNodes = generate_nodeDict(uv) antnumsAll=sort_antennas(uv) nodeMeans = {} start=0 for node in nodeDict: nodeMeans[node]={} for pol in pols: nodeCorrs = [] for i in range(start,start+len(nodeDict[node]['ants'])): for j in range(start,start+len(nodeDict[node]['ants'])): ant1 = antnumsAll[i] ant2 = antnumsAll[j] if ant1 not in badAnts and ant2 not in badAnts and i != j: if baselines=='all' or (ant1,ant2) in baselines: nodeCorrs.append(data[pol][i,j]) nodeMeans[node][pol] = np.nanmedian(nodeCorrs) start += len(nodeDict[node]['ants']) return nodeMeans def get_baseline_type(uv,bl_type=(14,0,'14m E-W'),use_ants='auto'): """ Parameters: ---------- uv: UVData Object Sample observation to get baseline information from. 
bl_type: Tuple Redundant baseline group to extract baseline numbers for. Formatted as (length, N-S separation, label). Returns: ------- bl: List List of lists of redundant baseline numbers. Returns None if the provided bl_type is not found. """ baseline_groups,vec_bin_centers,lengths = uv.get_redundancies(use_antpos=False,include_autos=False) for i in range(len(baseline_groups)): bl = baseline_groups[i] if np.abs(lengths[i]-bl_type[0])<1: ant1 = uv.baseline_to_antnums(bl[0])[0] ant2 = uv.baseline_to_antnums(bl[0])[1] if (ant1 in use_ants and ant2 in use_ants) or use_ants == 'auto': antPos1 = uv.antenna_positions[np.argwhere(uv.antenna_numbers == ant1)] antPos2 = uv.antenna_positions[np.argwhere(uv.antenna_numbers == ant2)] disp = (antPos2-antPos1)[0][0] if np.abs(disp[2]-bl_type[1])<0.5: return bl return None def generate_nodeDict(uv): """ Generates dictionaries containing node and antenna information. Parameters: ---------- uv: UVData Object Sample observation to extract node and antenna information from. Returns: ------- nodes: Dict Dictionary containing entry for all nodes, each of which has keys: 'ants', 'snapLocs', 'snapInput'. antDict: Dict Dictionary containing entry for all antennas, each of which has keys: 'node', 'snapLocs', 'snapInput'. inclNodes: List Nodes that have hooked up antennas. """ antnums = uv.get_ants() h = cm_hookup.Hookup() x = h.get_hookup('HH') nodes = {} antDict = {} inclNodes = [] for ant in antnums: key = 'HH%i:A' % (ant) n = x[key].get_part_from_type('node')['E<ground'][1:] snapLoc = (x[key].hookup['E<ground'][-1].downstream_input_port[-1], ant) snapInput = (x[key].hookup['E<ground'][-2].downstream_input_port[1:], ant) antDict[ant] = {} antDict[ant]['node'] = str(n) antDict[ant]['snapLocs'] = snapLoc antDict[ant]['snapInput'] = snapInput inclNodes.append(n) if n in nodes: nodes[n]['ants'].append(ant) nodes[n]['snapLocs'].append(snapLoc) nodes[n]['snapInput'].append(snapInput) else: nodes[n] = {} nodes[n]['ants'] = [ant] nodes[n]['snapLocs'] = [snapLoc] nodes[n]['snapInput'] = [snapInput] inclNodes = np.unique(inclNodes) return nodes, antDict, inclNodes def sort_antennas(uv): """ Helper function that sorts antennas by snap input number. Parameters: ---------- uv: UVData Object Sample observation used for extracting node and antenna information. Returns: ------- sortedAntennas: List All antennas with data, sorted into order of ascending node number, and within that by ascending snap number, and within that by ascending snap input number. 
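Example
-------
Illustrative call only; this mirrors how plot_autos() and plot_wfs() use the
helper together with generate_nodeDict():

    nodes, antDict, inclNodes = generate_nodeDict(uvd)
    sorted_ants = sort_antennas(uvd)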
""" nodes, antDict, inclNodes = generate_nodeDict(uv) sortedAntennas = [] for n in sorted(inclNodes): snappairs = [] h = cm_hookup.Hookup() x = h.get_hookup('HH') for ant in nodes[n]['ants']: snappairs.append(antDict[ant]['snapLocs']) snapLocs = {} locs = [] for pair in snappairs: ant = pair[1] loc = pair[0] locs.append(loc) if loc in snapLocs: snapLocs[loc].append(ant) else: snapLocs[loc] = [ant] locs = sorted(np.unique(locs)) ants_sorted = [] for loc in locs: ants = snapLocs[loc] inputpairs = [] for ant in ants: key = 'HH%i:A' % (ant) pair = (int(x[key].hookup['E<ground'][-2].downstream_input_port[1:]), ant) inputpairs.append(pair) for _,a in sorted(inputpairs): ants_sorted.append(a) for ant in ants_sorted: sortedAntennas.append(ant) return sortedAntennas def plot_crosses(uvd, ref_ant): ants = uvd.get_ants() freqs = (uvd.freq_array[0])*10**(-6) times = uvd.time_array lsts = uvd.lst_array Nants = len(ants) # Nside = int(np.ceil(np.sqrt(Nants)))*3 Nside = 4 Yside = int(np.ceil(float(Nants)/Nside)) t_index = 0 jd = times[t_index] utc = Time(jd, format='jd').datetime xlim = (np.min(freqs), np.max(freqs)) ylim = (60, 90) fig, axes = plt.subplots(Yside, Nside, figsize=(Yside*2, Nside*60)) fig.suptitle("JD = {0}, time = {1} UTC".format(jd, utc), fontsize=10) fig.tight_layout(rect=(0, 0, 1, 0.95)) fig.subplots_adjust(left=.1, bottom=.1, right=.9, top=.9, wspace=0.05, hspace=0.2) k = 0 for i in range(Yside): for j in range(Nside): ax = axes[i,j] ax.set_xlim(xlim) # ax.set_ylim(ylim) if k < Nants: px, = ax.plot(freqs, 10*np.log10(np.abs(np.mean(uvd.get_data((ants[k], ref_ant, 'xx')),axis=0))), color='red', alpha=0.75, linewidth=1) py, = ax.plot(freqs, 10*np.log10(np.abs(np.mean(uvd.get_data((ants[k], ref_ant, 'yy')),axis=0))), color='darkorange', alpha=0.75, linewidth=1) pxy, = ax.plot(freqs, 10*np.log10(np.abs(np.mean(uvd.get_data((ants[k], ref_ant, 'xy')),axis=0))), color='royalblue', alpha=0.75, linewidth=1) pyx, = ax.plot(freqs, 10*np.log10(np.abs(np.mean(uvd.get_data((ants[k], ref_ant, 'yx')),axis=0))), color='darkviolet', alpha=0.75, linewidth=1) ax.grid(False, which='both') ax.set_title(str(ants[k]), fontsize=14) if k == 0: ax.legend([px, py, pxy, pyx], ['XX', 'YY', 'XY','YX']) else: ax.axis('off') if j != 0: ax.set_yticklabels([]) else: [t.set_fontsize(10) for t in ax.get_yticklabels()] ax.set_ylabel(r'$10\cdot\log_{10}$ amplitude', fontsize=10) if i != Yside-1: ax.set_xticklabels([]) else: [t.set_fontsize(10) for t in ax.get_xticklabels()] ax.set_xlabel('freq (MHz)', fontsize=10) k += 1 fig.show() plt.close() def gather_source_list(): sources = [] sources.append((50.6750,-37.2083,'Fornax A')) sources.append((201.3667,-43.0192,'Cen A')) # sources.append((83.6333,22.0144,'Taurus A')) sources.append((252.7833,4.9925,'Hercules A')) sources.append((139.5250,-12.0947,'Hydra A')) sources.append((79.9583,-45.7789,'Pictor A')) sources.append((187.7042,12.3911,'Virgo A')) sources.append((83.8208,-59.3897,'Orion A')) sources.append((80.8958,-69.7561,'LMC')) sources.append((13.1875,-72.8286,'SMC')) sources.append((201.3667,-43.0192,'Cen A')) sources.append((83.6333,20.0144,'Crab Pulsar')) sources.append((128.8375,-45.1764,'Vela SNR')) cat_path = f'{DATA_PATH}/G4Jy_catalog.tsv' cat = open(cat_path) f = csv.reader(cat,delimiter='\n') for row in f: if len(row)>0 and row[0][0]=='J': s = row[0].split(';') tup = (float(s[1]),float(s[2]),'') sources.append(tup) return sources def _clean_per_bl_pol(bl, pol, uvd, uvd_diff, area, tol, skip_wgts, freq_range): """ CLEAN function of delay spectra at given 
baseline and polarization. Parameters: ---------- bl: Tuple Tuple of baseline (ant1, ant2) pol: String String of polarization uvd: UVData Object Sample observation from the desired night to compute delay spectra uvd_diff: UVData Object Diff of observation from the desired night to calculate even/odd visibilities and delay spectra area: Float The half-width (i.e. the width of the positive part) of the region in fourier space, symmetric about 0, that is filtered out in ns. tol: Float CLEAN algorithm convergence tolerance (see aipy.deconv.clean) skip_wgts: Float Skips filtering rows with very low total weight (unflagged fraction ~< skip_wgt). See uvtools.dspec.high_pass_fourier_filter for more details freq_range: Float Frequecy range for making delay spectra in MHz Returns: ------- d_even: Dict CLEANed even visibilities, formatted as _d_even[(ant1, ant2, pol)] d_odd: Dict CLEANed odd visibilities, formatted as _d_odd[(ant1, ant2, pol)] """ key = (bl[0], bl[1], pol) freqs = uvd.freq_array[0] FM_idx = np.searchsorted(freqs*1e-6, [85,110]) flag_FM = np.zeros(freqs.size, dtype=bool) flag_FM[FM_idx[0]:FM_idx[1]] = True freq_low, freq_high = np.sort(freq_range) idx_freqs = np.where(np.logical_and(freqs*1e-6 > freq_low, freqs*1e-6 < freq_high))[0] freqs = freqs[idx_freqs] data = uvd.get_data(key)[:, idx_freqs] diff = uvd_diff.get_data(key)[:, idx_freqs] wgts = (~uvd.get_flags(key)*~flag_FM[np.newaxis,:])[:, idx_freqs].astype(float) idx_zero = np.where(np.abs(data) == 0)[0] if(len(idx_zero)/len(data) < 0.5): d_even = (data+diff)*0.5 d_odd = (data-diff)*0.5 d_even_cl, d_even_rs, _ = dspec.high_pass_fourier_filter(d_even, wgts, area*1e-9, freqs[1]-freqs[0], tol=tol, skip_wgt=skip_wgts, window='bh7') d_odd_cl, d_odd_rs, _ = dspec.high_pass_fourier_filter(d_odd, wgts, area*1e-9, freqs[1]-freqs[0], tol=tol, skip_wgt=skip_wgts, window='bh7') idx = np.where(np.mean(np.abs(d_even_cl), axis=1) == 0)[0] d_even_cl[idx] = np.nan d_even_rs[idx] = np.nan idx = np.where(np.mean(np.abs(d_odd_cl), axis=1) == 0)[0] d_odd_cl[idx] = np.nan d_odd_rs[idx] = np.nan d_even = d_even_cl+d_even_rs d_odd = d_odd_cl+d_odd_rs else: d_even = np.zeros_like(data) d_odd = np.zeros_like(data) return d_even, d_odd def clean_ds(bls, uvd_ds, uvd_diff, area=500., tol=1e-7, skip_wgts=0.2, N_threads=12, freq_range=[45,240], pols=['nn', 'ee', 'ne', 'en'], return_option='all'): _data_cleaned_sq, d_even, d_odd = {}, {}, {} if isinstance(area, float) or isinstance(area, int): area = np.array(area).repeat(len(bls)) # Set up multiprocessing and the CLEAM will work inside "func_clean_ds_mpi" function queue = Queue() for rank in range(N_threads): p = Process(target=func_clean_ds_mpi, args=(rank, queue, N_threads, bls, pols, uvd_ds, uvd_diff, area, tol, skip_wgts, freq_range)) p.start() # Collect the CLEANed data from different threads for rank in range(N_threads): data = queue.get() _d_cleaned_sq = data[0] d_e= data[1] d_o= data[2] _data_cleaned_sq = {**_data_cleaned_sq, **_d_cleaned_sq} d_even = {**d_even, **d_e} d_odd = {**d_odd, **d_o} if(return_option == 'dspec'): return _data_cleaned_sq elif(return_option == 'vis'): return d_even, d_odd elif(return_option == 'all'): return _data_cleaned_sq, d_even, d_odd def func_clean_ds_mpi(rank, queue, N_threads, bls, pols, uvd_ds, uvd_diff, area, tol, skip_wgts, freq_range): _data_cleaned_sq, d_even, d_odd = {}, {}, {} N_jobs_each_thread = len(bls)*len(pols)/N_threads k = 0 for i, bl in enumerate(bls): for j, pol in enumerate(pols): which_rank = int(k/N_jobs_each_thread) if(rank == which_rank): key = 
(bl[0], bl[1], pol) d_even[key], d_odd[key] = _clean_per_bl_pol(bl, pol, uvd_ds, uvd_diff, area[i], tol, skip_wgts, freq_range) win = dspec.gen_window('bh7', d_even[key].shape[1]) _d_even = np.fft.fftshift(np.fft.ifft(d_even[key]*win), axes=1) _d_odd = np.fft.fftshift(np.fft.ifft(d_odd[key]*win), axes=1) _data_cleaned_sq[key] = _d_even * _d_odd.conj() k += 1 queue.put([_data_cleaned_sq, d_even, d_odd]) def plot_wfds(uvd, _data_sq, pol): """ Waterfall diagram for autocorrelation delay spectrum Parameters: ---------- uvd: UVData Object Sample observation from the desired night, used for getting antenna information. _data_sq: Dict Square of delay spectra, formatted as _data_sq[(ant1, ant2, pol)] pol: String String of polarization """ nodes, antDict, inclNodes = generate_nodeDict(uvd) ants = uvd.get_ants() sorted_ants = sort_antennas(uvd) freqs = uvd.freq_array[0] taus = np.fft.fftshift(np.fft.fftfreq(freqs.size, np.diff(freqs)[0]))*1e9 times = uvd.time_array lsts = uvd.lst_array*3.819719 inds = np.unique(lsts,return_index=True)[1] lsts = [lsts[ind] for ind in sorted(inds)] maxants = 0 polnames = ['nn','ee','ne','en'] for node in nodes: n = len(nodes[node]['ants']) if n>maxants: maxants = n Nants = len(ants) Nside = maxants Yside = len(inclNodes) t_index = 0 jd = times[t_index] utc = Time(jd, format='jd').datetime status_colors = { 'dish_maintenance' : 'salmon', 'dish_ok' : 'red', 'RF_maintenance' : 'lightskyblue', 'RF_ok' : 'royalblue', 'digital_maintenance' : 'plum', 'digital_ok' : 'mediumpurple', 'calibration_maintenance' : 'lightgreen', 'calibration_ok' : 'green', 'calibration_triage' : 'lime'} status_abbreviations = { 'dish_maintenance' : 'dish-M', 'dish_ok' : 'dish-OK', 'RF_maintenance' : 'RF-M', 'RF_ok' : 'RF-OK', 'digital_maintenance' : 'dig-M', 'digital_ok' : 'dig-OK', 'calibration_maintenance' : 'cal-M', 'calibration_ok' : 'cal-OK', 'calibration_triage' : 'cal-Tri'} h = cm_active.ActiveData(at_date=jd) h.load_apriori() custom_lines = [] labels = [] for s in status_colors.keys(): c = status_colors[s] custom_lines.append(Line2D([0],[0],color=c,lw=2)) labels.append(s) ptitle = 1.92/(Yside*3) fig, axes = plt.subplots(Yside, Nside, figsize=(16,Yside*3)) if pol == 0: fig.suptitle("nn polarization", fontsize=14, y=1+ptitle) vmin, vmax = -50, -30 elif pol == 1: fig.suptitle("ee polarization", fontsize=14, y=1+ptitle) vmin, vmax = -50, -30 elif pol == 2: fig.suptitle("ne polarization", fontsize=14, y=1+ptitle) vmin, vmax = -50, -30 fig.legend(custom_lines,labels,bbox_to_anchor=(0.7,1),ncol=3) fig.tight_layout(rect=(0, 0, 1, 0.95)) fig.subplots_adjust(left=0, bottom=.1, right=.9, top=1, wspace=0.1, hspace=0.3) xticks = np.int32(np.ceil(np.linspace(0,len(taus)-1,5))) xticklabels = np.around(taus[xticks],0) yticks = [int(i) for i in np.linspace(0,len(lsts)-1,6)] yticklabels = [np.around(lsts[ytick],1) for ytick in yticks] for i,n in enumerate(inclNodes): ants = nodes[n]['ants'] j = 0 for _,a in enumerate(sorted_ants): if a not in ants: continue status = h.apriori[f'HH{a}:A'].status abb = status_abbreviations[status] ax = axes[i,j] key = (a, a, polnames[pol]) if(pol == 0 or pol == 1): norm = np.abs(_data_sq[key]).max(axis=1)[:,np.newaxis] elif(pol == 2): key1 = (a, a, polnames[0]) key2 = (a, a, polnames[1]) norm = np.sqrt(np.abs(_data_sq[key1])*np.abs(_data_sq[key2])).max(axis=1)[:,np.newaxis] ds = 10.*np.log10(np.sqrt(np.abs(_data_sq[key])/norm)) im = ax.imshow(ds, aspect='auto', interpolation='nearest', vmin=vmin, vmax=vmax) ax.set_title(f'{a} ({abb})', fontsize=10, 
backgroundcolor=status_colors[status]) if i == len(inclNodes)-1: ax.set_xticks(xticks) ax.set_xticklabels(xticklabels) ax.set_xlabel('Delay (ns)', fontsize=10) [t.set_rotation(70) for t in ax.get_xticklabels()] else: ax.set_xticks(xticks) ax.set_xticklabels([]) if j != 0: ax.set_yticks(yticks) ax.set_yticklabels([]) else: [t.set_fontsize(12) for t in ax.get_yticklabels()] ax.set_ylabel('Time (LST)', fontsize=10) ax.set_yticks(yticks) ax.set_yticklabels(yticklabels) ax.set_ylabel('Time (LST)', fontsize=10) j += 1 for k in range(j,maxants): axes[i,k].axis('off') pos = ax.get_position() cbar_ax=fig.add_axes([0.91,pos.y0,0.01,pos.height]) cbar = fig.colorbar(im, cax=cbar_ax) cbar.set_label(f'Node {n}',rotation=270, labelpad=15) # cbarticks = [np.around(x,1) for x in np.linspace(vmin,vmax,7)[i] for i in cbar.get_ticks()] # cbar.set_ticklabels(cbarticks) # axes[i,maxants-1].annotate(f'Node {n}', (.97,pos.y0+.03),xycoords='figure fraction',rotation=270) fig.show() def plot_antFeatureMap_2700ns(uvd, _data_sq, JD, pol='ee'): """ Plots the positions of all antennas that have data, colored by feature strength. Parameters ---------- uvd: UVData object Observation to extract antenna numbers and positions from _data_sq: Dict Dictionary structured as _data_sq[(antenna number, antenna number, pol)], where the values are the feature strength that will determined the color on the map. JD: Int Julian date of the data pol: String Polarization to plot """ nd = {0: {'pos': [21.427320986820824, -30.722353385032143], 'ants': [0, 1, 2, 11, 12, 13, 14, 23, 24, 25, 26, 39]}, 1: {'pos': [21.427906055943357, -30.722367970752067], 'ants': [3, 4, 5, 6, 15, 16, 17, 18, 27, 28, 29, 30]}, 2: {'pos': [21.428502498826337, -30.722356438400826], 'ants': [7, 8, 9, 10, 19, 20, 21, 31, 32, 33, 321, 323]}, 3: {'pos': [21.427102788863543, -30.72199587048034], 'ants': [36, 37, 38, 50, 51, 52, 53, 65, 66, 67, 68, 320]}, 4: {'pos': [21.427671849802184, -30.7220282862175], 'ants': [40, 41, 42, 54, 55, 56, 57, 69, 70, 71, 72, 324]}, 5: {'pos': [21.42829977472493, -30.722027118338183], 'ants': [43, 44, 45, 46, 58, 59, 60, 73, 74, 75, 76, 322]}, 6: {'pos': [21.428836727299945, -30.72219119740069], 'ants': [22, 34, 35, 47, 48, 49, 61, 62, 63, 64, 77, 78]}, 7: {'pos': [21.426862825121685, -30.72169978685838], 'ants': [81, 82, 83, 98, 99, 100, 116, 117, 118, 119, 137, 138]}, 8: {'pos': [21.427419087275524, -30.72169615183073], 'ants': [84, 85, 86, 87, 101, 102, 103, 104, 120, 121, 122, 123]}, 9: {'pos': [21.42802904166864, -30.721694142092485], 'ants': [88, 89, 90, 91, 105, 106, 107, 108, 124, 125, 126, 325]}, 10: {'pos': [21.42863899600041, -30.721692129488424], 'ants': [92, 93, 94, 109, 110, 111, 112, 127, 128, 129, 130, 328]}, 11: {'pos': [21.42914035998215, -30.721744794462655], 'ants': [79, 80, 95, 96, 97, 113, 114, 115, 131, 132, 133, 134]}, 12: {'pos': [21.426763768223857, -30.72133448059758], 'ants': [135, 136, 155, 156, 157, 158, 176, 177, 178, 179, 329, 333]}, 13: {'pos': [21.42734159294201, -30.72141297904905], 'ants': [139, 140, 141, 142, 159, 160, 161, 162, 180, 181, 182, 183]}, 14: {'pos': [21.428012089958028, -30.721403280585722], 'ants': [143, 144, 145, 146, 163, 164, 165, 166, 184, 185, 186, 187]}, 15: {'pos': [21.428561498114107, -30.721408957468245], 'ants': [147, 148, 149, 150, 167, 168, 169, 170, 188, 189, 190, 191]}, 16: {'pos': [21.42914681969319, -30.721434635693182], 'ants': [151, 152, 153, 154, 171, 172, 173, 174, 192, 193, 194, 213]}, 17: {'pos': [21.426857989080208, -30.72109992091893], 'ants': [196, 197, 198, 
199, 215, 216, 217, 218, 233, 234, 235, 337]}, 18: {'pos': [21.427443064426363, -30.7210702936363], 'ants': [200, 201, 202, 203, 219, 220, 221, 222, 236, 237, 238, 239]}, 19: {'pos': [21.428053014877808, -30.72106828382215], 'ants': [204, 205, 206, 207, 223, 224, 225, 226, 240, 241, 242, 243]}, 20: {'pos': [21.428662965267904, -30.721066271142263], 'ants': [208, 209, 210, 211, 227, 228, 229, 244, 245, 246, 261, 262]}, 21: {'pos': [21.429383860959977, -30.721211242305866], 'ants': [175, 195, 212, 214, 231, 232, 326, 327, 331, 332, 336, 340]}, 22: {'pos': [21.427060077987438, -30.720670550054763], 'ants': [250, 251, 252, 253, 266, 267, 268, 269, 281, 282, 283, 295]}, 23: {'pos': [21.42767002595312, -30.720668542063535], 'ants': [254, 255, 256, 257, 270, 271, 272, 273, 284, 285, 286, 287]}, 24: {'pos': [21.42838974031629, -30.720641805595115], 'ants': [258, 259, 260, 274, 275, 276, 288, 289, 290, 291, 302, 303]}, 25: {'pos': [21.429052089734615, -30.720798251186455], 'ants': [230, 247, 248, 249, 263, 264, 265, 279, 280, 335, 339]}, 26: {'pos': [21.427312432981267, -30.720413813332755], 'ants': [296, 297, 298, 308, 309, 310, 330, 334, 338, 341, 346, 347]}, 27: {'pos': [21.42789750442093, -30.72038427427254], 'ants': [299, 300, 301, 311, 312, 313, 314, 342, 343]}, 28: {'pos': [21.428507450517774, -30.72038226236355], 'ants': [304, 305, 315, 316, 317, 318, 348]}, 29: {'pos': [21.42885912979846, -30.72052728164184], 'ants': [277, 278, 292, 293, 294, 306, 307, 319, 344, 345, 349]}} freqs = uvd.freq_array[0] taus = np.fft.fftshift(np.fft.fftfreq(freqs.size, np.diff(freqs)[0]))*1e9 idx_region1 = np.where(np.logical_and(taus > 2500, taus < 3000))[0] idx_region2 = np.where(np.logical_and(taus > 2000, taus < 2500))[0] fig = plt.figure(figsize=(14,10)) nodes, antDict, inclNodes = generate_nodeDict(uvd) antnums = uvd.get_ants() cmap = plt.get_cmap('inferno') sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=10)) sm._A = [] ampmax = 10 ampmin = 0 rang = ampmax-ampmin for node in sorted(inclNodes): ants = sorted(nodes[node]['ants']) nodeamps = [] points = np.zeros((len(ants),2)) for i,antNum in enumerate(ants): key = (antNum, antNum, pol) idx = np.argwhere(uvd.antenna_numbers == antNum)[0][0] antPos = uvd.antenna_positions[idx] amp = 10*np.log10(np.sqrt(np.nanmean(np.abs(_data_sq[key][:,idx_region1]))/np.nanmean(np.abs(_data_sq[key][:,idx_region2])))) nodeamps.append(amp) points[i,:] = [antPos[1],antPos[2]] hull = scipy.spatial.ConvexHull(points) center = np.average(points,axis=0) hullpoints = np.zeros((len(hull.simplices),2)) namp = np.nanmean(nodeamps) ncolor = cmap(float((namp-ampmin)/rang)) plt.fill(points[hull.vertices,0], points[hull.vertices,1],alpha=0.5,color=ncolor) for node in sorted(inclNodes): ants = sorted(nodes[node]['ants']) npos = nd[int(node)]['pos'] plt.plot(npos[0],npos[1],marker="s",markersize=15,color="black") for antNum in ants: idx = np.argwhere(uvd.antenna_numbers == antNum)[0][0] antPos = uvd.antenna_positions[idx] key = (antNum, antNum, pol) amp = 10*np.log10(np.sqrt(np.nanmean(np.abs(_data_sq[key][:,idx_region1]))/np.nanmean(np.abs(_data_sq[key][:,idx_region2])))) if math.isnan(amp): marker="v" color="r" markersize=30 coloramp = [0] else: coloramp = cmap(float((amp-ampmin)/rang)) color = coloramp marker="h" markersize=40 plt.plot(antPos[1],antPos[2],marker=marker,markersize=markersize,color=color) if coloramp[0]>0.6 or math.isnan(amp): plt.text(antPos[1]-3,antPos[2],str(antNum),color='black') else: plt.text(antPos[1]-3,antPos[2],str(antNum),color='white') 
plt.title('Antenna map - {} polarization (JD{})'.format(pol, JD)) cbar = fig.colorbar(sm) cbar.set_label('2700ns Feature Amplitude (dB)') def plot_antFeatureMap_noise(uvd, d_even, d_odd, JD, pol='ee'): """ Plots the positions of all antennas that have data, colored by feature strength. Parameters ---------- uvd: UVData object Diff UVData object _data_sq: Dict Dictionary structured as _data_sq[(antenna number, antenna number, pol)], where the values are the feature strength that will determined the color on the map. JD: Int Julian date of the data pol: String Polarization to plot """ nd = {0: {'pos': [21.427320986820824, -30.722353385032143], 'ants': [0, 1, 2, 11, 12, 13, 14, 23, 24, 25, 26, 39]}, 1: {'pos': [21.427906055943357, -30.722367970752067], 'ants': [3, 4, 5, 6, 15, 16, 17, 18, 27, 28, 29, 30]}, 2: {'pos': [21.428502498826337, -30.722356438400826], 'ants': [7, 8, 9, 10, 19, 20, 21, 31, 32, 33, 321, 323]}, 3: {'pos': [21.427102788863543, -30.72199587048034], 'ants': [36, 37, 38, 50, 51, 52, 53, 65, 66, 67, 68, 320]}, 4: {'pos': [21.427671849802184, -30.7220282862175], 'ants': [40, 41, 42, 54, 55, 56, 57, 69, 70, 71, 72, 324]}, 5: {'pos': [21.42829977472493, -30.722027118338183], 'ants': [43, 44, 45, 46, 58, 59, 60, 73, 74, 75, 76, 322]}, 6: {'pos': [21.428836727299945, -30.72219119740069], 'ants': [22, 34, 35, 47, 48, 49, 61, 62, 63, 64, 77, 78]}, 7: {'pos': [21.426862825121685, -30.72169978685838], 'ants': [81, 82, 83, 98, 99, 100, 116, 117, 118, 119, 137, 138]}, 8: {'pos': [21.427419087275524, -30.72169615183073], 'ants': [84, 85, 86, 87, 101, 102, 103, 104, 120, 121, 122, 123]}, 9: {'pos': [21.42802904166864, -30.721694142092485], 'ants': [88, 89, 90, 91, 105, 106, 107, 108, 124, 125, 126, 325]}, 10: {'pos': [21.42863899600041, -30.721692129488424], 'ants': [92, 93, 94, 109, 110, 111, 112, 127, 128, 129, 130, 328]}, 11: {'pos': [21.42914035998215, -30.721744794462655], 'ants': [79, 80, 95, 96, 97, 113, 114, 115, 131, 132, 133, 134]}, 12: {'pos': [21.426763768223857, -30.72133448059758], 'ants': [135, 136, 155, 156, 157, 158, 176, 177, 178, 179, 329, 333]}, 13: {'pos': [21.42734159294201, -30.72141297904905], 'ants': [139, 140, 141, 142, 159, 160, 161, 162, 180, 181, 182, 183]}, 14: {'pos': [21.428012089958028, -30.721403280585722], 'ants': [143, 144, 145, 146, 163, 164, 165, 166, 184, 185, 186, 187]}, 15: {'pos': [21.428561498114107, -30.721408957468245], 'ants': [147, 148, 149, 150, 167, 168, 169, 170, 188, 189, 190, 191]}, 16: {'pos': [21.42914681969319, -30.721434635693182], 'ants': [151, 152, 153, 154, 171, 172, 173, 174, 192, 193, 194, 213]}, 17: {'pos': [21.426857989080208, -30.72109992091893], 'ants': [196, 197, 198, 199, 215, 216, 217, 218, 233, 234, 235, 337]}, 18: {'pos': [21.427443064426363, -30.7210702936363], 'ants': [200, 201, 202, 203, 219, 220, 221, 222, 236, 237, 238, 239]}, 19: {'pos': [21.428053014877808, -30.72106828382215], 'ants': [204, 205, 206, 207, 223, 224, 225, 226, 240, 241, 242, 243]}, 20: {'pos': [21.428662965267904, -30.721066271142263], 'ants': [208, 209, 210, 211, 227, 228, 229, 244, 245, 246, 261, 262]}, 21: {'pos': [21.429383860959977, -30.721211242305866], 'ants': [175, 195, 212, 214, 231, 232, 326, 327, 331, 332, 336, 340]}, 22: {'pos': [21.427060077987438, -30.720670550054763], 'ants': [250, 251, 252, 253, 266, 267, 268, 269, 281, 282, 283, 295]}, 23: {'pos': [21.42767002595312, -30.720668542063535], 'ants': [254, 255, 256, 257, 270, 271, 272, 273, 284, 285, 286, 287]}, 24: {'pos': [21.42838974031629, -30.720641805595115], 'ants': [258, 259, 
260, 274, 275, 276, 288, 289, 290, 291, 302, 303]}, 25: {'pos': [21.429052089734615, -30.720798251186455], 'ants': [230, 247, 248, 249, 263, 264, 265, 279, 280, 335, 339]}, 26: {'pos': [21.427312432981267, -30.720413813332755], 'ants': [296, 297, 298, 308, 309, 310, 330, 334, 338, 341, 346, 347]}, 27: {'pos': [21.42789750442093, -30.72038427427254], 'ants': [299, 300, 301, 311, 312, 313, 314, 342, 343]}, 28: {'pos': [21.428507450517774, -30.72038226236355], 'ants': [304, 305, 315, 316, 317, 318, 348]}, 29: {'pos': [21.42885912979846, -30.72052728164184], 'ants': [277, 278, 292, 293, 294, 306, 307, 319, 344, 345, 349]}} freqs = uvd.freq_array[0] taus = np.fft.fftshift(np.fft.fftfreq(freqs.size, np.diff(freqs)[0]))*1e9 idx_region = np.where(taus > 1000)[0] fig = plt.figure(figsize=(14,10)) nodes, antDict, inclNodes = generate_nodeDict(uvd) antnums = uvd.get_ants() cmap = plt.get_cmap('inferno') sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=7)) sm._A = [] ampmax = 7 ampmin = 0 rang = ampmax-ampmin for node in sorted(inclNodes): ants = sorted(nodes[node]['ants']) nodeamps = [] points = np.zeros((len(ants),2)) for i,antNum in enumerate(ants): key = (antNum, antNum, pol) idx = np.argwhere(uvd.antenna_numbers == antNum)[0][0] antPos = uvd.antenna_positions[idx] diff = uvd.get_data(key) amp = np.nanmean(get_ds_average(d_even[key], d_odd[key])[idx_region])/np.nanmean(get_ds_average(diff, diff)[idx_region]) nodeamps.append(amp) points[i,:] = [antPos[1],antPos[2]] hull = scipy.spatial.ConvexHull(points) center = np.average(points,axis=0) hullpoints = np.zeros((len(hull.simplices),2)) namp = np.nanmean(nodeamps) ncolor = cmap(float((namp-ampmin)/rang)) plt.fill(points[hull.vertices,0], points[hull.vertices,1],alpha=0.5,color=ncolor) for node in sorted(inclNodes): ants = sorted(nodes[node]['ants']) npos = nd[int(node)]['pos'] plt.plot(npos[0],npos[1],marker="s",markersize=15,color="black") for antNum in ants: idx = np.argwhere(uvd.antenna_numbers == antNum)[0][0] antPos = uvd.antenna_positions[idx] key = (antNum, antNum, pol) diff = uvd.get_data(key) amp = np.nanmean(get_ds_average(d_even[key], d_odd[key])[idx_region])/np.nanmean(get_ds_average(diff, diff)[idx_region]) if math.isnan(amp): marker="v" color="r" markersize=30 coloramp = [0] else: coloramp = cmap(float((amp-ampmin)/rang)) color = coloramp marker="h" markersize=40 plt.plot(antPos[1],antPos[2],marker=marker,markersize=markersize,color=color) if coloramp[0]>0.6 or math.isnan(amp): plt.text(antPos[1]-3,antPos[2],str(antNum),color='black') else: plt.text(antPos[1]-3,antPos[2],str(antNum),color='white') plt.title('Antenna map - {} polarization (JD{})'.format(pol, JD)) cbar = fig.colorbar(sm) cbar.set_label('Ratio of delay spectrum to noise floor (dB)') def get_ds_noise_ratio(uvd, uvd_diff, bls): freqs = uvd.freq_array[0]*1e-6 pols = ['nn', 'ee'] freqs1 = [40, 50, 120, 155, 190] freqs2 = [250, 85, 155, 190, 225] freq_range = freqs1+freqs2 ds_noise_ratio = {} for freq1, freq2 in zip(freqs1, freqs2): d_even, d_odd = clean_ds(bls, uvd, uvd_diff, freq_range=[freq1, freq2], pols=pols, return_option='vis') idx_freq = np.where(np.logical_and(freqs >= freq1, freqs <= freq2))[0] freqs_sub = freqs[idx_freq] taus = np.fft.fftshift(np.fft.fftfreq(freqs_sub.size, np.diff(freqs_sub)[0]*1e6))*1e9 idx_region = np.where(taus > 1000)[0] ants = uvd.get_ants() for pol in pols: ds_noise_ratio[(freq1, freq2, pol)] = [] for i, antNum in enumerate(ants): key = (antNum, antNum, pol) idx = np.argwhere(ants == antNum)[0][0] diff = 
uvd_diff.get_data(key) ratio = np.nanmean(get_ds_average(d_even[key], d_odd[key])[idx_region])/np.nanmean(get_ds_average(diff, diff)[idx_region]) ds_noise_ratio[(freq1, freq2, pol)].append(ratio) ds_noise_ratio[(freq1, freq2, pol)] = np.array(ds_noise_ratio[(freq1, freq2, pol)]) return ds_noise_ratio def get_ds_average(d_even, d_odd, Nint=3): Ntime_bin = d_even.shape[0] // Nint Nfreq = d_even.shape[1] d_even_ave = np.zeros((Ntime_bin, Nfreq), dtype=np.complex128) d_odd_ave = np.zeros((Ntime_bin, Nfreq), dtype=np.complex128) win = dspec.gen_window('bh7', Nfreq) for i in range(Ntime_bin): d_even_ave[i] = np.nanmean(d_even[i*Nint:(i+1)*Nint], axis=0) d_odd_ave[i] = np.nanmean(d_odd[i*Nint:(i+1)*Nint], axis=0) _d_even_ave = np.fft.fftshift(np.fft.ifft(d_even_ave*win), axes=1) _d_odd_ave = np.fft.fftshift(np.fft.ifft(d_odd_ave*win), axes=1) N_alt = _d_even_ave.shape[0] // 2 _d_ave = np.sqrt(np.abs(np.nanmean(_d_even_ave[::][:N_alt]*_d_odd_ave[1::][:N_alt].conj(), axis=0))) return _d_ave def interactive_plots_dspec(bls, uvd, uvd_diff, JD): output_notebook(hide_banner=True) freqs = uvd.freq_array[0] FM_idx = np.searchsorted(freqs*1e-6, [85,110]) flag_FM = np.zeros(freqs.size, dtype=bool) flag_FM[FM_idx[0]:FM_idx[1]] = True pols = ['nn', 'ee'] freqs1 = [40, 50, 120, 155, 190] freqs2 = [250, 85, 155, 190, 225] freq_range = freqs1+freqs2 d_even_dict = {} d_odd_dict = {} for freq1, freq2 in zip(freqs1, freqs2): d_even, d_odd = clean_ds(bls, uvd, uvd_diff, freq_range=[freq1, freq2], pols=pols, return_option='vis') d_even_dict[(freq1, freq2)] = d_even d_odd_dict[(freq1, freq2)] = d_odd nodes, antDict, inclNodes = generate_nodeDict(uvd) data_full = [] wgts_full = [] taus_full = [] _data_full = [] _diff_full = [] _diff_full2 = [] N_xaxis = [] N_aggr = [0] keys = [] for i, bl in enumerate(bls): for j, pol in enumerate(pols): key = (bl[0],bl[1],pol) keys.append(str(key)+' -- node {} (snap {})'.format(int(antDict[bl[0]]['node']),antDict[bl[0]]['snapLocs'][0])) auto = np.abs(uvd.get_data(key)) auto /= np.median(auto, axis=1)[:,np.newaxis] auto[np.isinf(auto)] = np.nan auto_ave = np.nanmean(auto, axis=0, dtype=np.float64) wgts = (~uvd.get_flags(key)*~flag_FM[np.newaxis,:]) wgts_ave = np.mean(wgts, axis=0) wgts_ave = np.where(wgts_ave > 0.7, 1, 0) if(np.isnan(np.mean(auto_ave)) != True): data_full = data_full + list(auto_ave) else: data_full = data_full + list(np.isnan(auto_ave).astype(float)) wgts_full = wgts_full + list(wgts_ave) for freq1, freq2 in zip(freqs1, freqs2): idx_freq = np.where(np.logical_and(freqs*1e-6 > freq1, freqs*1e-6 < freq2))[0] d_even = d_even_dict[(freq1,freq2)][key] d_odd = d_odd_dict[(freq1,freq2)][key] diff = uvd_diff.get_data(key)[:,idx_freq] _data_ave = get_ds_average(d_even, d_odd) _diff_ave = get_ds_average(diff, diff) if(np.isnan(np.mean(_data_ave)) != True and np.mean(_data_ave) != 0): _data_full = _data_full + list(10*np.log10(_data_ave/_data_ave.max())) _diff_full = _diff_full + list(10*np.log10(_diff_ave/_data_ave.max())) else: _data_full = _data_full + list(np.isnan(_data_ave).astype(float)) _diff_full = _diff_full + list(np.isnan(_diff_ave).astype(float)) if(i == 0 and j == 0): freqs_sub = freqs[idx_freq] taus_sub = np.fft.fftshift(np.fft.fftfreq(freqs_sub.size, np.diff(freqs_sub)[0])) taus_full = taus_full + list(taus_sub*1e9) N_xaxis.append(len(freqs_sub)) N_aggr.append(np.sum(N_xaxis)) x_le = taus_full[:freqs.size] ds_update = _data_full[:freqs.size] dff_update = _diff_full[:freqs.size] x_ri = freqs/1e6 auto_update = np.log10(data_full[:freqs.size]) auto_flagged_update = 
np.array(auto_update, dtype=np.float64)/np.array(wgts_full[:freqs.size], dtype=np.float64)-0.1 source = ColumnDataSource(data=dict(x_le=x_le, ds_update=ds_update, dff_update=dff_update, N_xaxis=N_xaxis, N_aggr=N_aggr, taus_full=taus_full, _data_full=_data_full, _diff_full=_diff_full, x_ri=x_ri, auto_update=auto_update, auto_flagged_update=auto_flagged_update, data_full=data_full, wgts_full=wgts_full)) plot1 = figure(title="Delay spectrum", x_range=(0, 4500), y_range=(-60, 0), plot_width=550, plot_height=500, output_backend="canvas", tools='pan,box_zoom,box_select,crosshair,reset,save,wheel_zoom,hover') plot1.line('x_le', 'ds_update', source=source, color='#1f77b4', line_width=2, alpha=0.8, legend_label='delay spectrum') plot1.line('x_le', 'dff_update', source=source, color='red', line_width=1.5, alpha=0.6, legend_label='noise from diff') plot1.xaxis.axis_label = '𝜏 (ns)' plot1.yaxis.axis_label = '|Ṽ (𝜏)| in dB' plot2 = figure(title="Autocorrelation", y_range=(-0.6, 0.4), x_range=Range1d(start=freqs.min()/1e6, end=freqs.max()/1e6), plot_width=550, plot_height=500, output_backend="canvas", tools='pan,box_zoom,box_select,crosshair,reset,save,wheel_zoom,hover') plot2.line('x_ri', 'auto_update', source=source, color='#ff7f0e', line_width=2, alpha=0.8, legend_label='unflagged auto') plot2.line('x_ri', 'auto_flagged_update', source=source, color='#1f77b4', line_width=2, alpha=0.8, legend_label='flagged auto') plot2.xaxis.axis_label = '𝜈 (MHz)' plot2.yaxis.axis_label = 'log10(|V(𝜈)|)' radio_button = RadioButtonGroup(labels=["Full band", "50-85 MHz", "120-155 MHz", "155-190 MHz", "190-225 MHz"], active=0) select = Select(title="key:", value=keys[0], options=keys, width=300) callback = CustomJS(args=dict(source=source, select=select, radio_button=radio_button, xr=plot2.x_range), code=""" var data = source.data; var active = radio_button.active; var key = select.value; var keys = select.options; var x_le = []; var y1_le = []; var y2_le = []; var y1_ri = []; var y2_ri = []; var N_xaxis = data['N_xaxis']; var N_aggr = data['N_aggr']; var taus_full = data['taus_full']; var _data_full = data['_data_full']; var _diff_full = data['_diff_full']; var x_ri = data['x_ri'] var data_full = data['data_full'] var wgts_full = data['wgts_full'] for (var i = 0; i < keys.length; i++) { if (key == keys[i]) { for (var j = 0; j < N_xaxis[active]; j++) { x_le.push(taus_full[N_aggr[active]+j]); y1_le.push(_data_full[N_aggr[5]*i+N_aggr[active]+j]); y2_le.push(_diff_full[N_aggr[5]*i+N_aggr[active]+j]); } for (var j = 0; j < x_ri.length; j++) { y1_ri.push(Math.log10(data_full[x_ri.length*i+j])); y2_ri.push(Math.log10(data_full[x_ri.length*i+j])/wgts_full[x_ri.length*i+j]-0.1); } } } data['x_le'] = x_le; data['ds_update'] = y1_le; data['dff_update'] = y2_le; data['auto_update'] = y1_ri; data['auto_flagged_update'] = y2_ri; if (active == 0) { var start = 46.92 var end = 234.30 } else if (active == 1) { var start = 50 var end = 85 } else { var start = 120+(active-2)*35 var end = 120+(active-1)*35 } xr.setv({"start": start, "end": end}) source.change.emit(); """) radio_button.js_on_change('active', callback) select.js_on_change('value', callback) plot2.x_range.js_on_change('start', callback) plot2.x_range.js_on_change('end', callback) layout = column( row(plot1, plot2), select, column(radio_button) ) show(layout); def CorrMatrix_2700ns(uvd, HHfiles, difffiles, flagfile, JD, N_threads=12): """ Plots a matrix representing the 2700ns feature correlation of each baseline. 
Parameters: ---------- uvd: UVData Object Sample observation from the desired night, used for getting antenna information. HHfiles: List List of all files for a night of observation difffiles: List List of diff files for a night of observation flagfile: String Sting of flag file JD: String JD of the given night of observation """ pols = ['nn','ee','ne','en'] Nants = len(uvd.get_ants()) files, lsts, inds = get_hourly_files(uvd, HHfiles, JD) nTimes = len(files) if nTimes > 3: plotTimes = [0,nTimes//2,nTimes-1] else: plotTimes = np.arange(0,nTimes,1) for t_i, t in enumerate(plotTimes): ind = inds[t] HHfile = HHfiles[ind] difffile = difffiles[ind] uvd_data_ds = UVData() uvd_data_ds.read(HHfile) uvd_diff_ds = UVData() uvd_diff_ds.read(difffile) uvf = UVFlag() uvf.read(flagfile) bls = uvd_data_ds.get_antpairs() times_uvf = np.unique(uvf.time_array) times_uvd = np.unique(uvd_data_ds.time_array) idx_times = [np.where(time_uvd == times_uvf)[0][0] for time_uvd in times_uvd] uvd_data_ds.flag_array[:,0,:,:] = np.repeat(uvf.flag_array[idx_times], len(bls), axis=0) if(t_i == 0): antpos, ants = uvd_data_ds.get_ENU_antpos() bl_len = [] for bl in bls: idx_ant1 = np.where(bl[0] == ants)[0] idx_ant2 = np.where(bl[1] == ants)[0] bl_len.append(np.sqrt(np.sum((antpos[idx_ant2]-antpos[idx_ant1])**2))) bl_len = np.array(bl_len) area = 250+bl_len/scipy.constants.c*1e9 _d_cleaned_sq = clean_ds(bls, uvd_data_ds, uvd_diff_ds, pols=pols, area=area, return_option='dspec', N_threads=N_threads) freqs = uvd_data_ds.freq_array[0] taus = np.fft.fftshift(np.fft.fftfreq(freqs.size, np.diff(freqs)[0]))*1e9 idx_region1 = np.where(np.logical_and(taus > 2500, taus < 3000))[0] idx_region2 = np.where(np.logical_and(taus > 2000, taus < 2500))[0] amp = {} for pol in pols: _data_cleaned_sq = np.zeros((Nants, Nants, len(taus)), dtype=np.complex128) for i, ant1 in enumerate(uvd_data_ds.get_ants()): for j, ant2 in enumerate(uvd_data_ds.get_ants()): if(i <= j): bl = (ant1, ant2) try: _data_cleaned_sq[i,j] = np.nanmean(_d_cleaned_sq[(bl[0],bl[1],pol)], axis=0) except: _data_cleaned_sq[i,j] = np.nanmean(_d_cleaned_sq[(bl[1],bl[0],pol)], axis=0) _data_cleaned_sq[j,i] = _data_cleaned_sq[i,j] amp[pol] = 10*np.log10(np.sqrt(np.nanmean(np.abs(_data_cleaned_sq[:,:,idx_region1]), axis=-1)/np.nanmean(np.abs(_data_cleaned_sq[:,:,idx_region2]), axis=-1))) plotCorrMatrix(uvd_data_ds, amp, pols=pols, nodes='auto', vminIn=0, vmaxIn=3) def plot_metric(metrics, ants=None, antpols=None, title='', ylabel='Modified z-Score', xlabel=''): '''Helper function for quickly plotting an individual antenna metric.''' if ants is None: ants = list(set([key[0] for key in metrics.keys()])) if antpols is None: antpols = list(set([key[1] for key in metrics.keys()])) for antpol in antpols: for i,ant in enumerate(ants): metric = 0 if (ant,antpol) in metrics: metric = metrics[(ant,antpol)] plt.plot(i,metric,'.') plt.annotate(str(ant)+antpol,xy=(i,metrics[(ant,antpol)])) plt.gca().set_prop_cycle(None) plt.title(title) plt.ylabel(ylabel) plt.xlabel(xlabel) def show_metric(ant_metrics, antmetfiles, ants=None, antpols=None, title='', ylabel='Modified z-Score', xlabel=''): print("Ant Metrics for {}".format(antmetfiles[1])) plt.figure() plot_metric(ant_metrics['final_mod_z_scores']['meanVij'], title = 'Mean Vij Modified z-Score') plt.figure() plot_metric(ant_metrics['final_mod_z_scores']['redCorr'], title = 'Redundant Visibility Correlation Modified z-Score') plt.figure() plot_metric(ant_metrics['final_mod_z_scores']['meanVijXPol'], antpols=['n'], title = 'Modified z-score of 
(Vxy+Vyx)/(Vxx+Vyy)') plt.figure() plot_metric(ant_metrics['final_mod_z_scores']['redCorrXPol'], antpols=['n'], title = 'Modified z-Score of Power Correlation Ratio Cross/Same') plt.figure() plot_metric(ant_metrics['final_mod_z_scores']['redCorrXPol'], antpols=['e'], title = 'Modified z-Score of Power Correlation Ratio Cross/Same') def all_ant_mets(antmetfiles,HHfiles): file = HHfiles[0] uvd_hh = UVData() uvd_hh.read_uvh5(file) uvdx = uvd_hh.select(polarizations = -5, inplace = False) uvdx.ants = np.unique(np.concatenate([uvdx.ant_1_array, uvdx.ant_2_array])) ants = uvdx.get_ants() times = uvd_hh.time_array Nants = len(ants) jd_start = np.floor(times.min()) antfinfiles = [] for i,file in enumerate(antmetfiles): if i%50==0: antfinfiles.append(antmetfiles[i]) Nfiles = len(antfinfiles) Nfiles2 = len(antmetfiles) xants = np.zeros((Nants*2, Nfiles2)) dead_ants = np.zeros((Nants*2, Nfiles2)) cross_ants = np.zeros((Nants*2, Nfiles2)) badants = [] pol2ind = {'n':0, 'e':1} times = [] for i,file in enumerate(antfinfiles): time = file[54:60] times.append(time) for i,file in enumerate(antmetfiles): antmets = hera_qm.ant_metrics.load_antenna_metrics(file) for j in antmets['xants']: xants[2*np.where(ants==j[0])[0]+pol2ind[j[1]], i] = 1 badants.extend(map(lambda x: x[0], antmets['xants'])) for j in antmets['crossed_ants']: cross_ants[2*np.where(ants==j[0])[0]+pol2ind[j[1]], i] = 1 for j in antmets['dead_ants']: dead_ants[2*np.where(ants==j[0])[0]+pol2ind[j[1]], i] = 1 badants = np.unique(badants) xants[np.where(xants==1)] *= np.nan dead_ants[np.where(dead_ants==0)] *= np.nan cross_ants[np.where(cross_ants==0)] *= np.nan antslabels = [] for i in ants: labeln = str(i) + 'n' labele = str(i) + 'e' antslabels.append(labeln) antslabels.append(labele) fig, ax = plt.subplots(1, figsize=(16,20)) # plotting ax.matshow(xants, aspect='auto', cmap='RdYlGn_r', vmin=-.3, vmax=1.3, extent=[0, len(times), Nants*2, 0]) ax.matshow(dead_ants, aspect='auto', cmap='RdYlGn_r', vmin=-.3, vmax=1.3, extent=[0, len(times), Nants*2, 0]) ax.matshow(cross_ants, aspect='auto', cmap='RdBu', vmin=-.3, vmax=1.3, extent=[0, len(times), Nants*2, 0]) # axes ax.grid(color='k') ax.xaxis.set_ticks_position('bottom') ax.set_xticks(np.arange(len(times))+0.5) ax.set_yticks(np.arange(Nants*2)+0.5) ax.tick_params(size=8) if Nfiles > 20: ticklabels = times ax.set_xticklabels(ticklabels) else: ax.set_xticklabels(times) ax.set_yticklabels(antslabels) [t.set_rotation(30) for t in ax.get_xticklabels()] [t.set_size(12) for t in ax.get_xticklabels()] [t.set_rotation(0) for t in ax.get_yticklabels()] [t.set_size(12) for t in ax.get_yticklabels()] ax.set_title("Ant Metrics bad ants over observation", fontsize=14) ax.set_xlabel('decimal of JD = {}'.format(int(jd_start)), fontsize=16) ax.set_ylabel('antenna number and pol', fontsize=16) red_ptch = mpatches.Patch(color='red') grn_ptch = mpatches.Patch(color='green') blu_ptch = mpatches.Patch(color='blue') ax.legend([red_ptch, blu_ptch, grn_ptch], ['dead ant', 'cross ant', 'good ant'], fontsize=14)
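# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): plot_metric()
# above expects a dictionary keyed by (antenna number, antenna polarization)
# tuples with one scalar score per entry. The antenna numbers and scores
# below are made up purely to illustrate that structure; `plt` is the
# module-level matplotlib.pyplot import used throughout this file.
# ----------------------------------------------------------------------
def _example_plot_metric():
    # Hypothetical modified z-scores for three antennas and two feed pols.
    toy_metrics = {
        (0, 'n'): 0.4, (0, 'e'): 0.7,
        (1, 'n'): 2.1, (1, 'e'): 1.8,
        (2, 'n'): 5.3, (2, 'e'): 4.9,  # e.g. a misbehaving antenna
    }
    plt.figure()
    plot_metric(toy_metrics, title='Toy metric example (illustrative values only)')
    plt.show()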
example18_iot_chime_nn.py
#!/usr/bin/env python3
# coding: utf-8
# Example 18: IoT chime, WSGI version (supports multiple chime sounds)

port = 4                                    # GPIO port number
ping_f = 554                                # chime tone frequency 1
pong_f = 440                                # chime tone frequency 2
pingAlt_f = 587                             # warning tone frequency 1
pongAlt_f = 699                             # warning tone frequency 2

from wsgiref.simple_server import make_server
from RPi import GPIO                        # GPIO module
from time import sleep                      # sleep function
from sys import argv                        # command-line arguments
import threading                            # threading library

def chime(level):                           # chime sound (run in a thread)
    if level is None or level <= 0:         # missing or out-of-range level: normal chime
        mutex.acquire()                     # acquire the lock (start of exclusive section)
        pwm.ChangeFrequency(ping_f)         # change the PWM frequency
        pwm.start(50)                       # start PWM output at 50% duty cycle
        sleep(0.5)                          # wait 0.5 seconds
        pwm.ChangeFrequency(pong_f)         # change the PWM frequency
        sleep(0.5)                          # wait 0.5 seconds
        pwm.stop()                          # stop PWM output
        mutex.release()                     # release the lock (end of exclusive section)
        return                              # done
    if level >= 1:                          # warning level 1 or higher
        mutex.acquire()                     # acquire the lock (start of exclusive section)
        pwm.ChangeFrequency(pingAlt_f)      # change the PWM frequency
        pwm.start(50)                       # start PWM output at 50% duty cycle
        sleep(0.1)                          # wait 0.1 seconds
        pwm.stop()                          # stop PWM output
        mutex.release()                     # release the lock (end of exclusive section)
    if level >= 2:                          # warning level 2 or higher
        mutex.acquire()                     # acquire the lock (start of exclusive section)
        pwm.ChangeFrequency(pongAlt_f)      # change the PWM frequency
        pwm.start(50)                       # start PWM output at 50% duty cycle
        sleep(0.2)                          # wait 0.2 seconds
        pwm.stop()                          # stop PWM output
        mutex.release()                     # release the lock (end of exclusive section)
    if level >= 3:                          # warning level 3
        for i in range(23):                 # repeat the following 23 times
            sleep(0.1)                      # wait 0.1 seconds
            chime(2)                        # same tone sequence as level 2

def wsgi_app(environ, start_response):      # handler for incoming HTTP requests
    level = 0
    query = environ.get('QUERY_STRING')     # HTTP query string
    sp = query.find('=')                    # locate '=' in the query
    if sp >= 0 and sp + 1 < len(query):     # '=' found at a valid position
        if query[sp+1:].isdigit():          # value after '=' is numeric
            level = int(query[sp+1:])       # store the numeric value in level
            level %= 4                      # restrict level to 0-3
    print('level =', level)                 # show the level number
    ok = 'Level=' + str(level) + '\r\n'     # build the response text
    ok = ok.encode('utf-8')                 # convert to bytes
    start_response('200 OK', [('Content-type', 'text/plain; charset=utf-8')])
    if environ['PATH_INFO'] == '/':         # request path is the root
        thread = threading.Thread(target=chime, args=([level]))  # thread running chime()
        thread.start()                      # start the chime thread
    return [ok]                             # return the response body

print(argv[0])                              # show the program name
if len(argv) >= 2:                          # if a command-line argument was given
    port = int(argv[1])                     # use it as the GPIO port number
GPIO.setmode(GPIO.BCM)                      # use BCM (GPIO) pin numbering
GPIO.setup(port, GPIO.OUT)                  # configure GPIO "port" as an output
pwm = GPIO.PWM(port, ping_f)                # create a PWM instance for the buzzer
mutex = threading.Lock()                    # lock object for exclusive access to the PWM
try:
    httpd = make_server('', 80, wsgi_app)   # HTTP server on TCP port 80
    print("HTTP port 80")                   # report the port in use
except PermissionError:                     # port 80 not permitted (not root)
    httpd = make_server('', 8080, wsgi_app) # fall back to port 8080
    print("HTTP port 8080")                 # report the port in use
try:
    httpd.serve_forever()                   # run the HTTP server
except KeyboardInterrupt:                   # on Ctrl-C
    print('\nKeyboardInterrupt')            # report the keyboard interrupt
    GPIO.cleanup(port)                      # release the GPIO
    exit()                                  # terminate the program
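# ----------------------------------------------------------------------
# Usage sketch (not part of the original example): how a client could
# trigger the chime over HTTP. The hostname below is a placeholder for
# the Raspberry Pi running this script; port 8080 applies when the
# script could not bind port 80.
#
#   curl "http://raspberrypi.local/?level=0"        # normal two-tone chime
#   curl "http://raspberrypi.local/?level=2"        # warning pattern, level 2
#   curl "http://raspberrypi.local:8080/?level=3"   # repeated warning, level 3
#
# The server answers with "Level=N"; any numeric value is reduced modulo 4,
# and only requests to the root path "/" start the chime thread.
# ----------------------------------------------------------------------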
test_utility.py
import threading import pytest from base.client_base import TestcaseBase from base.utility_wrapper import ApiUtilityWrapper from utils.util_log import test_log as log from common import common_func as cf from common import common_type as ct from common.common_type import CaseLabel, CheckTasks prefix = "utility" default_schema = cf.gen_default_collection_schema() default_int64_field_name = ct.default_int64_field_name default_field_name = ct.default_float_vec_field_name default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}} default_dim = ct.default_dim default_nb = ct.default_nb num_loaded_entities = "num_loaded_entities" num_total_entities = "num_total_entities" class TestUtilityParams(TestcaseBase): """ Test case of index interface """ @pytest.fixture(scope="function", params=ct.get_invalid_strs) def get_invalid_metric_type(self, request): if request.param == [] or request.param == "": pytest.skip("metric empty is valid for distance calculation") if isinstance(request.param, str): pytest.skip("string is valid type for metric") yield request.param @pytest.fixture(scope="function", params=ct.get_invalid_strs) def get_invalid_metric_value(self, request): if request.param == [] or request.param == "": pytest.skip("metric empty is valid for distance calculation") if not isinstance(request.param, str): pytest.skip("Skip invalid type for metric") yield request.param @pytest.fixture(scope="function", params=["JACCARD", "Superstructure", "Substructure"]) def get_not_support_metric(self, request): yield request.param @pytest.fixture(scope="function", params=["metric_type", "metric"]) def get_support_metric_field(self, request): yield request.param @pytest.fixture(scope="function", params=ct.get_invalid_strs) def get_invalid_partition_names(self, request): if isinstance(request.param, list): if len(request.param) == 0: pytest.skip("empty is valid for partition") if request.param is None: pytest.skip("None is valid for partition") yield request.param """ ****************************************************************** # The followings are invalid cases ****************************************************************** """ @pytest.mark.tags(CaseLabel.L1) def test_has_collection_name_invalid(self, get_invalid_collection_name): """ target: test has_collection with error collection name method: input invalid name expected: raise exception """ self._connect() c_name = get_invalid_collection_name if isinstance(c_name, str) and c_name: self.utility_wrap.has_collection( c_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"}) # elif not isinstance(c_name, str): # self.utility_wrap.has_collection(c_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "illegal"}) @pytest.mark.tags(CaseLabel.L1) def test_has_partition_collection_name_invalid(self, get_invalid_collection_name): """ target: test has_partition with error collection name method: input invalid name expected: raise exception """ self._connect() c_name = get_invalid_collection_name p_name = cf.gen_unique_str(prefix) if isinstance(c_name, str) and c_name: self.utility_wrap.has_partition( c_name, p_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "Invalid"}) @pytest.mark.tags(CaseLabel.L1) def test_has_partition_name_invalid(self, get_invalid_partition_name): """ target: test has_partition with error partition name method: input invalid name expected: raise exception """ self._connect() ut = 
ApiUtilityWrapper() c_name = cf.gen_unique_str(prefix) p_name = get_invalid_partition_name if isinstance(p_name, str) and p_name: ex, _ = ut.has_partition( c_name, p_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "Invalid"}) @pytest.mark.tags(CaseLabel.L1) def test_drop_collection_name_invalid(self, get_invalid_collection_name): self._connect() error = f'`collection_name` value {get_invalid_collection_name} is illegal' self.utility_wrap.drop_collection(get_invalid_collection_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: error}) # TODO: enable @pytest.mark.tags(CaseLabel.L1) def test_list_collections_using_invalid(self): """ target: test list_collections with invalid using method: input invalid name expected: raise exception """ self._connect() using = "empty" ut = ApiUtilityWrapper() ex, _ = ut.list_collections(using=using, check_task=CheckTasks.err_res, check_items={ct.err_code: 0, ct.err_msg: "should create connect"}) @pytest.mark.tags(CaseLabel.L1) def test_index_process_invalid_name(self, get_invalid_collection_name): """ target: test building_process method: input invalid name expected: raise exception """ pass # self._connect() # c_name = get_invalid_collection_name # ut = ApiUtilityWrapper() # if isinstance(c_name, str) and c_name: # ex, _ = ut.index_building_progress(c_name, check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"}) # TODO: not support index name @pytest.mark.tags(CaseLabel.L1) def _test_index_process_invalid_index_name(self, get_invalid_index_name): """ target: test building_process method: input invalid index name expected: raise exception """ self._connect() c_name = cf.gen_unique_str(prefix) index_name = get_invalid_index_name ut = ApiUtilityWrapper() ex, _ = ut.index_building_progress(c_name, index_name) log.error(str(ex)) assert "invalid" or "illegal" in str(ex) @pytest.mark.tags(CaseLabel.L1) def test_wait_index_invalid_name(self, get_invalid_collection_name): """ target: test wait_index method: input invalid name expected: raise exception """ pass # self._connect() # c_name = get_invalid_collection_name # ut = ApiUtilityWrapper() # if isinstance(c_name, str) and c_name: # ex, _ = ut.wait_for_index_building_complete(c_name, check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"}) @pytest.mark.tags(CaseLabel.L1) def _test_wait_index_invalid_index_name(self, get_invalid_index_name): """ target: test wait_index method: input invalid index name expected: raise exception """ self._connect() c_name = cf.gen_unique_str(prefix) index_name = get_invalid_index_name ut = ApiUtilityWrapper() ex, _ = ut.wait_for_index_building_complete(c_name, index_name) log.error(str(ex)) assert "invalid" or "illegal" in str(ex) @pytest.mark.tags(CaseLabel.L1) @pytest.mark.parametrize("invalid_c_name", ["12-s", "12 s", "(mn)", "中文", "%$#"]) def test_loading_progress_invalid_collection_name(self, invalid_c_name): """ target: test loading progress with invalid collection name method: input invalid collection name expected: raise exception """ self._connect() c_name = cf.gen_unique_str(prefix) df = cf.gen_default_dataframe_data(nb=ct.default_nb) self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name) self.collection_wrap.load() error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(invalid_c_name)} self.utility_wrap.loading_progress(invalid_c_name, check_task=CheckTasks.err_res, check_items=error) @pytest.mark.tags(CaseLabel.L1) def 
test_loading_progress_not_existed_collection_name(self): """ target: test loading progress with invalid collection name method: input invalid collection name expected: raise exception """ self._connect() c_name = cf.gen_unique_str(prefix) df = cf.gen_default_dataframe_data(nb=ct.default_nb) self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name) self.collection_wrap.load() error = {ct.err_code: 1, ct.err_msg: "describe collection failed: can't find collection"} self.utility_wrap.loading_progress("not_existed_name", check_task=CheckTasks.err_res, check_items=error) @pytest.mark.tag(CaseLabel.L1) @pytest.mark.xfail(reason="issue #7613") def test_loading_progress_invalid_partition_names(self, get_invalid_partition_names): """ target: test loading progress with invalid partition names method: input invalid partition names expected: raise an exception """ collection_w = self.init_collection_general(prefix)[0] partition_names = get_invalid_partition_names err_msg = {ct.err_code: 0, ct.err_msg: "`partition_name_array` value {} is illegal".format(partition_names)} collection_w.load() self.utility_wrap.loading_progress(collection_w.name, partition_names, check_task=CheckTasks.err_res, check_items=err_msg) @pytest.mark.tag(CaseLabel.L1) @pytest.mark.xfail("issue #7613") @pytest.mark.parametrize("partition_names", [[ct.default_tag], [ct.default_partition_name, ct.default_tag]]) def test_loading_progress_not_existed_partitions(self, partition_names): """ target: test loading progress with not existed partitions method: input all or part not existed partition names expected: raise exception """ collection_w = self.init_collection_general(prefix)[0] log.debug(collection_w.num_entities) collection_w.load() err_msg = {ct.err_code: 0, ct.err_msg: "can't find partition"} self.utility_wrap.loading_progress(collection_w.name, partition_names, check_task=CheckTasks.err_res, check_items=err_msg) @pytest.mark.tags(CaseLabel.L1) def test_wait_for_loading_collection_not_existed(self): """ target: test wait for loading method: input collection not created before expected: raise exception """ self._connect() c_name = cf.gen_unique_str(prefix) self.utility_wrap.wait_for_loading_complete( c_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "can't find collection"}) @pytest.mark.tags(CaseLabel.L1) def test_wait_for_loading_partition_not_existed(self): """ target: test wait for loading method: input partition not created before expected: raise exception """ self._connect() collection_w = self.init_collection_wrap() self.utility_wrap.wait_for_loading_complete( collection_w.name, partition_names=[ct.default_tag], check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: f'partitionID of partitionName:{ct.default_tag} can not be find'}) def test_drop_collection_not_existed(self): """ target: test drop an not existed collection method: drop a not created collection expected: raise exception """ self._connect() c_name = cf.gen_unique_str(prefix) error = {ct.err_code: 0, ct.err_msg: "describe collection failed: can't find collection:"} self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error) @pytest.mark.tags(CaseLabel.L1) def test_calc_distance_left_vector_invalid_type(self, get_invalid_vector_dict): """ target: test calculated distance with invalid vectors method: input invalid vectors type expected: raise exception """ self._connect() invalid_vector = get_invalid_vector_dict if not 
isinstance(invalid_vector, dict): self.utility_wrap.calc_distance(invalid_vector, invalid_vector, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "vectors_left value {} " "is illegal".format(invalid_vector)}) @pytest.mark.tags(CaseLabel.L1) def test_calc_distance_left_vector_invalid_value(self, get_invalid_vector_dict): """ target: test calculated distance with invalid vectors method: input invalid vectors value expected: raise exception """ self._connect() invalid_vector = get_invalid_vector_dict if isinstance(invalid_vector, dict): self.utility_wrap.calc_distance(invalid_vector, invalid_vector, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "vectors_left value {} " "is illegal".format(invalid_vector)}) @pytest.mark.tags(CaseLabel.L1) def test_calc_distance_right_vector_invalid_type(self, get_invalid_vector_dict): """ target: test calculated distance with invalid vectors method: input invalid vectors type expected: raise exception """ self._connect() invalid_vector = get_invalid_vector_dict vector = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vector} if not isinstance(invalid_vector, dict): self.utility_wrap.calc_distance(op_l, invalid_vector, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "vectors_right value {} " "is illegal".format(invalid_vector)}) @pytest.mark.tags(CaseLabel.L1) def test_calc_distance_right_vector_invalid_value(self, get_invalid_vector_dict): """ target: test calculated distance with invalid vectors method: input invalid vectors value expected: raise exception """ self._connect() invalid_vector = get_invalid_vector_dict vector = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vector} if isinstance(invalid_vector, dict): self.utility_wrap.calc_distance(op_l, invalid_vector, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "vectors_right value {} " "is illegal".format(invalid_vector)}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_invalid_metric_type(self, get_support_metric_field, get_invalid_metric_type): """ target: test calculated distance with invalid metric method: input invalid metric expected: raise exception """ self._connect() vectors_l = cf.gen_vectors(default_nb, default_dim) vectors_r = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vectors_l} op_r = {"float_vectors": vectors_r} metric_field = get_support_metric_field metric = get_invalid_metric_type params = {metric_field: metric} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "params value {{'metric': {}}} " "is illegal".format(metric)}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_invalid_metric_value(self, get_support_metric_field, get_invalid_metric_value): """ target: test calculated distance with invalid metric method: input invalid metric expected: raise exception """ self._connect() vectors_l = cf.gen_vectors(default_nb, default_dim) vectors_r = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vectors_l} op_r = {"float_vectors": vectors_r} metric_field = get_support_metric_field metric = get_invalid_metric_value params = {metric_field: metric} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "{} metric type is invalid for " "float vector".format(metric)}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_not_support_metric(self, get_support_metric_field, 
get_not_support_metric): """ target: test calculated distance with invalid metric method: input invalid metric expected: raise exception """ self._connect() vectors_l = cf.gen_vectors(default_nb, default_dim) vectors_r = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vectors_l} op_r = {"float_vectors": vectors_r} metric_field = get_support_metric_field metric = get_not_support_metric params = {metric_field: metric} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "{} metric type is invalid for " "float vector".format(metric)}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_invalid_using(self, get_support_metric_field): """ target: test calculated distance with invalid using method: input invalid using expected: raise exception """ self._connect() vectors_l = cf.gen_vectors(default_nb, default_dim) vectors_r = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vectors_l} op_r = {"float_vectors": vectors_r} metric_field = get_support_metric_field params = {metric_field: "L2", "sqrt": True} using = "empty" self.utility_wrap.calc_distance(op_l, op_r, params, using=using, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "should create connect"}) @pytest.mark.tags(CaseLabel.L1) def test_calc_distance_not_match_dim(self): """ target: test calculated distance with invalid vectors method: input invalid vectors type and value expected: raise exception """ self._connect() dim = 129 vector_l = cf.gen_vectors(default_nb, default_dim) vector_r = cf.gen_vectors(default_nb, dim) op_l = {"float_vectors": vector_l} op_r = {"float_vectors": vector_r} self.utility_wrap.calc_distance(op_l, op_r, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "Cannot calculate distance between " "vectors with different dimension"}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_collection_before_load(self, get_support_metric_field): """ target: test calculated distance when entities is not ready method: calculate distance before load expected: raise exception """ self._connect() nb = 10 collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb, is_index=True) middle = len(insert_ids) // 2 op_l = {"ids": insert_ids[:middle], "collection": collection_w.name, "field": default_field_name} op_r = {"ids": insert_ids[middle:], "collection": collection_w.name, "field": default_field_name} metric_field = get_support_metric_field params = {metric_field: "L2", "sqrt": True} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.err_res, check_items={"err_code": 1, "err_msg": "collection {} was not " "loaded into memory)".format(collection_w.name)}) class TestUtilityBase(TestcaseBase): """ Test case of index interface """ @pytest.fixture(scope="function", params=["metric_type", "metric"]) def metric_field(self, request): yield request.param @pytest.fixture(scope="function", params=[True, False]) def sqrt(self, request): yield request.param @pytest.fixture(scope="function", params=["L2", "IP"]) def metric(self, request): yield request.param @pytest.fixture(scope="function", params=["HAMMING", "TANIMOTO"]) def metric_binary(self, request): yield request.param @pytest.mark.tags(CaseLabel.L1) def test_has_collection(self): """ target: test has_collection with collection name method: input collection name created before expected: True """ cw = self.init_collection_wrap() res, _ = self.utility_wrap.has_collection(cw.name) assert res is 
True @pytest.mark.tags(CaseLabel.L2) def test_has_collection_not_created(self): """ target: test has_collection with collection name which is not created method: input random collection name expected: False """ c_name = cf.gen_unique_str(prefix) _ = self.init_collection_wrap() res, _ = self.utility_wrap.has_collection(c_name) assert res is False @pytest.mark.tags(CaseLabel.L1) def test_has_collection_after_drop(self): """ target: test has_collection with collection name droped before method: input random collection name expected: False """ c_name = cf.gen_unique_str(prefix) cw = self.init_collection_wrap(name=c_name) res, _ = self.utility_wrap.has_collection(c_name) assert res is True cw.drop() res, _ = self.utility_wrap.has_collection(c_name) assert res is False @pytest.mark.tags(CaseLabel.L2) def test_has_partition(self): """ target: test has_partition with partition name method: input collection name and partition name created before expected: True """ c_name = cf.gen_unique_str(prefix) p_name = cf.gen_unique_str(prefix) cw = self.init_collection_wrap(name=c_name) self.init_partition_wrap(cw, p_name) res, _ = self.utility_wrap.has_partition(c_name, p_name) assert res is True @pytest.mark.tags(CaseLabel.L1) def test_has_partition_not_created(self): """ target: test has_partition with partition name method: input collection name, and partition name not created before expected: True """ c_name = cf.gen_unique_str(prefix) p_name = cf.gen_unique_str() self.init_collection_wrap(name=c_name) res, _ = self.utility_wrap.has_partition(c_name, p_name) assert res is False @pytest.mark.tags(CaseLabel.L1) def test_has_partition_after_drop(self): """ target: test has_partition with partition name method: input collection name, and partition name dropped expected: True """ c_name = cf.gen_unique_str(prefix) p_name = cf.gen_unique_str() cw = self.init_collection_wrap(name=c_name) pw = self.init_partition_wrap(cw, p_name) res, _ = self.utility_wrap.has_partition(c_name, p_name) assert res is True pw.drop() res, _ = self.utility_wrap.has_partition(c_name, p_name) assert res is False @pytest.mark.tags(CaseLabel.L2) def test_has_default_partition(self): """ target: test has_partition with '_default' partition method: input collection name and partition name created before expected: True """ c_name = cf.gen_unique_str(prefix) self.init_collection_wrap(name=c_name) res, _ = self.utility_wrap.has_partition(c_name, ct.default_partition_name) assert res is True @pytest.mark.tags(CaseLabel.L1) def test_list_collections(self): """ target: test list_collections method: create collection, list_collections expected: in the result """ c_name = cf.gen_unique_str(prefix) self.init_collection_wrap(name=c_name) res, _ = self.utility_wrap.list_collections() assert c_name in res # TODO: make sure all collections deleted @pytest.mark.tags(CaseLabel.L1) def _test_list_collections_no_collection(self): """ target: test list_collections method: no collection created, list_collections expected: length of the result equals to 0 """ self._connect() res, _ = self.utility_wrap.list_collections() assert len(res) == 0 @pytest.mark.tags(CaseLabel.L1) def test_index_process_collection_not_existed(self): """ target: test building_process method: input collection not created before expected: raise exception """ self._connect() c_name = cf.gen_unique_str(prefix) self.utility_wrap.index_building_progress( c_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "can't find collection"}) @pytest.mark.tags(CaseLabel.L1) 
def test_index_process_collection_empty(self): """ target: test building_process method: input empty collection expected: no exception raised """ c_name = cf.gen_unique_str(prefix) cw = self.init_collection_wrap(name=c_name) self.index_wrap.init_index(cw.collection, default_field_name, default_index_params) res, _ = self.utility_wrap.index_building_progress(c_name) exp_res = {'total_rows': 0, 'indexed_rows': 0} assert res == exp_res @pytest.mark.tags(CaseLabel.L1) def test_index_process_collection_insert_no_index(self): """ target: test building_process method: insert 1 entity, no index created expected: no exception raised """ nb = 1 c_name = cf.gen_unique_str(prefix) cw = self.init_collection_wrap(name=c_name) data = cf.gen_default_list_data(nb) cw.insert(data=data) error = {ct.err_code: 1, ct.err_msg: "no index is created"} self.utility_wrap.index_building_progress(c_name, check_task=CheckTasks.err_res, check_items=error) @pytest.mark.tags(CaseLabel.L1) def test_index_process_collection_index(self): """ target: test building_process method: 1.insert 1024 (because minSegmentSizeToEnableIndex=1024) 2.build(server does create index) and call building_process expected: indexed_rows=0 """ nb = 1024 c_name = cf.gen_unique_str(prefix) cw = self.init_collection_wrap(name=c_name) data = cf.gen_default_list_data(nb) cw.insert(data=data) cw.create_index(default_field_name, default_index_params) res, _ = self.utility_wrap.index_building_progress(c_name) assert res['indexed_rows'] == 0 assert res['total_rows'] == nb @pytest.mark.tags(CaseLabel.L1) def test_index_process_collection_indexing(self): """ target: test building_process method: 1.insert 2048 entities to ensure that server will build 2.call building_process during building expected: 2048 or less entities indexed """ nb = 2048 c_name = cf.gen_unique_str(prefix) cw = self.init_collection_wrap(name=c_name) data = cf.gen_default_list_data(nb) cw.insert(data=data) cw.create_index(default_field_name, default_index_params) res, _ = self.utility_wrap.index_building_progress(c_name) assert (0 < res['indexed_rows'] <= nb) assert res['total_rows'] == nb # for _ in range(2): # assert "indexed_rows" in res # assert res["indexed_rows"] <= nb # assert res["indexed_rows"] >= 0 # assert "total_rows" in res # assert res["total_rows"] == nb @pytest.mark.tags(CaseLabel.L1) def test_wait_index_collection_not_existed(self): """ target: test wait_index method: input collection not created before expected: raise exception """ self._connect() c_name = cf.gen_unique_str(prefix) self.utility_wrap.wait_for_index_building_complete( c_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "can't find collection"}) @pytest.mark.tags(CaseLabel.L1) def test_wait_index_collection_empty(self): """ target: test wait_index method: input empty collection expected: no exception raised """ self._connect() c_name = cf.gen_unique_str(prefix) cw = self.init_collection_wrap(name=c_name) cw.create_index(default_field_name, default_index_params) assert self.utility_wrap.wait_for_index_building_complete(c_name)[0] res, _ = self.utility_wrap.index_building_progress(c_name) exp_res = {'total_rows': 0, 'indexed_rows': 0} assert res == exp_res @pytest.mark.tags(CaseLabel.L1) def test_wait_index_collection_index(self): """ target: test wait_index method: insert 5000 entities, build and call wait_index expected: 5000 entity indexed """ nb = 5000 c_name = cf.gen_unique_str(prefix) cw = self.init_collection_wrap(name=c_name) data = cf.gen_default_list_data(nb) 
cw.insert(data=data) cw.create_index(default_field_name, default_index_params) res, _ = self.utility_wrap.wait_for_index_building_complete(c_name) assert res is True res, _ = self.utility_wrap.index_building_progress(c_name) assert res["indexed_rows"] == nb @pytest.mark.tags(CaseLabel.L1) def test_loading_progress_without_loading(self): """ target: test loading progress without loading method: insert and flush data, call loading_progress without loading expected: loaded entities is 0 """ collection_w = self.init_collection_wrap() df = cf.gen_default_dataframe_data() collection_w.insert(df) assert collection_w.num_entities == ct.default_nb exp_res = {num_loaded_entities: 0, num_total_entities: ct.default_nb} res, _ = self.utility_wrap.loading_progress(collection_w.name) assert res == exp_res @pytest.mark.tags(CaseLabel.L1) @pytest.mark.parametrize("nb", [ct.default_nb, 5000]) def test_loading_progress_collection(self, nb): """ target: test loading progress method: 1.insert, flush and load 2.call loading_progress expected: all entities are loaded, because load is synchronous """ # create, insert default_nb, flush and load collection_w = self.init_collection_general(prefix, insert_data=True, nb=nb)[0] res, _ = self.utility_wrap.loading_progress(collection_w.name) assert res[num_total_entities] == nb assert res[num_loaded_entities] == nb @pytest.mark.tags(CaseLabel.L1) @pytest.mark.xfail(reason="pymilvus issue #702") def test_loading_progress_with_async_load(self): """ target: test loading progress with async collection load method: 1.load collection with async=True 2.loading_progress expected: only part of the entities are loaded """ collection_w = self.init_collection_wrap() df = cf.gen_default_dataframe_data() collection_w.insert(df) assert collection_w.num_entities == ct.default_nb collection_w.load(_async=True) res, _ = self.utility_wrap.loading_progress(collection_w.name) assert (0 < res[num_loaded_entities] <= ct.default_nb) @pytest.mark.tags(CaseLabel.L1) def test_loading_progress_empty_collection(self): """ target: test loading_progress on an empty collection method: 1.create collection and no insert 2.loading_progress expected: 0 entities are loaded """ collection_w = self.init_collection_wrap() collection_w.load() res, _ = self.utility_wrap.loading_progress(collection_w.name) exp_res = {num_loaded_entities: 0, num_total_entities: 0} assert exp_res == res @pytest.mark.tags(CaseLabel.L1) def test_loading_progress_after_release(self): """ target: test loading progress after release method: insert and load data, then release the collection and call loading_progress expected: loaded entities is 0 """ collection_w = self.init_collection_general(prefix, insert_data=True)[0] collection_w.release() exp_res = {num_loaded_entities: 0, num_total_entities: ct.default_nb} res, _ = self.utility_wrap.loading_progress(collection_w.name) assert res == exp_res @pytest.mark.tags(CaseLabel.L2) def test_loading_progress_with_release_partition(self): """ target: test loading progress after releasing one partition method: 1.insert data into two partitions and flush 2.load collection and release one partition expected: only one partition's entities remain loaded """ half = ct.default_nb # insert entities into two partitions, collection flush and load collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half) partition_w.release() res = self.utility_wrap.loading_progress(collection_w.name)[0] assert res[num_total_entities] == half * 2 assert res[num_loaded_entities] == half @pytest.mark.tags(CaseLabel.L2) def 
test_loading_progress_with_load_partition(self): """ target: test loading progress after load partition method: 1.insert data into two partitions and flush 2.load one partition and loading progress expected: only one partition's entities are loaded """ half = ct.default_nb collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half) collection_w.release() partition_w.load() res = self.utility_wrap.loading_progress(collection_w.name)[0] assert res[num_total_entities] == half * 2 assert res[num_loaded_entities] == half @pytest.mark.tags(CaseLabel.L1) def test_loading_progress_with_partition(self): """ target: test loading progress with partition method: 1.insert data into two partitions and flush, and load 2.loading progress with one partition expected: the queried partition's entities are all loaded """ half = ct.default_nb collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half) res = self.utility_wrap.loading_progress(collection_w.name, partition_names=[partition_w.name])[0] assert res[num_total_entities] == half assert res[num_loaded_entities] == half @pytest.mark.tags(CaseLabel.L1) def test_wait_loading_collection_empty(self): """ target: test wait_for_loading method: input empty collection expected: no exception raised """ self._connect() cw = self.init_collection_wrap(name=cf.gen_unique_str(prefix)) cw.load() self.utility_wrap.wait_for_loading_complete(cw.name) res, _ = self.utility_wrap.loading_progress(cw.name) exp_res = {num_total_entities: 0, num_loaded_entities: 0} assert res == exp_res @pytest.mark.xfail(reason="pymilvus issue #702") @pytest.mark.tags(CaseLabel.L1) def test_wait_for_loading_complete(self): """ target: test wait for loading collection method: insert 6000 entities and wait for loading complete expected: after loading complete, loaded entities is 6000 """ nb = 6000 collection_w = self.init_collection_wrap() df = cf.gen_default_dataframe_data(nb) collection_w.insert(df) assert collection_w.num_entities == nb collection_w.load(_async=True) self.utility_wrap.wait_for_loading_complete(collection_w.name) res, _ = self.utility_wrap.loading_progress(collection_w.name) assert res[num_loaded_entities] == nb @pytest.mark.tags(CaseLabel.L0) def test_drop_collection(self): """ target: test utility drop collection by name method: input collection name and drop collection expected: collection is dropped """ c_name = cf.gen_unique_str(prefix) self.init_collection_wrap(c_name) assert self.utility_wrap.has_collection(c_name)[0] self.utility_wrap.drop_collection(c_name) assert not self.utility_wrap.has_collection(c_name)[0] def test_drop_collection_repeatedly(self): """ target: test drop collection repeatedly method: 1.collection.drop 2.utility.drop_collection expected: raise exception """ c_name = cf.gen_unique_str(prefix) collection_w = self.init_collection_wrap(c_name) assert self.utility_wrap.has_collection(c_name)[0] collection_w.drop() assert not self.utility_wrap.has_collection(c_name)[0] error = {ct.err_code: 1, ct.err_msg: "describe collection failed: can't find collection:"} self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error) def test_drop_collection_create_repeatedly(self): """ target: test repeatedly create and drop same name collection method: repeatedly create and drop collection expected: no exception """ from time import sleep loops = 3 c_name = cf.gen_unique_str(prefix) for _ in range(loops): self.init_collection_wrap(c_name) assert self.utility_wrap.has_collection(c_name)[0] 
self.utility_wrap.drop_collection(c_name) assert not self.utility_wrap.has_collection(c_name)[0] sleep(1) @pytest.mark.tags(CaseLabel.L1) def test_calc_distance_default(self): """ target: test calculated distance with default params method: calculated distance between two random vectors expected: distance calculated successfully """ self._connect() vectors_l = cf.gen_vectors(default_nb, default_dim) vectors_r = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vectors_l} op_r = {"float_vectors": vectors_r} self.utility_wrap.calc_distance(op_l, op_r, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_default_sqrt(self, metric_field, metric): """ target: test calculated distance with default param method: calculated distance with default sqrt expected: distance calculated successfully """ self._connect() vectors_l = cf.gen_vectors(default_nb, default_dim) vectors_r = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vectors_l} op_r = {"float_vectors": vectors_r} params = {metric_field: metric} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_default_metric(self, sqrt): """ target: test calculated distance with default param method: calculated distance with default metric expected: distance calculated successfully """ self._connect() vectors_l = cf.gen_vectors(default_nb, default_dim) vectors_r = cf.gen_vectors(default_nb, default_dim) op_l = {"float_vectors": vectors_l} op_r = {"float_vectors": vectors_r} params = {"sqrt": sqrt} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "sqrt": sqrt}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_binary_metric(self, metric_field, metric_binary): """ target: test calculate distance with binary vectors method: calculate distance between binary vectors expected: distance calculated successfully """ self._connect() nb = 10 raw_vectors_l, vectors_l = cf.gen_binary_vectors(nb, default_dim) raw_vectors_r, vectors_r = cf.gen_binary_vectors(nb, default_dim) op_l = {"bin_vectors": vectors_l} op_r = {"bin_vectors": vectors_r} params = {metric_field: metric_binary} vectors_l = raw_vectors_l vectors_r = raw_vectors_r self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric_binary}) @pytest.mark.tags(CaseLabel.L1) def test_calc_distance_from_collection_ids(self, metric_field, metric, sqrt): """ target: test calculated distance from collection entities method: both left and right vectors are from collection expected: distance calculated successfully """ self._connect() nb = 10 collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb) middle = len(insert_ids) // 2 vectors = vectors[0].loc[:, default_field_name] vectors_l = vectors[:middle] vectors_r = [] for i in range(middle): vectors_r.append(vectors[middle + i]) op_l = {"ids": insert_ids[:middle], "collection": collection_w.name, "field": default_field_name} op_r = {"ids": insert_ids[middle:], "collection": collection_w.name, "field": default_field_name} params = {metric_field: metric, "sqrt": sqrt} self.utility_wrap.calc_distance(op_l, op_r, params, 
check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric, "sqrt": sqrt}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_from_collections(self, metric_field, metric, sqrt): """ target: test calculated distance between entities from collections method: calculated distance between entities from two collections expected: distance calculated successfully """ self._connect() nb = 10 prefix_1 = "utility_distance" collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb) collection_w_1, vectors_1, _, insert_ids_1 = self.init_collection_general(prefix_1, True, nb) vectors_l = vectors[0].loc[:, default_field_name] vectors_r = vectors_1[0].loc[:, default_field_name] op_l = {"ids": insert_ids, "collection": collection_w.name, "field": default_field_name} op_r = {"ids": insert_ids_1, "collection": collection_w_1.name, "field": default_field_name} params = {metric_field: metric, "sqrt": sqrt} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric, "sqrt": sqrt}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_left_vector_and_collection_ids(self, metric_field, metric, sqrt): """ target: test calculated distance from collection entities method: set left vectors as random vectors, right vectors from collection expected: distance calculated successfully """ self._connect() nb = 10 collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb) middle = len(insert_ids) // 2 vectors = vectors[0].loc[:, default_field_name] vectors_l = cf.gen_vectors(nb, default_dim) vectors_r = [] for i in range(middle): vectors_r.append(vectors[middle + i]) op_l = {"float_vectors": vectors_l} op_r = {"ids": insert_ids[middle:], "collection": collection_w.name, "field": default_field_name} params = {metric_field: metric, "sqrt": sqrt} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric, "sqrt": sqrt}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_right_vector_and_collection_ids(self, metric_field, metric, sqrt): """ target: test calculated distance from collection entities method: set right vectors as random vectors, left vectors from collection expected: distance calculated successfully """ self._connect() nb = 10 collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb) middle = len(insert_ids) // 2 vectors = vectors[0].loc[:, default_field_name] vectors_l = vectors[:middle] vectors_r = cf.gen_vectors(nb, default_dim) op_l = {"ids": insert_ids[:middle], "collection": collection_w.name, "field": default_field_name} op_r = {"float_vectors": vectors_r} params = {metric_field: metric, "sqrt": sqrt} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric, "sqrt": sqrt}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_from_partition_ids(self, metric_field, metric, sqrt): """ target: test calculated distance from one partition entities method: both left and right vectors are from partition expected: distance calculated successfully """ self._connect() nb = 10 collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb, partition_num=1) partitions = collection_w.partitions middle = len(insert_ids) // 2 params 
= {metric_field: metric, "sqrt": sqrt} for i in range(len(partitions)): vectors_l = vectors[i].loc[:, default_field_name] vectors_r = vectors[i].loc[:, default_field_name] op_l = {"ids": insert_ids[:middle], "collection": collection_w.name, "partition": partitions[i].name, "field": default_field_name} op_r = {"ids": insert_ids[middle:], "collection": collection_w.name, "partition": partitions[i].name, "field": default_field_name} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric, "sqrt": sqrt}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_from_partitions(self, metric_field, metric, sqrt): """ target: test calculated distance between entities from partitions method: calculate distance between entities from two partitions expected: distance calculated successfully """ self._connect() nb = 10 collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb, partition_num=1) partitions = collection_w.partitions middle = len(insert_ids) // 2 params = {metric_field: metric, "sqrt": sqrt} vectors_l = vectors[0].loc[:, default_field_name] vectors_r = vectors[1].loc[:, default_field_name] op_l = {"ids": insert_ids[:middle], "collection": collection_w.name, "partition": partitions[0].name, "field": default_field_name} op_r = {"ids": insert_ids[middle:], "collection": collection_w.name, "partition": partitions[1].name, "field": default_field_name} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric, "sqrt": sqrt}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_left_vectors_and_partition_ids(self, metric_field, metric, sqrt): """ target: test calculated distance between vectors and partition entities method: set left vectors as random vectors, right vectors are entities expected: distance calculated successfully """ self._connect() nb = 10 collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb, partition_num=1) middle = len(insert_ids) // 2 partitions = collection_w.partitions vectors_l = cf.gen_vectors(nb // 2, default_dim) op_l = {"float_vectors": vectors_l} params = {metric_field: metric, "sqrt": sqrt} for i in range(len(partitions)): vectors_r = vectors[i].loc[:, default_field_name] op_r = {"ids": insert_ids[middle:], "collection": collection_w.name, "partition": partitions[i].name, "field": default_field_name} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric, "sqrt": sqrt}) @pytest.mark.tags(CaseLabel.L2) def test_calc_distance_right_vectors_and_partition_ids(self, metric_field, metric, sqrt): """ target: test calculated distance between vectors and partition entities method: set right vectors as random vectors, left vectors are entities expected: distance calculated successfully """ self._connect() nb = 10 collection_w, vectors, _, insert_ids = self.init_collection_general(prefix, True, nb, partition_num=1) middle = len(insert_ids) // 2 partitions = collection_w.partitions vectors_r = cf.gen_vectors(nb // 2, default_dim) op_r = {"float_vectors": vectors_r} params = {metric_field: metric, "sqrt": sqrt} for i in range(len(partitions)): vectors_l = vectors[i].loc[:, default_field_name] op_l = {"ids": insert_ids[middle:], "collection": collection_w.name, "partition": partitions[i].name, 
"field": default_field_name} self.utility_wrap.calc_distance(op_l, op_r, params, check_task=CheckTasks.check_distance, check_items={"vectors_l": vectors_l, "vectors_r": vectors_r, "metric": metric, "sqrt": sqrt}) class TestUtilityAdvanced(TestcaseBase): """ Test case of index interface """ @pytest.mark.tags(CaseLabel.L2) def test_has_collection_multi_collections(self): """ target: test has_collection with collection name method: input collection name created before expected: True """ c_name = cf.gen_unique_str(prefix) c_name_2 = cf.gen_unique_str(prefix) self.init_collection_wrap(name=c_name) self.init_collection_wrap(name=c_name_2) for name in [c_name, c_name_2]: res, _ = self.utility_wrap.has_collection(name) assert res is True @pytest.mark.tags(CaseLabel.L2) def test_list_collections_multi_collection(self): """ target: test list_collections method: create collection, list_collections expected: in the result """ c_name = cf.gen_unique_str(prefix) c_name_2 = cf.gen_unique_str(prefix) self.init_collection_wrap(name=c_name) self.init_collection_wrap(name=c_name_2) res, _ = self.utility_wrap.list_collections() for name in [c_name, c_name_2]: assert name in res def test_drop_multi_collection_concurrent(self): """ target: test concurrent drop collection method: multi thread drop one collection expected: drop successfully """ thread_num = 3 threads = [] c_names = [] num = 5 for i in range(thread_num*num): c_name = cf.gen_unique_str(prefix) self.init_collection_wrap(c_name) c_names.append(c_name) def create_and_drop_collection(names): for name in names: assert self.utility_wrap.has_collection(name)[0] self.utility_wrap.drop_collection(name) assert not self.utility_wrap.has_collection(name)[0] for i in range(thread_num): x = threading.Thread(target=create_and_drop_collection, args=(c_names[i*num:(i+1)*num],)) threads.append(x) x.start() for t in threads: t.join() log.debug(self.utility_wrap.list_collections()[0])
kb_uploadmethodsServer.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import datetime import json import os import random as _random import sys import traceback from getopt import getopt, GetoptError from multiprocessing import Process from os import environ from wsgiref.simple_server import make_server import requests as _requests from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \ JSONRPCError, InvalidRequestError from jsonrpcbase import ServerError as JSONServerError from biokbase import log from kb_uploadmethods.authclient import KBaseAuth as _KBaseAuth try: from ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser DEPLOY = 'KB_DEPLOYMENT_CONFIG' SERVICE = 'KB_SERVICE_NAME' AUTH = 'auth-service-url' # Note that the error fields do not match the 2.0 JSONRPC spec def get_config_file(): return environ.get(DEPLOY, None) def get_service_name(): return environ.get(SERVICE, None) def get_config(): if not get_config_file(): return None retconfig = {} config = ConfigParser() config.read(get_config_file()) for nameval in config.items(get_service_name() or 'kb_uploadmethods'): retconfig[nameval[0]] = nameval[1] return retconfig config = get_config() from kb_uploadmethods.kb_uploadmethodsImpl import kb_uploadmethods # noqa @IgnorePep8 impl_kb_uploadmethods = kb_uploadmethods(config) class JSONObjectEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) if isinstance(obj, frozenset): return list(obj) if hasattr(obj, 'toJSONable'): return obj.toJSONable() return json.JSONEncoder.default(self, obj) class JSONRPCServiceCustom(JSONRPCService): def call(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in a JSON string or None if there is none. Arguments: jsondata -- remote method call in jsonrpc format """ result = self.call_py(ctx, jsondata) if result is not None: return json.dumps(result, cls=JSONObjectEncoder) return None def _call_method(self, ctx, request): """Calls given method with given params and returns it value.""" method = self.method_data[request['method']]['method'] params = request['params'] result = None try: if isinstance(params, list): # Does it have enough arguments? if len(params) < self._man_args(method) - 1: raise InvalidParamsError('not enough arguments') # Does it have too many arguments? if(not self._vargs(method) and len(params) > self._max_args(method) - 1): raise InvalidParamsError('too many arguments') result = method(ctx, *params) elif isinstance(params, dict): # Do not accept keyword arguments if the jsonrpc version is # not >=1.1. if request['jsonrpc'] < 11: raise KeywordError result = method(ctx, **params) else: # No params result = method(ctx) except JSONRPCError: raise except Exception as e: # log.exception('method %s threw an exception' % request['method']) # Exception was raised inside the method. newerr = JSONServerError() newerr.trace = traceback.format_exc() if len(e.args) == 1: newerr.data = repr(e.args[0]) else: newerr.data = repr(e.args) raise newerr return result def call_py(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in python object format or None if there is none. This method is same as call() except the return value is a python object instead of JSON string. This method is mainly only useful for debugging purposes. 
""" rdata = jsondata # we already deserialize the json string earlier in the server code, no # need to do it again # try: # rdata = json.loads(jsondata) # except ValueError: # raise ParseError # set some default values for error handling request = self._get_default_vals() if isinstance(rdata, dict) and rdata: # It's a single request. self._fill_request(request, rdata) respond = self._handle_request(ctx, request) # Don't respond to notifications if respond is None: return None return respond elif isinstance(rdata, list) and rdata: # It's a batch. requests = [] responds = [] for rdata_ in rdata: # set some default values for error handling request_ = self._get_default_vals() self._fill_request(request_, rdata_) requests.append(request_) for request_ in requests: respond = self._handle_request(ctx, request_) # Don't respond to notifications if respond is not None: responds.append(respond) if responds: return responds # Nothing to respond. return None else: # empty dict, list or wrong type raise InvalidRequestError def _handle_request(self, ctx, request): """Handles given request and returns its response.""" if 'types' in self.method_data[request['method']]: self._validate_params_types(request['method'], request['params']) result = self._call_method(ctx, request) # Do not respond to notifications. if request['id'] is None: return None respond = {} self._fill_ver(request['jsonrpc'], respond) respond['result'] = result respond['id'] = request['id'] return respond class MethodContext(dict): def __init__(self, logger): self['client_ip'] = None self['user_id'] = None self['authenticated'] = None self['token'] = None self['module'] = None self['method'] = None self['call_id'] = None self['rpc_context'] = None self['provenance'] = None self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3']) self._logger = logger def log_err(self, message): self._log(log.ERR, message) def log_info(self, message): self._log(log.INFO, message) def log_debug(self, message, level=1): if level in self._debug_levels: pass else: level = int(level) if level < 1 or level > 3: raise ValueError("Illegal log level: " + str(level)) level = level + 6 self._log(level, message) def set_log_level(self, level): self._logger.set_log_level(level) def get_log_level(self): return self._logger.get_log_level() def clear_log_level(self): self._logger.clear_user_log_level() def _log(self, level, message): self._logger.log_message(level, message, self['client_ip'], self['user_id'], self['module'], self['method'], self['call_id']) def provenance(self): callbackURL = os.environ.get('SDK_CALLBACK_URL') if callbackURL: # OK, there's a callback server from which we can get provenance arg_hash = {'method': 'CallbackServer.get_provenance', 'params': [], 'version': '1.1', 'id': str(_random.random())[2:] } body = json.dumps(arg_hash) response = _requests.post(callbackURL, data=body, timeout=60) response.encoding = 'utf-8' if response.status_code == 500: if ('content-type' in response.headers and response.headers['content-type'] == 'application/json'): err = response.json() if 'error' in err: raise ServerError(**err['error']) else: raise ServerError('Unknown', 0, response.text) else: raise ServerError('Unknown', 0, response.text) if not response.ok: response.raise_for_status() resp = response.json() if 'result' not in resp: raise ServerError('Unknown', 0, 'An unknown server error occurred') return resp['result'][0] else: return self.get('provenance') class ServerError(Exception): ''' The call returned an error. 
Fields: name - the name of the error. code - the error code. message - a human readable error message. data - the server side stacktrace. ''' def __init__(self, name, code, message, data=None, error=None): super(Exception, self).__init__(message) self.name = name self.code = code self.message = message if message else '' self.data = data or error or '' # data = JSON RPC 2.0, error = 1.1 def __str__(self): return self.name + ': ' + str(self.code) + '. ' + self.message + \ '\n' + self.data def getIPAddress(environ): xFF = environ.get('HTTP_X_FORWARDED_FOR') realIP = environ.get('HTTP_X_REAL_IP') trustXHeaders = config is None or \ config.get('dont_trust_x_ip_headers') != 'true' if (trustXHeaders): if (xFF): return xFF.split(',')[0].strip() if (realIP): return realIP.strip() return environ.get('REMOTE_ADDR') class Application(object): # Wrap the wsgi handler in a class definition so that we can # do some initialization and avoid regenerating stuff over # and over def logcallback(self): self.serverlog.set_log_file(self.userlog.get_log_file()) def log(self, level, context, message): self.serverlog.log_message(level, message, context['client_ip'], context['user_id'], context['module'], context['method'], context['call_id']) def __init__(self): submod = get_service_name() or 'kb_uploadmethods' self.userlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, changecallback=self.logcallback, config=get_config_file()) self.serverlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, logfile=self.userlog.get_log_file()) self.serverlog.set_log_level(6) self.rpc_service = JSONRPCServiceCustom() self.method_authentication = dict() self.rpc_service.add(impl_kb_uploadmethods.upload_fastq_file, name='kb_uploadmethods.upload_fastq_file', types=[dict]) self.method_authentication['kb_uploadmethods.upload_fastq_file'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.upload_fasta_gff_file, name='kb_uploadmethods.upload_fasta_gff_file', types=[dict]) self.method_authentication['kb_uploadmethods.upload_fasta_gff_file'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.batch_import_genomes_from_staging, name='kb_uploadmethods.batch_import_genomes_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.batch_import_genomes_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.batch_import_assemblies_from_staging, name='kb_uploadmethods.batch_import_assemblies_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.batch_import_assemblies_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.unpack_staging_file, name='kb_uploadmethods.unpack_staging_file', types=[dict]) self.method_authentication['kb_uploadmethods.unpack_staging_file'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.unpack_web_file, name='kb_uploadmethods.unpack_web_file', types=[dict]) self.method_authentication['kb_uploadmethods.unpack_web_file'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_genbank_from_staging, name='kb_uploadmethods.import_genbank_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_genbank_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_sra_from_staging, name='kb_uploadmethods.import_sra_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_sra_from_staging'] = 'required' # noqa 
self.rpc_service.add(impl_kb_uploadmethods.import_sra_from_web, name='kb_uploadmethods.import_sra_from_web', types=[dict]) self.method_authentication['kb_uploadmethods.import_sra_from_web'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_fasta_as_assembly_from_staging, name='kb_uploadmethods.import_fasta_as_assembly_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_fasta_as_assembly_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_tsv_as_media_from_staging, name='kb_uploadmethods.import_tsv_as_media_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_tsv_as_media_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_excel_as_media_from_staging, name='kb_uploadmethods.import_excel_as_media_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_excel_as_media_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_tsv_or_excel_as_media_from_staging, name='kb_uploadmethods.import_tsv_or_excel_as_media_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_tsv_or_excel_as_media_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_file_as_fba_model_from_staging, name='kb_uploadmethods.import_file_as_fba_model_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_file_as_fba_model_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_tsv_as_expression_matrix_from_staging, name='kb_uploadmethods.import_tsv_as_expression_matrix_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_tsv_as_expression_matrix_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_reads_from_staging, name='kb_uploadmethods.import_reads_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_reads_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_tsv_as_phenotype_set_from_staging, name='kb_uploadmethods.import_tsv_as_phenotype_set_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_tsv_as_phenotype_set_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.import_attribute_mapping_from_staging, name='kb_uploadmethods.import_attribute_mapping_from_staging', types=[dict]) self.method_authentication['kb_uploadmethods.import_attribute_mapping_from_staging'] = 'required' # noqa self.rpc_service.add(impl_kb_uploadmethods.status, name='kb_uploadmethods.status', types=[dict]) authurl = config.get(AUTH) if config else None self.auth_client = _KBaseAuth(authurl) def __call__(self, environ, start_response): # Context object, equivalent to the perl impl CallContext ctx = MethodContext(self.userlog) ctx['client_ip'] = getIPAddress(environ) status = '500 Internal Server Error' try: body_size = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError): body_size = 0 if environ['REQUEST_METHOD'] == 'OPTIONS': # we basically do nothing and just return headers status = '200 OK' rpc_result = "" else: request_body = environ['wsgi.input'].read(body_size) try: req = json.loads(request_body) except ValueError as ve: err = {'error': {'code': -32700, 'name': "Parse error", 'message': str(ve), } } rpc_result = self.process_error(err, ctx, {'version': '1.1'}) else: ctx['module'], ctx['method'] = req['method'].split('.') ctx['call_id'] = req['id'] ctx['rpc_context'] = 
{ 'call_stack': [{'time': self.now_in_utc(), 'method': req['method']} ] } prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params'] } ctx['provenance'] = [prov_action] try: token = environ.get('HTTP_AUTHORIZATION') # parse out the method being requested and check if it # has an authentication requirement method_name = req['method'] auth_req = self.method_authentication.get( method_name, 'none') if auth_req != 'none': if token is None and auth_req == 'required': err = JSONServerError() err.data = ( 'Authentication required for ' + 'kb_uploadmethods ' + 'but no authentication header was passed') raise err elif token is None and auth_req == 'optional': pass else: try: user = self.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token except Exception as e: if auth_req == 'required': err = JSONServerError() err.data = \ "Token validation failed: %s" % e raise err if (environ.get('HTTP_X_FORWARDED_FOR')): self.log(log.INFO, ctx, 'X-Forwarded-For: ' + environ.get('HTTP_X_FORWARDED_FOR')) self.log(log.INFO, ctx, 'start method') rpc_result = self.rpc_service.call(ctx, req) self.log(log.INFO, ctx, 'end method') status = '200 OK' except JSONRPCError as jre: err = {'error': {'code': jre.code, 'name': jre.message, 'message': jre.data } } trace = jre.trace if hasattr(jre, 'trace') else None rpc_result = self.process_error(err, ctx, req, trace) except Exception: err = {'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error ' + 'occurred', } } rpc_result = self.process_error(err, ctx, req, traceback.format_exc()) # print('Request method was %s\n' % environ['REQUEST_METHOD']) # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ)) # print('Request body was: %s' % request_body) # print('Result from the method call is:\n%s\n' % \ # pprint.pformat(rpc_result)) if rpc_result: response_body = rpc_result else: response_body = '' response_headers = [ ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', environ.get( 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')), ('content-type', 'application/json'), ('content-length', str(len(response_body)))] start_response(status, response_headers) return [response_body.encode('utf8')] def process_error(self, error, context, request, trace=None): if trace: self.log(log.ERR, context, trace.split('\n')[0:-1]) if 'id' in request: error['id'] = request['id'] if 'version' in request: error['version'] = request['version'] e = error['error'].get('error') if not e: error['error']['error'] = trace elif 'jsonrpc' in request: error['jsonrpc'] = request['jsonrpc'] error['error']['data'] = trace else: error['version'] = '1.0' error['error']['error'] = trace return json.dumps(error) def now_in_utc(self): # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8 dtnow = datetime.datetime.now() dtutcnow = datetime.datetime.utcnow() delta = dtnow - dtutcnow hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60) return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm) application = Application() # This is the uwsgi application dictionary. On startup uwsgi will look # for this dict and pull its configuration from here. 
# This simply lists where to "mount" the application in the URL path # # This uwsgi module "magically" appears when running the app within # uwsgi and is not available otherwise, so wrap an exception handler # around it # # To run this server in uwsgi with 4 workers listening on port 9999 use: # uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_ # To run a using the single threaded python BaseHTTP service # listening on port 9999 by default execute this file # try: import uwsgi # Before we do anything with the application, see if the # configs specify patching all std routines to be asynch # *ONLY* use this if you are going to wrap the service in # a wsgi container that has enabled gevent, such as # uwsgi with the --gevent option if config is not None and config.get('gevent_monkeypatch_all', False): print("Monkeypatching std libraries for async") from gevent import monkey monkey.patch_all() uwsgi.applications = {'': application} except ImportError: # Not available outside of wsgi, ignore pass _proc = None def start_server(host='localhost', port=0, newprocess=False): ''' By default, will start the server on localhost on a system assigned port in the main thread. Excecution of the main thread will stay in the server main loop until interrupted. To run the server in a separate process, and thus allow the stop_server method to be called, set newprocess = True. This will also allow returning of the port number.''' global _proc if _proc: raise RuntimeError('server is already running') httpd = make_server(host, port, application) port = httpd.server_address[1] print("Listening on port %s" % port) if newprocess: _proc = Process(target=httpd.serve_forever) _proc.daemon = True _proc.start() else: httpd.serve_forever() return port def stop_server(): global _proc _proc.terminate() _proc = None def process_async_cli(input_file_path, output_file_path, token): exit_code = 0 with open(input_file_path) as data_file: req = json.load(data_file) if 'version' not in req: req['version'] = '1.1' if 'id' not in req: req['id'] = str(_random.random())[2:] ctx = MethodContext(application.userlog) if token: user = application.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token if 'context' in req: ctx['rpc_context'] = req['context'] ctx['CLI'] = 1 ctx['module'], ctx['method'] = req['method'].split('.') prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params']} ctx['provenance'] = [prov_action] resp = None try: resp = application.rpc_service.call_py(ctx, req) except JSONRPCError as jre: trace = jre.trace if hasattr(jre, 'trace') else None resp = {'id': req['id'], 'version': req['version'], 'error': {'code': jre.code, 'name': jre.message, 'message': jre.data, 'error': trace} } except Exception: trace = traceback.format_exc() resp = {'id': req['id'], 'version': req['version'], 'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error occurred', 'error': trace} } if 'error' in resp: exit_code = 500 with open(output_file_path, "w") as f: f.write(json.dumps(resp, cls=JSONObjectEncoder)) return exit_code if __name__ == "__main__": if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1])): token = None if len(sys.argv) == 4: if os.path.isfile(sys.argv[3]): with open(sys.argv[3]) as token_file: token = token_file.read() else: token = sys.argv[3] sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token)) try: opts, args = getopt(sys.argv[1:], "", ["port=", "host="]) except GetoptError 
as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
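# Illustrative usage sketch (not part of the generated server file above): running the
# service in a child process via the start_server()/stop_server() helpers defined above,
# e.g. from a local test harness. It assumes KB_DEPLOYMENT_CONFIG and KB_SERVICE_NAME are
# exported so get_config() finds a valid deploy.cfg before import; the import path is an
# assumption based on the package name used above.
from kb_uploadmethods.kb_uploadmethodsServer import start_server, stop_server

port = start_server(host='localhost', port=0, newprocess=True)  # returns the bound port
print('JSON-RPC endpoint: http://localhost:%d' % port)
# POST JSON-RPC bodies of the form
#   {'version': '1.1', 'id': '1', 'method': 'kb_uploadmethods.<method>', 'params': [...]}
# to that URL (an Authorization header is required for most methods), then shut down:
stop_server()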
__init__.py
""" .. moduleauthor:: Fabio Manganiello <blacklight86@gmail.com> """ import os import queue import stat import tempfile import time from enum import Enum from threading import Thread, Event, RLock from .core import Sound, Mix from platypush.context import get_bus from platypush.message.event.sound import \ SoundRecordingStartedEvent, SoundRecordingStoppedEvent from platypush.plugins import Plugin, action class PlaybackState(Enum): STOPPED = 'STOPPED', PLAYING = 'PLAYING', PAUSED = 'PAUSED' class RecordingState(Enum): STOPPED = 'STOPPED', RECORDING = 'RECORDING', PAUSED = 'PAUSED' class SoundPlugin(Plugin): """ Plugin to interact with a sound device. Triggers: * :class:`platypush.message.event.sound.SoundPlaybackStartedEvent` on playback start * :class:`platypush.message.event.sound.SoundPlaybackStoppedEvent` on playback stop * :class:`platypush.message.event.sound.SoundPlaybackPausedEvent` on playback pause * :class:`platypush.message.event.sound.SoundRecordingStartedEvent` on recording start * :class:`platypush.message.event.sound.SoundRecordingStoppedEvent` on recording stop * :class:`platypush.message.event.sound.SoundRecordingPausedEvent` on recording pause Requires: * **sounddevice** (``pip install sounddevice``) * **soundfile** (``pip install soundfile``) * **numpy** (``pip install numpy``) """ _STREAM_NAME_PREFIX = 'platypush-stream-' _default_input_stream_fifo = os.path.join(tempfile.gettempdir(), 'inputstream') # noinspection PyProtectedMember def __init__(self, input_device=None, output_device=None, input_blocksize=Sound._DEFAULT_BLOCKSIZE, output_blocksize=Sound._DEFAULT_BLOCKSIZE, **kwargs): """ :param input_device: Index or name of the default input device. Use :meth:`platypush.plugins.sound.query_devices` to get the available devices. Default: system default :type input_device: int or str :param output_device: Index or name of the default output device. Use :meth:`platypush.plugins.sound.query_devices` to get the available devices. Default: system default :type output_device: int or str :param input_blocksize: Blocksize to be applied to the input device. Try to increase this value if you get input overflow errors while recording. Default: 1024 :type input_blocksize: int :param output_blocksize: Blocksize to be applied to the output device. Try to increase this value if you get output underflow errors while playing. Default: 1024 :type output_blocksize: int """ super().__init__(**kwargs) self.input_device = input_device self.output_device = output_device self.input_blocksize = input_blocksize self.output_blocksize = output_blocksize self.playback_state = {} self.playback_state_lock = RLock() self.playback_paused_changed = {} self.stream_mixes = {} self.recording_state = RecordingState.STOPPED self.recording_state_lock = RLock() self.recording_paused_changed = Event() self.active_streams = {} self.stream_name_to_index = {} self.stream_index_to_name = {} self.completed_callback_events = {} @staticmethod def _get_default_device(category): """ Query the default audio devices. :param category: Device category to query. Can be either input or output :type category: str """ import sounddevice as sd return sd.query_hostapis()[0].get('default_' + category.lower() + '_device') @action def query_devices(self, category=None): """ Query the available devices :param category: Device category to query. Can be either input or output. Default: None (query all devices) :type category: str :returns: A dictionary representing the available devices. 
Example:: [ { "name": "pulse", "hostapi": 0, "max_input_channels": 32, "max_output_channels": 32, "default_low_input_latency": 0.008684807256235827, "default_low_output_latency": 0.008684807256235827, "default_high_input_latency": 0.034807256235827665, "default_high_output_latency": 0.034807256235827665, "default_samplerate": 44100 }, { "name": "default", "hostapi": 0, "max_input_channels": 32, "max_output_channels": 32, "default_low_input_latency": 0.008684807256235827, "default_low_output_latency": 0.008684807256235827, "default_high_input_latency": 0.034807256235827665, "default_high_output_latency": 0.034807256235827665, "default_samplerate": 44100 } ] """ import sounddevice as sd devs = sd.query_devices() if category == 'input': devs = [d for d in devs if d.get('max_input_channels') > 0] elif category == 'output': devs = [d for d in devs if d.get('max_output_channels') > 0] return devs def _play_audio_callback(self, q, blocksize, streamtype, stream_index): import sounddevice as sd is_raw_stream = streamtype == sd.RawOutputStream # noinspection PyUnusedLocal def audio_callback(outdata, frames, frame_time, status): if self._get_playback_state(stream_index) == PlaybackState.STOPPED: raise sd.CallbackStop while self._get_playback_state(stream_index) == PlaybackState.PAUSED: self.playback_paused_changed[stream_index].wait() if frames != blocksize: self.logger.warning('Received {} frames, expected blocksize is {}'. format(frames, blocksize)) return if status.output_underflow: self.logger.warning('Output underflow: increase blocksize?') outdata = (b'\x00' if is_raw_stream else 0.) * len(outdata) return if status: self.logger.warning('Audio callback failed: {}'.format(status)) try: data = q.get_nowait() except queue.Empty: self.logger.warning('Buffer is empty: increase buffersize?') raise sd.CallbackStop if len(data) < len(outdata): outdata[:len(data)] = data outdata[len(data):] = (b'\x00' if is_raw_stream else 0.) * \ (len(outdata) - len(data)) else: outdata[:] = data return audio_callback @action def play(self, file=None, sound=None, device=None, blocksize=None, bufsize=None, samplerate=None, channels=None, stream_name=None, stream_index=None): """ Plays a sound file (support formats: wav, raw) or a synthetic sound. :param file: Sound file path. Specify this if you want to play a file :type file: str :param sound: Sound to play. Specify this if you want to play synthetic sounds. You can also create polyphonic sounds by just calling play multiple times. :type sound: Sound. You can initialize it either from a list of `Sound` objects or from its JSON representation, e.g.:: { "midi_note": 69, # 440 Hz A "gain": 1.0, # Maximum volume "duration": 1.0 # 1 second or until release/pause/stop } :param device: Output device (default: default configured device or system default audio output if not configured) :type device: int or str :param blocksize: Audio block size (default: configured `output_blocksize` or 2048) :type blocksize: int :param bufsize: Size of the audio buffer (default: 20 frames for audio files, 2 frames for synth sounds) :type bufsize: int :param samplerate: Audio samplerate. Default: audio file samplerate if in file mode, 44100 Hz if in synth mode :type samplerate: int :param channels: Number of audio channels. Default: number of channels in the audio file in file mode, 1 if in synth mode :type channels: int :param stream_index: If specified, play to an already active stream index (you can get them through :meth:`platypush.plugins.sound.query_streams`). 
Default: creates a new audio stream through PortAudio. :type stream_index: int :param stream_name: Name of the stream to play to. If set, the sound will be played to the specified stream name, or a stream with that name will be created. If not set, and ``stream_index`` is not set either, then a new stream will be created on the next available index and named ``platypush-stream-<index>``. :type stream_name: str """ if not file and not sound: raise RuntimeError('Please specify either a file to play or a ' + 'list of sound objects') import sounddevice as sd if blocksize is None: blocksize = self.output_blocksize if bufsize is None: if file: bufsize = Sound._DEFAULT_FILE_BUFSIZE else: bufsize = Sound._DEFAULT_SYNTH_BUFSIZE q = queue.Queue(maxsize=bufsize) f = None t = 0. if file: file = os.path.abspath(os.path.expanduser(file)) if device is None: device = self.output_device if device is None: device = self._get_default_device('output') if file: import soundfile as sf f = sf.SoundFile(file) if not samplerate: samplerate = f.samplerate if f else Sound._DEFAULT_SAMPLERATE if not channels: channels = f.channels if f else 1 mix = None with self.playback_state_lock: stream_index, is_new_stream = self._get_or_allocate_stream_index( stream_index=stream_index, stream_name=stream_name) if sound and stream_index in self.stream_mixes: mix = self.stream_mixes[stream_index] mix.add(sound) if not mix: return None, "Unable to allocate the stream" self.logger.info(('Starting playback of {} to sound device [{}] ' + 'on stream [{}]').format( file or sound, device, stream_index)) if not is_new_stream: return # Let the existing callback handle the new mix # TODO Potential support also for mixed streams with # multiple sound files and synth sounds? try: # Audio queue pre-fill loop for _ in range(bufsize): if f: data = f.buffer_read(blocksize, dtype='float32') if not data: break else: duration = mix.duration() blocktime = float(blocksize / samplerate) next_t = min(t + blocktime, duration) \ if duration is not None else t + blocktime data = mix.get_wave(t_start=t, t_end=next_t, samplerate=samplerate) t = next_t if duration is not None and t >= duration: break q.put_nowait(data) # Pre-fill the audio queue stream = self.active_streams[stream_index] completed_callback_event = self.completed_callback_events[stream_index] if stream is None: streamtype = sd.RawOutputStream if file else sd.OutputStream stream = streamtype(samplerate=samplerate, blocksize=blocksize, device=device, channels=channels, dtype='float32', callback=self._play_audio_callback( q=q, blocksize=blocksize, streamtype=streamtype, stream_index=stream_index), finished_callback=completed_callback_event.set) self._start_playback(stream_index=stream_index, stream=stream) with stream: # Timeout set until we expect all the buffered blocks to # be consumed timeout = blocksize * bufsize / samplerate while True: while self._get_playback_state(stream_index) == \ PlaybackState.PAUSED: self.playback_paused_changed[stream_index].wait() if f: data = f.buffer_read(blocksize, dtype='float32') if not data: break else: duration = mix.duration() blocktime = float(blocksize / samplerate) next_t = min(t + blocktime, duration) \ if duration is not None else t + blocktime data = mix.get_wave(t_start=t, t_end=next_t, samplerate=samplerate) t = next_t if duration is not None and t >= duration: break if self._get_playback_state(stream_index) == \ PlaybackState.STOPPED: break try: q.put(data, timeout=timeout) except queue.Full as e: if self._get_playback_state(stream_index) != \ 
PlaybackState.PAUSED: raise e completed_callback_event.wait() except queue.Full as e: if stream_index is None or \ self._get_playback_state(stream_index) != PlaybackState.STOPPED: self.logger.warning('Playback timeout: audio callback failed?') finally: if f and not f.closed: f.close() f = None self.stop_playback([stream_index]) @action def stream_recording(self, device=None, fifo=None, duration=None, sample_rate=None, dtype='float32', blocksize=None, latency=0, channels=1): """ Return audio data from an audio source :param device: Input device (default: default configured device or system default audio input if not configured) :type device: int or str :param fifo: Path of the FIFO that will be used to exchange audio samples (default: /tmp/inputstream) :type fifo: str :param duration: Recording duration in seconds (default: record until stop event) :type duration: float :param sample_rate: Recording sample rate (default: device default rate) :type sample_rate: int :param dtype: Data type for the audio samples. Supported types: 'float64', 'float32', 'int32', 'int16', 'int8', 'uint8'. Default: float32 :type dtype: str :param blocksize: Audio block size (default: configured `input_blocksize` or 2048) :type blocksize: int :param latency: Device latency in seconds (default: 0) :type latency: float :param channels: Number of channels (default: 1) :type channels: int """ import sounddevice as sd self.recording_paused_changed.clear() if device is None: device = self.input_device if device is None: device = self._get_default_device('input') if sample_rate is None: dev_info = sd.query_devices(device, 'input') sample_rate = int(dev_info['default_samplerate']) if blocksize is None: blocksize = self.input_blocksize if not fifo: fifo = self._default_input_stream_fifo q = queue.Queue() # noinspection PyUnusedLocal def audio_callback(indata, frames, time_duration, status): while self._get_recording_state() == RecordingState.PAUSED: self.recording_paused_changed.wait() if status: self.logger.warning('Recording callback status: {}'.format(str(status))) q.put(indata.copy()) def streaming_thread(): try: with sd.InputStream(samplerate=sample_rate, device=device, channels=channels, callback=audio_callback, dtype=dtype, latency=latency, blocksize=blocksize): with open(fifo, 'wb') as audio_queue: self.start_recording() get_bus().post(SoundRecordingStartedEvent()) self.logger.info('Started recording from device [{}]'.format(device)) recording_started_time = time.time() while self._get_recording_state() != RecordingState.STOPPED \ and (duration is None or time.time() - recording_started_time < duration): while self._get_recording_state() == RecordingState.PAUSED: self.recording_paused_changed.wait() get_args = { 'block': True, 'timeout': max(0, duration - (time.time() - recording_started_time)), } if duration is not None else {} data = q.get(**get_args) if not len(data): continue audio_queue.write(data) except queue.Empty: self.logger.warning('Recording timeout: audio callback failed?') finally: self.stop_recording() get_bus().post(SoundRecordingStoppedEvent()) if os.path.exists(fifo): if stat.S_ISFIFO(os.stat(fifo).st_mode): self.logger.info('Removing previous input stream FIFO {}'.format(fifo)) os.unlink(fifo) else: raise RuntimeError('{} exists and is not a FIFO. 
Please remove it or rename it'.format(fifo)) os.mkfifo(fifo, 0o644) Thread(target=streaming_thread).start() @action def record(self, outfile=None, duration=None, device=None, sample_rate=None, format=None, blocksize=None, latency=0, channels=1, subtype='PCM_24'): """ Records audio to a sound file (support formats: wav, raw) :param outfile: Sound file (default: the method will create a temporary file with the recording) :type outfile: str :param duration: Recording duration in seconds (default: record until stop event) :type duration: float :param device: Input device (default: default configured device or system default audio input if not configured) :type device: int or str :param sample_rate: Recording sample rate (default: device default rate) :type sample_rate: int :param format: Audio format (default: WAV) :type format: str :param blocksize: Audio block size (default: configured `input_blocksize` or 2048) :type blocksize: int :param latency: Device latency in seconds (default: 0) :type latency: float :param channels: Number of channels (default: 1) :type channels: int :param subtype: Recording subtype - see `Soundfile docs - Subtypes <https://pysoundfile.readthedocs.io/en/0.9.0/#soundfile.available_subtypes>`_ for a list of the available subtypes (default: PCM_24) :type subtype: str """ def recording_thread(outfile, duration, device, sample_rate, format, blocksize, latency, channels, subtype): import sounddevice as sd self.recording_paused_changed.clear() if outfile: outfile = os.path.abspath(os.path.expanduser(outfile)) if os.path.isfile(outfile): self.logger.info('Removing existing audio file {}'.format(outfile)) os.unlink(outfile) else: outfile = tempfile.NamedTemporaryFile( prefix='recording_', suffix='.wav', delete=False, dir=tempfile.gettempdir()).name if device is None: device = self.input_device if device is None: device = self._get_default_device('input') if sample_rate is None: dev_info = sd.query_devices(device, 'input') sample_rate = int(dev_info['default_samplerate']) if blocksize is None: blocksize = self.input_blocksize q = queue.Queue() def audio_callback(indata, frames, duration, status): while self._get_recording_state() == RecordingState.PAUSED: self.recording_paused_changed.wait() if status: self.logger.warning('Recording callback status: {}'.format( str(status))) q.put({ 'timestamp': time.time(), 'frames': frames, 'time': duration, 'data': indata.copy() }) try: import soundfile as sf import numpy with sf.SoundFile(outfile, mode='w', samplerate=sample_rate, format=format, channels=channels, subtype=subtype) as f: with sd.InputStream(samplerate=sample_rate, device=device, channels=channels, callback=audio_callback, latency=latency, blocksize=blocksize): self.start_recording() get_bus().post(SoundRecordingStartedEvent(filename=outfile)) self.logger.info('Started recording from device [{}] to [{}]'. 
format(device, outfile)) recording_started_time = time.time() while self._get_recording_state() != RecordingState.STOPPED \ and (duration is None or time.time() - recording_started_time < duration): while self._get_recording_state() == RecordingState.PAUSED: self.recording_paused_changed.wait() get_args = { 'block': True, 'timeout': max(0, duration - (time.time() - recording_started_time)), } if duration is not None else {} data = q.get(**get_args) if data and time.time() - data.get('timestamp') <= 1.0: # Only write the block if the latency is still acceptable f.write(data['data']) f.flush() except queue.Empty: self.logger.warning('Recording timeout: audio callback failed?') finally: self.stop_recording() get_bus().post(SoundRecordingStoppedEvent(filename=outfile)) Thread(target=recording_thread, args=( outfile, duration, device, sample_rate, format, blocksize, latency, channels, subtype) ).start() @action def recordplay(self, duration=None, input_device=None, output_device=None, sample_rate=None, blocksize=None, latency=0, channels=1, dtype=None): """ Records audio and plays it on an output sound device (audio pass-through) :param duration: Recording duration in seconds (default: record until stop event) :type duration: float :param input_device: Input device (default: default configured device or system default audio input if not configured) :type input_device: int or str :param output_device: Output device (default: default configured device or system default audio output if not configured) :type output_device: int or str :param sample_rate: Recording sample rate (default: device default rate) :type sample_rate: int :param blocksize: Audio block size (default: configured `output_blocksize` or 2048) :type blocksize: int :param latency: Device latency in seconds (default: 0) :type latency: float :param channels: Number of channels (default: 1) :type channels: int :param dtype: Data type for the recording - see `Soundfile docs - Recording <https://python-sounddevice.readthedocs.io/en/0.3.12/_modules/sounddevice.html#rec>`_ for available types (default: input device default) :type dtype: str """ import sounddevice as sd self.recording_paused_changed.clear() if input_device is None: input_device = self.input_device if input_device is None: input_device = self._get_default_device('input') if output_device is None: output_device = self.output_device if output_device is None: output_device = self._get_default_device('output') if sample_rate is None: dev_info = sd.query_devices(input_device, 'input') sample_rate = int(dev_info['default_samplerate']) if blocksize is None: blocksize = self.output_blocksize # noinspection PyUnusedLocal def audio_callback(indata, outdata, frames, time, status): while self._get_recording_state() == RecordingState.PAUSED: self.recording_paused_changed.wait() if status: self.logger.warning('Recording callback status: {}'.format( str(status))) outdata[:] = indata stream_index = None try: import soundfile as sf import numpy stream_index = self._allocate_stream_index() stream = sd.Stream(samplerate=sample_rate, channels=channels, blocksize=blocksize, latency=latency, device=(input_device, output_device), dtype=dtype, callback=audio_callback) self.start_recording() self._start_playback(stream_index=stream_index, stream=stream) self.logger.info('Started recording pass-through from device ' + '[{}] to sound device [{}]'. 
format(input_device, output_device)) recording_started_time = time.time() while self._get_recording_state() != RecordingState.STOPPED \ and (duration is None or time.time() - recording_started_time < duration): while self._get_recording_state() == RecordingState.PAUSED: self.recording_paused_changed.wait() time.sleep(0.1) except queue.Empty as e: self.logger.warning('Recording timeout: audio callback failed?') finally: self.stop_playback([stream_index]) self.stop_recording() @action def query_streams(self): """ :returns: A list of active audio streams """ streams = { i: { attr: getattr(stream, attr) for attr in ['active', 'closed', 'stopped', 'blocksize', 'channels', 'cpu_load', 'device', 'dtype', 'latency', 'samplerate', 'samplesize'] if hasattr(stream, attr) } for i, stream in self.active_streams.items() } for i, stream in streams.items(): stream['playback_state'] = self.playback_state[i].name stream['name'] = self.stream_index_to_name.get(i) if i in self.stream_mixes: stream['mix'] = {j: sound for j, sound in enumerate(list(self.stream_mixes[i]))} return streams def _get_or_allocate_stream_index(self, stream_index=None, stream_name=None, completed_callback_event=None): stream = None with self.playback_state_lock: if stream_index is None: if stream_name is not None: stream_index = self.stream_name_to_index.get(stream_name) else: if stream_name is not None: raise RuntimeError('Redundant specification of both ' + 'stream_name and stream_index') if stream_index is not None: stream = self.active_streams.get(stream_index) if not stream: return (self._allocate_stream_index(stream_name=stream_name, completed_callback_event= completed_callback_event), True) return (stream_index, False) def _allocate_stream_index(self, stream_name=None, completed_callback_event=None): stream_index = None with self.playback_state_lock: for i in range(len(self.active_streams) + 1): if i not in self.active_streams: stream_index = i break if stream_index is None: raise RuntimeError('No stream index available') if stream_name is None: stream_name = self._STREAM_NAME_PREFIX + str(stream_index) self.active_streams[stream_index] = None self.stream_mixes[stream_index] = Mix() self.stream_index_to_name[stream_index] = stream_name self.stream_name_to_index[stream_name] = stream_index self.completed_callback_events[stream_index] = \ completed_callback_event if completed_callback_event else Event() return stream_index def _start_playback(self, stream_index, stream): with self.playback_state_lock: self.playback_state[stream_index] = PlaybackState.PLAYING self.active_streams[stream_index] = stream if isinstance(self.playback_paused_changed.get(stream_index), Event): self.playback_paused_changed[stream_index].clear() else: self.playback_paused_changed[stream_index] = Event() self.logger.info('Playback started on stream index {}'. format(stream_index)) return stream_index @action def stop_playback(self, streams=None): """ :param streams: Streams to stop by index or name (default: all) :type streams: list[int] or list[str] """ with self.playback_state_lock: streams = streams or self.active_streams.keys() if not streams: return completed_callback_events = {} for i in streams: stream = self.active_streams.get(i) if not stream: i = self.stream_name_to_index.get(i) stream = self.active_streams.get(i) if not stream: self.logger.info('No such stream index or name: {}'. 
format(i)) continue if self.completed_callback_events[i]: completed_callback_events[i] = self.completed_callback_events[i] self.playback_state[i] = PlaybackState.STOPPED for i, event in completed_callback_events.items(): event.wait() if i in self.completed_callback_events: del self.completed_callback_events[i] if i in self.active_streams: del self.active_streams[i] if i in self.stream_mixes: del self.stream_mixes[i] if i in self.stream_index_to_name: name = self.stream_index_to_name[i] del self.stream_index_to_name[i] if name in self.stream_name_to_index: del self.stream_name_to_index[name] self.logger.info('Playback stopped on streams [{}]'.format( ', '.join([str(stream) for stream in completed_callback_events.keys()]))) @action def pause_playback(self, streams=None): """ :param streams: Streams to pause by index (default: all) :type streams: list[int] """ with self.playback_state_lock: streams = streams or self.active_streams.keys() if not streams: return for i in streams: stream = self.active_streams.get(i) if not stream: i = self.stream_name_to_index.get(i) stream = self.active_streams.get(i) if not stream: self.logger.info('No such stream index or name: {}'. format(i)) continue stream = self.active_streams[i] if self.playback_state[i] == PlaybackState.PAUSED: self.playback_state[i] = PlaybackState.PLAYING elif self.playback_state[i] == PlaybackState.PLAYING: self.playback_state[i] = PlaybackState.PAUSED else: continue self.playback_paused_changed[i].set() self.logger.info('Playback pause toggled on streams [{}]'.format( ', '.join([str(stream) for stream in streams]))) def start_recording(self): with self.recording_state_lock: self.recording_state = RecordingState.RECORDING @action def stop_recording(self): with self.recording_state_lock: self.recording_state = RecordingState.STOPPED self.logger.info('Recording stopped') @action def pause_recording(self): with self.recording_state_lock: if self.recording_state == RecordingState.PAUSED: self.recording_state = RecordingState.RECORDING elif self.recording_state == RecordingState.RECORDING: self.recording_state = RecordingState.PAUSED else: return self.logger.info('Recording paused state toggled') self.recording_paused_changed.set() @action def release(self, stream_index=None, stream_name=None, sound_index=None, midi_note=None, frequency=None): """ Remove a sound from an active stream, either by sound index (use :meth:`platypush.sound.plugin.SoundPlugin.query_streams` to get the sounds playing on the active streams), midi_note, frequency or absolute file path. 
:param stream_index: Stream index (default: sound removed from all the active streams) :type stream_index: int :param stream_name: Stream name (default: sound removed from all the active streams) :type stream_index: str :param sound_index: Sound index :type sound_index: int :param midi_note: MIDI note :type midi_note: int :param frequency: Sound frequency :type frequency: float """ if stream_name: if stream_index: raise RuntimeError('stream_index and stream name are ' + 'mutually exclusive') stream_index = self.stream_name_to_index.get(stream_name) mixes = { i: mix for i, mix in self.stream_mixes.items() } if stream_index is None else { stream_index: self.stream_mixes[stream_index] } streams_to_stop = [] for i, mix in mixes.items(): for j, sound in enumerate(mix): if (sound_index is not None and j == sound_index) or \ (midi_note is not None and sound.get('midi_note') == midi_note) or \ (frequency is not None and sound.get('frequency') == frequency): if len(list(mix)) == 1: # Last sound in the mix streams_to_stop.append(i) else: mix.remove(j) if streams_to_stop: self.stop_playback(streams_to_stop) def _get_playback_state(self, stream_index): with self.playback_state_lock: return self.playback_state[stream_index] def _get_recording_state(self): with self.recording_state_lock: return self.recording_state # vim:sw=4:ts=4:et:
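
if __name__ == '__main__':
    # Hedged standalone sketch (not part of the plugin): the queue-fed callback
    # pattern that play() builds on, reduced to a plain sounddevice stream
    # playing a 440 Hz sine wave. The block, buffer and sample-rate values are
    # illustrative, not the plugin's configured defaults.
    import queue

    import numpy as np
    import sounddevice as sd

    samplerate, blocksize, bufsize = 44100, 2048, 20
    audio_q = queue.Queue(maxsize=bufsize)

    def sine_blocks(frequency=440.0, duration=2.0):
        """Yield consecutive float32 blocks of a sine wave."""
        total = int(duration * samplerate)
        for start in range(0, total, blocksize):
            n = min(blocksize, total - start)
            t = np.arange(start, start + n) / samplerate
            yield np.sin(2 * np.pi * frequency * t).astype('float32').reshape(-1, 1)

    def callback(outdata, frames, stream_time, status):
        if status:
            print(status)
        try:
            data = audio_q.get_nowait()
        except queue.Empty:
            # Producer is done and the buffer has drained: stop the stream
            raise sd.CallbackStop
        outdata[:len(data)] = data
        outdata[len(data):] = 0.

    blocks = sine_blocks()
    for _ in range(bufsize):          # pre-fill the queue, as play() does
        audio_q.put_nowait(next(blocks))

    with sd.OutputStream(samplerate=samplerate, blocksize=blocksize,
                         channels=1, dtype='float32', callback=callback):
        for block in blocks:          # keep feeding; put() blocks when full
            audio_q.put(block)
        sd.sleep(int(1000 * bufsize * blocksize / samplerate))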
mpl_process.py
import multiprocessing as mp
import signal
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from time import time, sleep
import sys
import queue


class PlotProcess:
    def __init__(self, fig_updater, interval=10):
        self._queue = mp.Queue()
        self._exit_event = mp.Event()
        args = (
            self._queue,
            self._exit_event,
            fig_updater,
            interval / 1000.0
        )
        self._process = mp.Process(target=plot_process_program, args=args, daemon=True)

    def start(self):
        self._process.start()

    def put_data(self, data):
        if self._process.exitcode is not None:
            self.close()
            raise PlotProccessDiedException
        try:
            self._queue.put(data)
        except BrokenPipeError:
            self.close()
            raise PlotProccessDiedException

    def close(self):
        if self._process.exitcode is None:
            self._exit_event.set()
            self._process.join(1)
            if self._process.exitcode is None:
                self._process.terminate()
                self._process.join(1)
                if self._process.exitcode is None:
                    raise RuntimeError


def plot_process_program(q, exit_event, fig_updater, interval):
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    last_t = None
    artists = []
    fig = plt.figure()

    def anim_func(frame):
        nonlocal artists, last_t
        if exit_event.is_set():
            sys.exit()
        if last_t:
            now = time()
            sleep_t = (last_t + interval - 0.001) - now
            if sleep_t > 0.001:
                sleep(sleep_t)
        last_t = time()
        data = None
        while True:
            try:
                data = q.get(timeout=0.001)
            except queue.Empty:
                break
        if data is not None:
            if not artists:
                artists = fig_updater.first(data)
            else:
                fig_updater.update(data)
        return artists

    fig_updater.setup(fig)
    anim = FuncAnimation(fig, anim_func, interval=0, blit=True)  # noqa: F841
    try:
        plt.show()
    except AttributeError:
        pass


class PlotProccessDiedException(Exception):
    pass


class FigureUpdater:
    def setup(self, fig):
        self.fig = fig

    def first(self, data):
        return []  # iterable of artists

    def update(self, data):
        pass


class ExampleFigureUpdater(FigureUpdater):
    def setup(self, fig):
        self.fig = fig
        self.ax = fig.subplots()
        self.ax.set_xlabel("t")
        self.ax.set_ylabel("sin(t)")

    def first(self, data):
        x, y = data
        self.line = self.ax.plot(x, y)[0]
        return [self.line]

    def update(self, data):
        x, y = data
        self.line.set_ydata(y)


# Example usage:
if __name__ == "__main__":
    import numpy as np

    fu = ExampleFigureUpdater()
    pp = PlotProcess(fu)
    pp.start()

    x = np.linspace(0, 10, 100)
    t0 = time()
    t = 0
    while t < 10:
        t = time() - t0
        y = np.sin(x + t)
        try:
            pp.put_data([x, y])
        except PlotProccessDiedException:
            exit()
        sleep(0.001)  # not necessary

    pp.close()
FilesManagement.py
import os import shutil import re import win32api import threading import zipfile def move_file(origen,destino): '''Mueve el archivo de la ruta de orgen a la ruta de destino, verifica que la extension del archivo sea la misma''' if file_type(origen) != "dir" and file_type(origen) != file_type(destino): raise ValueError("The extension of the origin and destination file does not match") #Raises an exception if the file type doesn´t match count = 0 #Aux variable to know the "number" of the file in case the file name is repeated in the destination folder while (isfile(origen) and isfile(destino)) or (isdir(origen) and isdir(destino)): #Verifies that the destination file or folder does not exist. count += 1 if isdir(origen): result=re.search(r".*\([0-9]+\)$",destino) #Verifies if the destination path already has a number at the end if result == None: destino += "({})".format(count) #If it does not have a number, it adds the count variable else: destino = "(".join(destino.split("(")[:-1])+"({})".format(count) #If it has a number at the end, it replaces it with the count value elif isfile(origen): extension = file_type(origen) result=re.search(r".*\([0-9]+\)\."+extension,destino) #Verifies if the destination path already has a number at the end if result == None: destino = destino.split(".")[0]+"({}).{}".format(count,extension) #If it has not a number, adds the count Variable else: destino = "(".join(destino.split("(")[:-1])+"({}).{}".format(count,extension) #If it has a number, replaces the number with the count value shutil.move(origen,destino) #Mueve el archivo de la ruta "origen" a la ruta "destino" def file_type(file): '''Devuelve la extension del archivo que entra como parametro''' if isdir(file): return "dir" #Retorna "dir" si el archivo es una carpeta return file.split(".")[-1] #Separa el string por puntos y devuelve el ultimo elemento de la lista, correspondiente a la extension del archivo def isdir(ruta): '''Verifica si la ruta ingresada es o no una carpeta''' return os.path.isdir(ruta) def isfile(ruta): '''Verifica si la ruta ingresada es o no un archivo''' return os.path.isfile(ruta) def unir_ruta(ruta,archivo): '''Retorna un string con la ruta absoluta del archivo''' return os.path.join(ruta,archivo) def lista_archivos(carpeta): '''Devuelve una lista con el nombre de todos los archivos y las sub carpetas de la carpeta que entra como parametro''' return os.listdir(carpeta) def fecha_creacion(archivo): '''Retorna la fecha de creación de un archivo, recive como entrada la ruta absoluta del archivo''' return os.path.getctime(archivo) def ordenar_por_fecha(carpeta,archivos): '''Retorna la lista de archivos y carpetas ordenada por su fecha de creación de menor a mayor (archivos mas viejos primero) Recive como parametros carpeta (La ruta absoluta de la carpeta) y archivos (la lista de los nombres de los arhivos de la carpeta)''' aux = [unir_ruta(carpeta,x) for x in archivos] #Crea una lista con las rutas absolutas de todos los archivos en la carpeta aux.sort(reverse=True,key=fecha_creacion) #Organiza la lista aux por su fecha de creacion y la Retorna return aux def clasificar_Archivo(archivo): '''Retorna un string con el nombre de la categoria del archivo Como entrada recive archivo, la ruta del archivo a clasificar''' imagenes=["JPG","JPEG","PNG","GIF"] #Lista con los formatos de imagenes mas comunes videos=["AVI","MP4","MPEG-4","MKV","FLV","MOV","WMV"] #Lista con los formatos de video mas comunes libros=["PDF","EPUB"] #Lista con los formatos de libros mas comunes ejecutables=["EXE"] 
#Lista con los formatos de archivos ejecutables compressedFolder=["ZIP","RAR","7Z"] #List of common compressed files' formats if isdir(archivo): return "folder" #retorna folder si el archivo es una carpeta if file_type(archivo).upper() in compressedFolder: return "compressed" #Returns compressed if the file is a compressed file if file_type(archivo).upper() in imagenes: return "imagen" #retorna imagen si el archivo es una imagen if file_type(archivo).upper() in videos: return "video" #retorna video si el archivo es un video if file_type(archivo).upper() in libros: return "libro" #retorna libro si el archivo es un libro if file_type(archivo).upper() in ejecutables: return "ejecutable" #retorna ejecutable si el archivo es un .exe return "otro" #Retorna otro si la extension del archivo no corresponde a ninguna categoria def delete_file(file): '''Elimina el archivo que entra como parametro''' os.remove(file) def create_dir(dir): '''Creates a new directory in the specidief path (dir)''' os.mkdir(dir) def get_filename(path): '''Return the name of the file for a given path ''' return os.path.basename(path) def showMessage(message,title="",type="info",urgent=True,stop=True): '''Displays a windows MessageBox message: The message to be displayed type: the type of message to be displayed ["info","exclamation","question","warming","asterisk","stop","error","hand"] urgent = boolean, indicates if the message will be shown on top of all other open apps or not Stop = booleam, indicates if the message will stop the program, or if it will run on a separate thread ''' if not stop: thread = threading.Thread(target=showMessage,args=(message,title,type,urgent)) #Creates a separate thread with the function thread.start() #Stars the thread else: types ={"info":0x00000040,"exclamation":0x00000030,"question":0x00000020,"warming":0x00000030, "asterisk":0x00000040,"stop":0x00000010,"error":0x00000010,"hand":0x00000010} #Dictionary that contains the values needed to display the selected icon if type not in types.keys(): #Verifies that the type is an available type type = "info" value = types[type] #int value with the configuration for the MessageBox command if urgent: value += 0x00001000 #Ads the top must configuration to show the message over all apps win32api.MessageBox(0, message, title, value) #Displays the message def unzip(path,outdir="",remove=False): '''Unzips the file in the path to the outdir folder path: Absolute path of the zip files outdir: Destination dir of the uncompressed folder remove: Boolean, indicates if the file needs to be removed after beeing descompressed''' if file_type(path).lower() != "zip": #Raises an error if the path file is not a zip file raise Exception ("{} is not a zip file!".format(path)) if outdir=="": #Sets the out dir to the same name as the file if this is not specified outdir = path.replace(".{}".format(file_type(path)),"") if not isdir(outdir): #Creates the out directory if this is not already created create_dir(outdir) with zipfile.ZipFile(path,"r") as zip_ref: zip_ref.extractall(outdir) #Extracts the zip file if remove: delete_file(path) #Removes the zip file if indicated so
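
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): sort every file in a
# folder into a sub-folder named after its category, using only the helpers
# defined above. The "~/Downloads" path is an illustrative placeholder.
def organize_folder(carpeta):
    '''Moves each file of the folder into a sub-folder named after its category'''
    for nombre in lista_archivos(carpeta):
        ruta = unir_ruta(carpeta, nombre)
        categoria = clasificar_Archivo(ruta)
        if categoria == "folder":
            continue  # leave existing sub-folders where they are
        destino = unir_ruta(carpeta, categoria)
        if not isdir(destino):
            create_dir(destino)
        move_file(ruta, unir_ruta(destino, nombre))


if __name__ == "__main__":
    organize_folder(os.path.expanduser("~/Downloads"))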
dark_reaper.py
# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <vgaronne@gmail.com>, 2016-2018 # - Martin Barisits <martin.barisits@cern.ch>, 2016 # - Thomas Beermann <thomas.beermann@cern.ch>, 2016-2019 # - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019 # - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019 # # PY3K COMPATIBLE ''' Dark Reaper is a daemon to manage quarantined file deletion. ''' import hashlib import logging import os import random import socket import sys import threading import time import traceback from rucio.common.config import config_get from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) from rucio.core import rse as rse_core from rucio.core.heartbeat import live, die, sanity_check from rucio.core.message import add_message from rucio.core.quarantined_replica import (list_quarantined_replicas, delete_quarantined_replicas, list_rses) from rucio.rse import rsemanager as rsemgr logging.getLogger("requests").setLevel(logging.CRITICAL) logging.basicConfig(stream=sys.stdout, level=getattr(logging, config_get('common', 'loglevel', raise_exception=False, default='DEBUG').upper()), format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s') GRACEFUL_STOP = threading.Event() def reaper(rses=[], worker_number=1, total_workers=1, chunk_size=100, once=False, scheme=None): """ Main loop to select and delete files. :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs. :param worker_number: The worker number. :param total_workers: The total number of workers. :param chunk_size: the size of chunk for deletion. :param once: If True, only runs one iteration of the main loop. :param scheme: Force the reaper to use a particular protocol, e.g., mock. 
""" logging.info('Starting Dark Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, str(rses)) pid = os.getpid() thread = threading.current_thread() hostname = socket.gethostname() executable = ' '.join(sys.argv) hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rses)).hexdigest() sanity_check(executable=None, hostname=hostname) while not GRACEFUL_STOP.is_set(): try: # heartbeat heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable) logging.info('Dark Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals())) nothing_to_do = True random.shuffle(rses) for rse_id in rses: rse = rse_core.get_rse_name(rse_id=rse_id) rse_info = rsemgr.get_rse_info(rse) replicas = list_quarantined_replicas(rse_id=rse_id, limit=chunk_size, worker_number=worker_number, total_workers=total_workers) rse_protocol = rse_core.get_rse_protocols(rse_id=rse_id) prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme) deleted_replicas = [] try: prot.connect() for replica in replicas: nothing_to_do = False try: pfn = str(rsemgr.lfns2pfns(rse_settings=rse_info, lfns=[{'scope': replica['scope'].external, 'name': replica['name'], 'path': replica['path']}], operation='delete', scheme=scheme).values()[0]) logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse) start = time.time() prot.delete(pfn) duration = time.time() - start logging.info('Dark Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, duration) add_message('deletion-done', {'scope': replica['scope'].external, 'name': replica['name'], 'rse': rse, 'rse_id': rse_id, 'file-size': replica.get('bytes') or 0, 'bytes': replica.get('bytes') or 0, 'url': pfn, 'duration': duration, 'protocol': prot.attributes['scheme']}) deleted_replicas.append(replica) except SourceNotFound: err_msg = 'Dark Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse) logging.warning(err_msg) deleted_replicas.append(replica) except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error: err_msg = 'Dark Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, str(error)) logging.warning(err_msg) add_message('deletion-failed', {'scope': replica['scope'].external, 'name': replica['name'], 'rse': rse, 'rse_id': rse_id, 'file-size': replica['bytes'] or 0, 'bytes': replica['bytes'] or 0, 'url': pfn, 'reason': str(error), 'protocol': prot.attributes['scheme']}) except: logging.critical(traceback.format_exc()) finally: prot.close() delete_quarantined_replicas(rse_id=rse_id, replicas=deleted_replicas) if once: break if once: break if nothing_to_do: logging.info('Dark Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers) time.sleep(60) except DatabaseException as error: logging.warning('Reaper: %s', str(error)) except: logging.critical(traceback.format_exc()) die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable) logging.info('Graceful stop requested') logging.info('Graceful stop done') return def stop(signum=None, frame=None): """ Graceful exit. 
""" GRACEFUL_STOP.set() def run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None, exclude_rses=None, include_rses=None, delay_seconds=0, all_rses=False): """ Starts up the reaper threads. :param total_workers: The total number of workers. :param chunk_size: the size of chunk for deletion. :param threads_per_worker: Total number of threads created by each worker. :param once: If True, only runs one iteration of the main loop. :param greedy: If True, delete right away replicas with tombstone. :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs. :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock. :param exclude_rses: RSE expression to exclude RSEs from the Reaper. :param include_rses: RSE expression to include RSEs. """ logging.info('main: starting processes') if all_rses: rses = list_rses() elif not rses: rses = [rse['id'] for rse in rse_core.list_rses()] else: rses = [rse_core.get_rse_id(rse=rse) for rse in rses] threads = [] for worker in range(total_workers): kwargs = {'worker_number': worker, 'total_workers': total_workers, 'rses': rses, 'once': once, 'chunk_size': chunk_size, 'scheme': scheme} threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers))) [t.start() for t in threads] while threads[0].is_alive(): [t.join(timeout=3.14) for t in threads]
Frontend.py
import threading from time import sleep from mopidy import core from MainScreen import MainScreen from InputManager import InputManager from DisplayObject import DisplayObject import pykka class FrontendAdafruitCharLCDPlate(pykka.ThreadingActor, core.CoreListener): def __init__(self, config, core): super(FrontendAdafruitCharLCDPlate, self).__init__() self.input_manager = InputManager() self.display_object = DisplayObject() if True: import Adafruit_CharLCD as LCD self.display = LCD.Adafruit_CharLCDPlate() else: from web_socket_lcd_simulator import WebSockectLCDSimulator self.display = WebSockectLCDSimulator() self.main_screen = MainScreen(core) self.running = True def on_start(self): # Add newline self.display.set_color(1.0, 0.0, 0.0) self.display.create_char(0, [16, 16, 16, 16, 16, 16, 0, 0]) self.display.create_char(1, [24, 24, 24, 24, 24, 24, 0, 0]) self.display.create_char(2, [28, 28, 28, 28, 28, 28, 0, 0]) self.display.create_char(3, [30, 30, 30, 30, 30, 30, 0, 0]) self.display.create_char(4, [31, 31, 31, 31, 31, 31, 0, 0]) try: self.display.on_start() except AttributeError: pass t = threading.Thread(target=self.start_working) t.start() def on_stop(self): self.running = False try: self.display.on_stop() except AttributeError: pass def send_screen_update(self): self.display.clear() self.display.message(self.display_object.getString()) def start_working(self): while self.running: self.update() sleep(0.03) def update(self): # Check inputs for event in self.input_manager.update(self.display): print event self.main_screen.input_event(event) if self.main_screen.check_and_update(self.display_object, True) or self.display_object.update(): self.send_screen_update() # Events def playback_state_changed(self, old_state, new_state): self.main_screen.playback_state_changed(old_state, new_state) def track_playback_started(self, tl_track): self.main_screen.track_playback_started(tl_track) def track_playback_ended(self, tl_track, time_position): self.main_screen.track_playback_ended(tl_track, time_position) def track_playback_paused(self, tl_track, time_position): self.main_screen.track_playback_paused(tl_track, time_position) def track_playback_resumed(self, tl_track, time_position): self.main_screen.track_playback_resumed(tl_track, time_position) def seeked(self, time_position): self.main_screen.seeked(time_position) def volume_changed(self, volume): self.main_screen.volume_changed(volume) def stream_title_changed(self, title): self.main_screen.stream_title_changed(title) def playlists_loaded(self): self.main_screen.playlists_loaded()
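
class ConsoleLCDStub(object):
    # Hedged test double (not part of the original frontend): implements just
    # the display calls made above (set_color, create_char, clear, message),
    # writing to stdout instead of driving the Adafruit plate, so the screen
    # logic can be exercised without the hardware. Button input is not covered
    # here, so InputManager.update() would still need its own stub.
    def set_color(self, red, green, blue):
        print('LCD backlight set to ({0}, {1}, {2})'.format(red, green, blue))

    def create_char(self, location, pattern):
        pass  # custom glyphs have no console equivalent

    def clear(self):
        print('-' * 16)

    def message(self, text):
        print(text)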
gui.py
from tkinter import * from ycore.application import * from ycore.module.core import * from ycore.network.network import * from ycore.network.util import * from ycore.builder import * from ycore.client import * from anagramscore import * import threading import json # Current GUI design is quick and dirty. # Better versions of this GUI--if I seek to continue developing # this nuclear meltdown--may come out in the future. Please bear with me. # TODO: Provide a unified controller for /ALL/ bots. class App: def __init__(self): root = Tk() root.geometry("350x300+300+300") frame = Frame(root) frame.pack(fill=BOTH, expand=1) # Make rows... frame.rowconfigure(0, pad=3) frame.rowconfigure(1, pad=3) frame.rowconfigure(2, pad=3) frame.rowconfigure(3, pad=3) frame.rowconfigure(4, pad=3, weight=1) frame.columnconfigure(0, pad=3, weight=1) frame.columnconfigure(1, pad=3) frame.columnconfigure(2, pad=3) frame.columnconfigure(3, pad=3) frame.columnconfigure(4, pad=3) #Make GUI... Label(frame, text="Username:").grid(sticky=W, row=0, column=0) self.username = Entry(frame) self.username.grid(row=1, sticky=E+W, column=0, columnspan=2, padx=5) Label(frame, text="Password:").grid(sticky=W, row=2, column=0) self.password = Entry(frame) self.password.grid(row=3, sticky=E+W, column=0, columnspan=2, padx=5) self.button_stop = Button(frame, text="Disconnect", width=12, state=DISABLED, command=self.disconnect) self.button_go = Button(frame, text="Connect", width=12, command=self.connect) self.button_clear = Button(frame, text="Clear", width=12, command=self.clear) self.button_configure = Button(frame, text="Reload config", width=24, command=self.loadConfig) self.button_stop.grid(row=1, column=2) self.button_go.grid(row=1, column=3) self.button_clear.grid(row=5, column=3) self.button_configure.grid(row=3, column=2, columnspan=2) self.text = Text(frame) self.text.grid(row=4, sticky=E+W+N+S, column=0, rowspan=1, columnspan=4, padx=5, pady=5) self.scrollbar_log = Scrollbar(frame, command=self.text.yview, orient=VERTICAL) self.scrollbar_log.grid(row=4, column=3, columnspan=4, sticky=N+S+E, padx=5) self.text.configure(yscrollcommand=self.scrollbar_log.set) # Load data... self.loadConfig() self.init() root.mainloop() def clear(self): self.text.delete("1.0", END) def loadConfig(self): # Load config data... self.config = ConfigLoader().load() self.user = self.config['user'] self.owner = self.config['owner'] self.passwd = self.config['password'] self.wordlist = self.config['wordlist'] self.bet = self.config['betamount'] self.betpercent = self.config['betpercent'] self.chat = self.config['chat'] self.ip = self.config['ip'] self.port = self.config['port'] self.username.delete(0, END) self.password.delete(0, END) self.password.insert(0, self.passwd) self.username.insert(0, self.user) def init(self): # Initialize core... logBuilder = ChatnetLogMethodBuilder(1).setMethod(WindowLogMethod(self.text)) logger = logBuilder.addFilter("^MSG:PUB(M)?:(.+?):(.+)$").build() clientBuilder = ChatnetClientBuilder().networkManager( LogWriter(ThrottledNetwork(ChatNetworkManager(self.ip, self.port)), logger)) self.client = clientBuilder.build() self.bot = BotApplication(self.client) self.bot.enableModule(CoreModule(self.username.get(), self.password.get())) # Could make observer... self.bot.enableModule(AnagramsModule(self.chat, self.betpercent, self.bet, self.wordlist, self.client.getQueue(), self.owner)) def connect(self): # Runs main bot thread... 
self.init() threading.Thread(target=self.bot.start).start() self.button_go.configure(state=DISABLED) self.button_stop.configure(state=ACTIVE) def disconnect(self): # Disconnects gracefully from server... try: self.client.getQueue().close() except: pass self.button_go.configure(state=ACTIVE) self.button_stop.configure(state=DISABLED) class ConfigLoader: def __init__(self, filename="config.ini"): self.filename = filename self.dict = {} def load(self): with open(self.filename) as f: self.dict = json.load(f) return self.dict class WindowLogMethod(LogMethod): def __init__(self, text): super().__init__() self.text = text def write(self, message): # Make template for message filter checking... for m in message.split('\n'): if (not super().matches(m)): self.text.insert(END, super().parse(m)) self.text.yview(END) App()
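
# Hedged helper sketch (not part of the original GUI): writes a config.ini
# with the keys that ConfigLoader/loadConfig expect. Every value below is a
# placeholder; note that App() above blocks in the Tk mainloop, so this helper
# would normally live in its own setup script.
def write_example_config(filename="config.ini"):
    example = {
        "user": "my_bot",
        "owner": "my_admin_account",
        "password": "change-me",
        "wordlist": "words.txt",
        "betamount": 100,
        "betpercent": 10,
        "chat": "main",
        "ip": "127.0.0.1",
        "port": 7777,
    }
    with open(filename, "w") as f:
        json.dump(example, f, indent=4)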
echo_server.py
import socket import ipaddress import sys import select import threading import queue import time import os from threading import Thread #initiliaze IP, port and name SIZE = 1024 PORT = int(sys.argv[1]) NAME = sys.argv[2] COUNT = 0 ip = socket.gethostbyname(socket.gethostname()) HOST = ip openservers = [] openservers.append(HOST) respondservers = [] respondservers.append(HOST) #listen UDP packets def listenUDP(): global COUNT s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind(('', PORT)) while True: data, addr = s.recvfrom(SIZE) if data: data = str(data) if data[2:10] == 'discover': # parsing if the received data is a discover bracIndex = data.find('[') bracStop = data.find(',', bracIndex) otherName = data [bracIndex + 1:bracStop] otherIPStop = data.find(',', bracStop + 1) otherIP = data[bracStop + 1:otherIPStop] duplicate = False for x in openservers: if (x == otherIP): duplicate = True if not duplicate and HOST != otherIP: print('From ', otherIP,', ', otherName + ' : DISCOVER ME!') openservers.append(otherName) resp = socket.socket(socket.AF_INET, socket.SOCK_STREAM) resp.settimeout(2) COUNT = COUNT + 1 if COUNT > 2: openservers.clear() respondservers.clear() openservers.append(HOST) respondservers.append(HOST) COUNT = 0 resp = socket.socket(socket.AF_INET, socket.SOCK_STREAM) resp.settimeout(2) try: if HOST != otherIP: resp.connect((addr[0], PORT)) msg = 'response,broadcast TCP,[' + NAME + ',' + HOST + ',' + 'response' + ']' msg = msg.encode('ascii') resp.sendall(bytes(msg)) resp.close() except: print('err 2') #listen TCP packets def listenTCP(): tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM) tcp.bind(('',PORT)) while True: tcp.listen() conn, addr = tcp.accept() data = conn.recv(SIZE) if data: data = str(data) if data [2:9] == 'message': # parsing if the received data is a massage nameIndex = data.find('[') nameStop = data.find(',', nameIndex) name = data [nameIndex + 1:nameStop] msgIndex = data.find(',', nameStop+1 ) msgIndex = data.find(',', msgIndex+1 ) msgStop = data.find(']', msgIndex+1 ) msg = data[msgIndex + 1 : msgStop] print('From ', addr,', ', name + ' : ' + msg) elif data [2:10] == 'response': # parsing if the received data is a response nameIndex = data.find('[') nameStop = data.find(',', nameIndex) name = data [nameIndex + 1:nameStop] otherIPStop = data.find(',', nameStop + 1) otherIP = data[nameStop + 1:otherIPStop] duplicate = False for x in respondservers: if (x == otherIP): duplicate = True if not duplicate and name != NAME: print('From ', otherIP,', ', name + ' : RESPONDED') respondservers.append(otherIP) def main(): global HOST ThreadUDP = Thread(target=listenUDP) ThreadTCP = Thread(target=listenTCP) ThreadUDP.start() ThreadTCP.start() # try to send discover message to open servers try: ann = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ann.bind(('',0)) ann.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,1) #ann.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) msg = 'discover,broadcast UDP,[' + NAME + ',' + HOST + ',' + 'announce' + ']' msg = msg.encode('ascii') #broadcast 3 times to cover up losses for x in range(3): ann.sendto(msg,('<broadcast>', PORT)) bcast = '192.168.1.' 
#for some reason the sendto() function with <broadcast> is not sent to #all ip addresses accross the network, #so it is done with the for loop below for y in range(256): bcastTO = bcast + str(y) for x in range(3): ann.sendto(msg,(bcastTO, PORT)) ann.close() except socket.error as socketerror: print('Error happened :(') print('Your server is now open on : ', HOST,':', PORT) if __name__ == ("__main__"): main()
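
# Hedged test helper (not part of the original server): broadcasts a single
# discover packet in the same format that listenUDP() parses, so a running
# instance can be exercised from another script or interpreter. The name,
# source IP and port arguments are placeholders supplied by the caller.
def send_discover_probe(name, host, port):
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    msg = 'discover,broadcast UDP,[{},{},announce]'.format(name, host)
    probe.sendto(msg.encode('ascii'), ('<broadcast>', port))
    probe.close()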
ib_api.py
''' This script will access the IB API and download the option chain for given securities ''' from threading import Thread from Queue import Queue from ib.opt import ibConnection from ib.ext.Contract import Contract from time import sleep import pandas as pd import logging import traceback # TODO debugging purposes only class IB_API(Thread): ''' This class will establish a connection to IB and group the different operations ''' def __init__(self, event, port=4001, client_id=0): ''' Connection to the IB API ''' super(IB_API, self).__init__() self.event = event # Create logger logging.basicConfig( level=logging.INFO, filename='ib_conn.log', format='%(asctime)s - %(levelname)s - %(message)s') # Create message queues self.input_queue = Queue() self.output_queue = Queue() # Connection variables self.connection = None self.port = port self.client_id = client_id self.reqId = 1 self.status = 'DISCONNECTED' self.keep_alive = True # Dict which relates contract ids with the req id used for retrieval self.reqId_ticker = {} self.opt_chain = MultiDict() self.stk_data = MultiDict() self.contracts = [] self.portfolio_positions = MultiDict() self.subscriptions = [] self.thread_exception_msg = None def run(self): ''' Thread runnable method ''' try: # Creation of Connection class logging.info('Establishing connection with client id #' + str(self.client_id)) self.connection = ibConnection( host='localhost', port=self.port, clientId=self.client_id) # Register data handlers self.connection.registerAll(self._receive_message) # Create a thread for sending messages self.sender_thread = Thread(target=self._send_messages) self.sender_thread.start() # Connect self.connection.connect() self.status = 'CONNECTED' # Check input messages while(self.keep_alive): while not self.input_queue.empty(): # Extract message from queue and process it self._process_message(self.input_queue.get()) if self.thread_exception_msg: logging.error(self.thread_exception_msg) self.close() except Exception, e: traceback.print_exc() logging.error(str(e)) self.stop() def stop(self): self.keep_alive = False self.disconnect() self.sender_thread.__stop = True self.__stop = True def disconnect(self): ''' Disconnect from IB API ''' if self.connection: self.connection.disconnect() self.status = 'DISCONNECTED' logging.info('Disconnected from IB server') def save_to_excel(self, opt_chains, output_file): ''' This function stores an option chain in given output excel file opt_chains -> dictionary of {ticker: dataframe} output_file -> output file where the data will be exported ''' writer = pd.ExcelWriter(output_file) for ticker in opt_chains.keys(): opt_chains[ticker].to_excel(writer, sheet_name=ticker) writer.save() def _receive_message(self, msg): ''' Callback method to receive messages from IB server ''' self.input_queue.put(msg) def _process_message(self, msg): ''' Callback method to process each different message ''' msg_reqId = msg.values()[0] logging.info('Received msg of type ' + str(msg.typeName) + ' (' + str(msg_reqId) + ')') if msg.typeName == 'managedAccounts': logging.info('Established connection with IB account ' + str(msg.accountsList)) # Ask for an account summary TODO under testing # self._get_account_summary(msg.accountsList, True) elif msg.typeName == 'nextValidId': logging.info('Next valid order id: ' + str(msg.orderId)) self.reqId = msg.orderId if msg.orderId == 1: # Clear contract dicts self.reqId_ticker = {} self.opt_chain = MultiDict() self.stk_data = MultiDict() self.contracts = [] elif msg.typeName == 'updateAccountTime': pass # 
TODO UNIMPLEMENTED elif msg.typeName == 'updateAccountValue': pass # TODO UNIMPLEMENTED elif msg.typeName == 'updatePortfolio': self._parsePortfolioData(msg) elif msg.typeName == 'contractDetails': contract = msg.contractDetails.m_summary self.contracts.append(contract) # Check security type at received contractDetails message if contract.m_secType == 'OPT': # Store list of available options into dict self._save_option_contracts_to_dict(contract) else: logging.info('WARNING: Received contractDetails for security ' 'type ' + str(contract.m_secType) + ': UNIMPLEMENTED') elif msg.typeName == 'contractDetailsEnd': logging.info('Received contractDetailsEnd for reqId ' + str(msg_reqId)) # Request market data snapshots to the server self._get_market_data(snapshot=True) elif msg.typeName == 'tickPrice': self._parse_tickPrice(msg) elif msg.typeName == 'tickOptionComputation': self._parse_tickOptionComputation(msg) elif msg.typeName == 'tickGeneric': self._parse_tickGeneric(msg) elif msg.typeName == 'tickSnapshotEnd': # Remove the req id from the list of requested ids, to be able to # determine when all the data has arrived if msg_reqId in self.reqId_ticker.keys(): del self.reqId_ticker[msg_reqId] # Print info logging.info('Received tickSnapshotEnd for reqId ' + str(msg_reqId) + '. Still pending ' + str(len(self.reqId_ticker))) # Check if all the expected data has arrived self.check_if_all_data_arrived() elif msg.typeName == 'currentTime': logging.info('Current server time: ' + str(msg.time)) elif msg.typeName == 'execDetails': logging.info('Received execDetails for reqId ' + str(msg_reqId)) elif msg.typeName == 'error': logging.error(str(msg.errorCode) + ' - ' + str(msg.errorMsg)) if msg.errorCode == 200: # Requested contract is ambiguous, remove from req_Id List if msg_reqId in self.reqId_ticker.keys(): del self.reqId_ticker[msg_reqId] # Check if all the expected data has arrived self.check_if_all_data_arrived() elif msg.errorCode == 326: # ClientId in use, raise exception and reconnect with different # id self.thread_exception_msg = msg.errorMsg elif msg.typeName == 'connectionClosed': logging.info('Connection has been closed') def check_if_all_data_arrived(self): ''' This method checks if all the expected data has arrived ''' if not self.reqId_ticker: logging.info('All requested market data has arrived') if not self.subscriptions: self.status = 'IDLE' logging.info('IB connection status set to IDLE') self.event.set() def _parse_tickPrice(self, msg): ''' This method parses received message of type tickPrice msg -> received message ''' if msg.tickerId not in self.reqId_ticker.keys(): return price = msg.price contract = self.reqId_ticker[msg.tickerId] underlying = contract.m_symbol contract_id = contract.m_localSymbol # Parse message if msg.field == 1: if contract.m_secType == 'OPT': self.opt_chain[underlying][contract_id]['bid'] = str(price) elif contract.m_secType == 'STK': self.stk_data[underlying]['bid'] = str(price) elif msg.field == 2: if contract.m_secType == 'OPT': self.opt_chain[underlying][contract_id]['ask'] = str(price) elif contract.m_secType == 'STK': self.stk_data[underlying]['ask'] = str(price) elif msg.field == 9: if contract.m_secType == 'OPT': self.opt_chain[underlying][contract_id]['close'] = str(price) elif contract.m_secType == 'STK': self.stk_data[underlying]['close'] = str(price) if self._check_underlying_data(underlying): self.cancel_subscription(msg.tickerId) def _parse_tickOptionComputation(self, msg): ''' This method parses received message of type tickOptionComputation 
msg -> received message ''' if msg.tickerId not in self.reqId_ticker.keys(): return underlying = self.reqId_ticker[msg.tickerId].m_symbol contract_id = self.reqId_ticker[msg.tickerId].m_localSymbol field = msg.field price = msg.values()[2] delta = msg.delta impliedVolatility = msg.impliedVol optPrice = msg.optPrice pvDividend = msg.pvDividend gamma = msg.gamma vega = msg.vega theta = msg.theta undPrice = msg.undPrice # Parse message if field == 10: self.opt_chain[underlying][contract_id]['bid_delta'] = str(delta) self.opt_chain[underlying][contract_id][ 'bid_impliedVolatility'] = (str(impliedVolatility)) self.opt_chain[underlying][contract_id]['bid_optPrice'] = ( str(optPrice)) self.opt_chain[underlying][contract_id]['bid_pvDividend'] = ( str(pvDividend)) self.opt_chain[underlying][contract_id]['bid_gamma'] = str(gamma) self.opt_chain[underlying][contract_id]['bid_vega'] = str(vega) self.opt_chain[underlying][contract_id]['bid_theta'] = str(theta) self.opt_chain[underlying][contract_id]['bid_undPrice'] = ( str(undPrice)) elif field == 11: self.opt_chain[underlying][contract_id]['ask_delta'] = str(delta) self.opt_chain[underlying][contract_id][ 'ask_impliedVolatility'] = (str(impliedVolatility)) self.opt_chain[underlying][contract_id]['ask_optPrice'] = ( str(optPrice)) self.opt_chain[underlying][contract_id]['ask_pvDividend'] = ( str(pvDividend)) self.opt_chain[underlying][contract_id]['ask_gamma'] = str(gamma) self.opt_chain[underlying][contract_id]['ask_vega'] = str(vega) self.opt_chain[underlying][contract_id]['ask_theta'] = str(theta) self.opt_chain[underlying][contract_id]['ask_undPrice'] = ( str(undPrice)) elif field == 24: self.opt_chain[underlying][contract_id]['iv'] = str(price) def _parse_tickGeneric(self, msg): ''' This method parses received message of type tickGenetic, which contains historical volatility (tick 23) or implied volatility (tick 24), among others msg -> received message ''' if msg.tickerId not in self.reqId_ticker.keys(): return underlying = self.reqId_ticker[msg.tickerId].m_symbol if msg.tickType == 23: self.stk_data[underlying]['hv'] = str(msg.value) logging.info('[HV] ' + underlying + ': ' + str(msg.value)) elif msg.tickType == 24: self.stk_data[underlying]['iv'] = str(msg.value) logging.info('[IV] ' + underlying + ': ' + str(msg.value)) if self._check_underlying_data(underlying): self.cancel_subscription(msg.tickerId) def get_option_contracts(self, tickers, expiry=None, strike=None): ''' Call for all the options contract for the underlying tickers -> List of tickers whose options will be requested to IB server expiry -> [Optional] Expiry date of the options to be retrieved. Can be used as a results filter. If expiry is not provided, it will download all available expiries strike -> [Optional] Strike of the options to be retrieved. Can be used as a results filter. 
If strike is not provided, it will download all available strikes ''' self.status = 'WORKING' for ticker in tickers: logging.info( 'Getting ' + str(ticker) + ' option contracts' + (' expiring on ' + str(expiry)) if expiry else '' + (' with strike ' + str(strike)) if strike else '') # Contract creation contract = Contract() contract.m_symbol = ticker contract.m_exchange = 'SMART' contract.m_secType = 'OPT' if expiry: contract.m_expiry = expiry if strike: contract.m_strike = strike self.connection.reqContractDetails(self.reqId, contract) logging.info('Requested contract details for ' + ticker + ' (' + str(self.reqId) + ')') self.reqId += 1 def _get_market_data(self, snapshot): ''' Requests all the options prices and greeks snapshot -> True if only a snapshot of market data is desired; False if a subscription is desired ''' self.status = 'WORKING' # Loop through all options contracts for contract in self.contracts: # Store the relationship between reqId and contract object self.reqId_ticker[self.reqId] = contract self.output_queue.put( (self.reqId, 'reqMktData', contract, snapshot)) # If it is a subscription, add reqId to the subscription list if not snapshot: self.subscriptions.append(self.reqId) self.reqId += 1 def cancel_subscription(self, req_id): ''' Cancels the data subscription associated to given req_id ''' if req_id in self.reqId_ticker.keys(): contract = self.reqId_ticker[req_id] self.output_queue.put((req_id, 'cancelMktData', contract, False)) def get_stock_historical_volatility(self, ticker): ''' Requests stock historical volatility ''' self.status = 'WORKING' # Create stock's contract stock_contract = Contract() stock_contract.m_symbol = ticker stock_contract.m_secType = 'STK' stock_contract.m_exchange = 'SMART' stock_contract.m_currency = 'USD' # Insert request to output queue self.reqId_ticker[self.reqId] = stock_contract self.output_queue.put( (self.reqId, 'reqStkHistoricalVol', stock_contract, False)) self.reqId += 1 def get_stock_implied_volatility(self, ticker, snapshot): ''' Requests stock implied volatility snapshot -> True if only a snapshot of market data is desired; False if a subscription is desired ''' self.status = 'WORKING' # Create stock's contract stock_contract = Contract() stock_contract.m_symbol = ticker stock_contract.m_secType = 'STK' stock_contract.m_exchange = 'SMART' stock_contract.m_currency = 'USD' # Insert request to output queue self.reqId_ticker[self.reqId] = stock_contract self.output_queue.put( (self.reqId, 'reqStkImpliedVol', stock_contract, snapshot)) # If it is a subscription, add reqId to the subscription list if not snapshot: self.subscriptions.append(self.reqId) self.reqId += 1 def _send_messages(self): ''' Method to send pending messages at output queue to the IB server ''' while True: while not self.output_queue.empty(): req_id, msgType, contract, snapshot = self.output_queue.get() if msgType == 'reqMktData': self.connection.reqMktData( req_id, contract, None, snapshot=snapshot) logging.info('Requested market data snapshot for ' + str(contract.m_localSymbol) + ' (' + str(req_id) + ')') elif msgType == 'cancelMktData': self.connection.cancelMktData(req_id) logging.info('Cancelled market data subscription for ' + str(contract.m_localSymbol) + ' (' + str(req_id) + ')') # Remove reqId from subscription list if req_id in self.subscriptions: self.subscriptions.remove(req_id) # If it does not belong to a subscription, remove the reqId # from the list of requested ids if req_id in self.reqId_ticker.keys(): del self.reqId_ticker[req_id] # Check if 
there is any pending job if not self.subscriptions and not self.reqId_ticker: self.status = 'IDLE' logging.info('IB connection status set to IDLE') self.event.set() elif msgType == 'reqStkHistoricalVol': # TODO Under test self.connection.reqMktData( req_id, contract, '104', snapshot=False) logging.info('Requested historical volatility for ' + str(contract.m_symbol) + ' (' + str(req_id) + ')') elif msgType == 'reqStkImpliedVol': self.connection.reqMktData( req_id, contract, '106', snapshot=False) logging.info('Requested implied volatility for ' + str(contract.m_symbol) + ' (' + str(req_id) + ')') elif msgType == 'reqAccountUpdates': # TODO Under test # Contract variable here refers to the account number self.connection.reqAccountUpdates(True, contract) # Sleep between messages to avoid collapsing IB server sleep(0.1) def _save_option_contracts_to_dict(self, opt_con): ''' It saves the options market data into the both the contracts dictionary and the option chain dictionary opt_con -> Contract details on an option ''' underlying = opt_con.m_symbol c_id = opt_con.m_localSymbol # contract id # Store data in the option chain dictionary self.opt_chain[underlying][c_id]['m_conId'] = opt_con.m_conId self.opt_chain[underlying][c_id]['m_expiry'] = opt_con.m_expiry self.opt_chain[underlying][c_id]['m_strike'] = opt_con.m_strike self.opt_chain[underlying][c_id]['m_right'] = opt_con.m_right self.opt_chain[underlying][c_id]['m_multiplier'] = opt_con.m_multiplier self.opt_chain[underlying][c_id]['m_currency'] = opt_con.m_currency self.opt_chain[underlying][c_id]['m_localSymbol'] = c_id def _check_underlying_data(self, underlying): ''' Checks if close price and IV of given underlying have been retrieved, and if so, cancels subscription. Returns true if all the data has been retrieved ''' return ('close' in self.stk_data[underlying].keys() and 'iv' in self.stk_data[underlying].keys()) def get_as_dataframe(self): # TODO Under test self.connection.reqCurrentTime() # TODO WTF? def _get_account_summary(self, accounts_list, snapshot): # TODO under test self.output_queue.put( (self.reqId, 'reqAccountUpdates', accounts_list, snapshot)) self.reqId += 1 def _parsePortfolioData(self, msg): # TODO Under test ''' Parses a updatePortfolio message to get data on every position we currently hold at our portfolio ''' symbol = msg.contract.m_localSymbol self.portfolio_positions[symbol]['contract'] = msg.contract self.portfolio_positions[symbol]['position'] = msg.position self.portfolio_positions[symbol]['marketPrice'] = msg.marketPrice self.portfolio_positions[symbol]['averageCost'] = msg.averageCost self.portfolio_positions[symbol]['unrealizedPNL'] = msg.unrealizedPNL logging.info('[PORTFOLIO] ' + str(msg.position) + ' ' + str(symbol) + ' bought @ ' + str(msg.averageCost) + ', now valued @ ' + str(msg.marketPrice) + ' (PL: ' + str(msg.unrealizedPNL) + ')') class IBAPI_ClientIdInUse(Exception): pass class MultiDict(dict): def __missing__(self, key): value = self[key] = type(self)() return value
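# Illustrative sketch, not part of the original module: MultiDict.__missing__
# auto-creates nested MultiDicts on first access, which is why the parsers
# above can assign opt_chain[underlying][contract_id][...] without ever
# initialising the intermediate levels. The ticker and contract id below are
# hypothetical.
def _multidict_demo():
    chain = MultiDict()
    chain['AAPL']['AAPL240119C00150000']['bid_delta'] = '0.55'
    return chain['AAPL']['AAPL240119C00150000']['bid_delta']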
__init__.py
import copy
import threading
import typing

StateType = typing.TypeVar('StateType')


class TimedThread(typing.Generic[StateType]):
    """
    This is a "Thread" class that runs a job for a maximum period of time.

    The class provides concurrency-safe methods to retrieve and persist a
    chunk of state.
    """

    def __init__(self, timeout_seconds: float, state: StateType) -> None:
        self.timeout_seconds = timeout_seconds
        self.__state = copy.deepcopy(state)
        self.lock = threading.Lock()
        self._exception: typing.Optional[Exception] = None

    def run(self) -> StateType:
        raise NotImplementedError()

    def _run(self) -> None:
        try:
            state = self.run()
        except Exception as e:
            self._exception = e
        else:
            self.save_state(state)

    def _start_async(self) -> None:
        self.thread = threading.Thread(target=self._run, daemon=True)
        self.thread.start()

    def _join(self) -> StateType:
        # Wait at most timeout_seconds; the last state saved so far is
        # returned even if the job is still running.
        self.thread.join(self.timeout_seconds)
        with self.lock:
            state = copy.deepcopy(self.__state)
        return state

    def start(self) -> StateType:
        self._start_async()
        state = self._join()
        if self._exception:
            raise self._exception
        return state

    def get_state_copy(self) -> StateType:
        with self.lock:
            state_copy = copy.deepcopy(self.__state)
        return state_copy

    def save_state(self, new_state: StateType) -> None:
        new_state = copy.deepcopy(new_state)
        with self.lock:
            self.__state = new_state
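# Hedged usage sketch for TimedThread; the CounterThread subclass and its
# parameters are illustrative, not part of the original file. run() computes a
# new state on a daemon thread, start() waits at most timeout_seconds and
# returns a deep copy of the last state saved.
class CounterThread(TimedThread[int]):
    def run(self) -> int:
        total = self.get_state_copy()
        for i in range(10):
            total += i
            self.save_state(total)  # persist intermediate progress
        return total


def _timed_thread_demo() -> int:
    # Returns 45 provided the job finishes within the timeout.
    return CounterThread(timeout_seconds=1.0, state=0).start()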
Create_ionisaton_fcoll_tables.py
import numpy import math from scipy import interpolate from decimal import * import string import pickle import os import multiprocessing from array import array # These are values set in ANAL_PARAMS.H and HEAT_PARAMS.H. Note: These must be changed by hand if any values in the .H files are changed Z_HEAT_MAX = 35. ZPRIME_STEP_FACTOR = 1.02 DELTA_R_HII_FACTOR = 1.1 FRACT_FLOAT_ERR = 1e-3 TWOPLACES = Decimal(10) ** -2 # same as Decimal('0.01') FOURPLACES = Decimal(10) ** -4 # same as Decimal('0.0001') SIXPLACES = Decimal(10) ** -6 # same as Decimal('0.000001') WalkerID1 = '1.000000' WalkerID2 = '1.000000' def worker(iterable,command_list): os.system(command_list[iterable]) def CreateFilestrings_and_Commands(R_OPTION,redshifts,R_STEPS,last_filter_R,Tvir_STEPS,PL_INDEX_STEPS,IncludeAlpha,R_MFP_MIN,R_BUBBLE_MAX, Tvir_MIN,Tvir_MAX,EFF_FACTOR_PL_INDEX_MIN,EFF_FACTOR_PL_INDEX_MAX,ALPHA_PL): command_list = [] filenames = [] command_file_remove = [] for i in range(len(redshifts)): if R_OPTION==1: for j in range(R_STEPS): R_MFP = R_MFP_MIN + (R_BUBBLE_MAX - R_MFP_MIN)*float(j)/(float(R_STEPS) - 1.) for k in range(Tvir_STEPS): T_VIR = Tvir_MIN + (Tvir_MAX - Tvir_MIN)*float(k)/(float(Tvir_STEPS) - 1.) if IncludeAlpha is True: for l in range(PL_INDEX_STEPS): ALPHA_PL = EFF_FACTOR_PL_INDEX_MIN + (EFF_FACTOR_PL_INDEX_MAX - EFF_FACTOR_PL_INDEX_MIN)*float(l)/(float(PL_INDEX_STEPS) - 1.) command = "./Createfcoll_ionisation_LC %s %s %s %s %s %s 0"%(WalkerID1,WalkerID2,Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),Decimal(repr(ALPHA_PL)).quantize(SIXPLACES)) command_list.append(command) filenames.append("Box_fcoll_z%s_%s_%s_%s_%i.txt"%(Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),Decimal(repr(ALPHA_PL)).quantize(SIXPLACES),0)) command = "rm Box_fcoll_z%s_%s_%s_%s_%i.txt"%(Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),Decimal(repr(ALPHA_PL)).quantize(SIXPLACES),0) command_file_remove.append(command) else: ALPHA_PL = Decimal(repr(float(ALPHA_PL))).quantize(SIXPLACES) command = "./Createfcoll_ionisation_LC %s %s %s %s %s %s 0"%(WalkerID1,WalkerID2,Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),ALPHA_PL) print command command_list.append(command) filenames.append("Box_fcoll_z%s_%s_%s_%s_%i.txt"%(Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),ALPHA_PL,0)) command = "rm Box_fcoll_z%s_%s_%s_%s_%i.txt"%(Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),ALPHA_PL,0) command_file_remove.append(command) if R_OPTION==0: R_MFP = last_filter_R for k in range(Tvir_STEPS): T_VIR = Tvir_MIN + (Tvir_MAX - Tvir_MIN)*float(k)/(float(Tvir_STEPS) - 1.) if IncludeAlpha is True: for l in range(PL_INDEX_STEPS): ALPHA_PL = EFF_FACTOR_PL_INDEX_MIN + (EFF_FACTOR_PL_INDEX_MAX - EFF_FACTOR_PL_INDEX_MIN)*float(l)/(float(PL_INDEX_STEPS) - 1.) 
command = "./Createfcoll_ionisation_LC %s %s %s %s %s %s 1"%(WalkerID1,WalkerID2,Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(last_filter_R)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),Decimal(repr(ALPHA_PL)).quantize(SIXPLACES)) command_list.append(command) filenames.append("Box_fcoll_z%s_%s_%s_%s_%i.txt"%(Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),Decimal(repr(ALPHA_PL)).quantize(SIXPLACES),1)) command = "rm Box_fcoll_z%s_%s_%s_%s_%i.txt"%(Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),Decimal(repr(ALPHA_PL)).quantize(SIXPLACES),1) command_file_remove.append(command) else: ALPHA_PL = Decimal(repr(float(ALPHA_PL))).quantize(SIXPLACES) command = "./Createfcoll_ionisation_LC %s %s %s %s %s %s 1"%(WalkerID1,WalkerID2,Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(last_filter_R)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),ALPHA_PL) # print command command_list.append(command) filenames.append("Box_fcoll_z%s_%s_%s_%s_%i.txt"%(Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),ALPHA_PL,1)) command = "rm Box_fcoll_z%s_%s_%s_%s_%i.txt"%(Decimal(repr(redshifts[i])).quantize(SIXPLACES),Decimal(repr(R_MFP)).quantize(SIXPLACES),Decimal(repr(T_VIR)).quantize(SIXPLACES),ALPHA_PL,1) command_file_remove.append(command) return command_list,filenames,command_file_remove def CreateTable(num_processes,command_list,filenames,command_file_remove,R_OPTION,redshifts,R_STEPS,Tvir_STEPS,PL_INDEX_STEPS,IncludeAlpha,R_BUBBLE_MAX,last_filter_R, Tvir_MIN,Tvir_MAX,EFF_FACTOR_PL_INDEX_MIN,EFF_FACTOR_PL_INDEX_MAX,ALPHA_PL): if IncludeAlpha is True: # This is used for the filter step (R_BUBBLE_MAX). This will be interpolated over R_MFP, T_VIR and ALPHA_PL in the drive_21cm_streamlined.c f_coll_table_first_step = numpy.zeros(len(redshifts)*R_STEPS*Tvir_STEPS*PL_INDEX_STEPS) # This is used for the final filter step (unfiltered density field). Will be fixed for all R_MFP. Therefore, this table is only interpolated over T_VIR and ALPHA_PL f_coll_table_last_step = numpy.zeros(len(redshifts)*Tvir_STEPS*PL_INDEX_STEPS) total_samples = len(redshifts)*Tvir_STEPS*PL_INDEX_STEPS*(1 + R_STEPS) else: # This is used for the filter step (R_BUBBLE_MAX). This will be interpolated over R_MFP and T_VIR in the drive_21cm_streamlined.c f_coll_table_first_step = numpy.zeros(len(redshifts)*R_STEPS*Tvir_STEPS) # This is used for the final filter step (unfiltered density field). Will be fixed for all R_MFP. 
Therefore, this table is only interpolated over T_VIR f_coll_table_last_step = numpy.zeros(len(redshifts)*Tvir_STEPS) total_samples = len(redshifts)*Tvir_STEPS*(1 + R_STEPS) f_coll_vals = numpy.zeros(total_samples) num_divisions = int(numpy.floor(len(command_list)/num_processes)) counter = 0 for i in xrange(num_divisions): processes = [] for ii in xrange(num_processes): p = multiprocessing.Process(target=worker, args=(ii + num_processes*i,command_list)) p.start() processes.append(p) for p in processes: p.join() for ii in xrange(num_processes): f_coll_val = numpy.loadtxt('%s'%(filenames[counter]), usecols=(0,)) f_coll_vals[counter] = f_coll_val os.system(command_file_remove[counter]) counter += 1 remainder = len(command_list)%num_processes processes = [] for ii in xrange(remainder): p = multiprocessing.Process(target=worker, args=(ii + num_divisions*num_processes,command_list)) p.start() processes.append(p) for p in processes: p.join() for ii in xrange(remainder): f_coll_val = numpy.loadtxt('%s'%(filenames[counter]), usecols=(0,)) f_coll_vals[counter] = f_coll_val os.system(command_file_remove[counter]) counter += 1 counter = 0 if R_OPTION == 1: for i in range(len(redshifts)): for j in range(R_STEPS): for k in range(Tvir_STEPS): if IncludeAlpha is True: for l in range(PL_INDEX_STEPS): f_coll_table_first_step[l + PL_INDEX_STEPS*( k + Tvir_STEPS*( j + R_STEPS*i ) )] = f_coll_vals[counter] counter += 1 else: f_coll_table_first_step[k + Tvir_STEPS*( j + R_STEPS*i )] = f_coll_vals[counter] counter += 1 if IncludeAlpha is True: output_file = open('Ionisation_fcoll_table_Rmax%s_Tmin%s_Tmax%s_PLmin%s_PLmax%s_%d_%sMpc'%(Decimal(repr(R_BUBBLE_MAX)).quantize(SIXPLACES),Decimal(repr(Tvir_MIN)).quantize(SIXPLACES),Decimal(repr(Tvir_MAX)).quantize(SIXPLACES),Decimal(repr(EFF_FACTOR_PL_INDEX_MIN)).quantize(SIXPLACES),Decimal(repr(EFF_FACTOR_PL_INDEX_MAX)).quantize(SIXPLACES),HII_DIM,BOX_LEN), 'wb') else: output_file = open('Ionisation_fcoll_table_Rmax%s_Tmin%s_Tmax%s_PL%s_%d_%sMpc'%(Decimal(repr(R_BUBBLE_MAX)).quantize(SIXPLACES),Decimal(repr(Tvir_MIN)).quantize(SIXPLACES),Decimal(repr(Tvir_MAX)).quantize(SIXPLACES),Decimal(repr(ALPHA_PL)).quantize(SIXPLACES),HII_DIM,BOX_LEN), 'wb') f_coll_table_first_step.tofile(output_file, format = 'float64') output_file.close() if R_OPTION == 0: for i in range(len(redshifts)): for k in range(Tvir_STEPS): if IncludeAlpha is True: for l in range(PL_INDEX_STEPS): f_coll_table_last_step[l + PL_INDEX_STEPS*( k + Tvir_STEPS*i )] = f_coll_vals[counter] counter += 1 else: f_coll_table_last_step[k + Tvir_STEPS*i] = f_coll_vals[counter] counter += 1 if IncludeAlpha is True: output_file = open('Ionisation_fcoll_table_final_Rmax%s_Tmin%s_Tmax%s_PLmin%s_PLmax%s_%d_%sMpc'%(Decimal(repr(R_BUBBLE_MAX)).quantize(SIXPLACES),Decimal(repr(Tvir_MIN)).quantize(SIXPLACES),Decimal(repr(Tvir_MAX)).quantize(SIXPLACES),Decimal(repr(EFF_FACTOR_PL_INDEX_MIN)).quantize(SIXPLACES),Decimal(repr(EFF_FACTOR_PL_INDEX_MAX)).quantize(SIXPLACES),HII_DIM,BOX_LEN), 'wb') else: output_file = open('Ionisation_fcoll_table_final_Rmax%s_Tmin%s_Tmax%s_PL%s_%d_%sMpc'%(Decimal(repr(R_BUBBLE_MAX)).quantize(SIXPLACES),Decimal(repr(Tvir_MIN)).quantize(SIXPLACES),Decimal(repr(Tvir_MAX)).quantize(SIXPLACES),Decimal(repr(ALPHA_PL)).quantize(SIXPLACES),HII_DIM,BOX_LEN), 'wb') f_coll_table_last_step.tofile(output_file, format = 'float64') output_file.close() if __name__ == '__main__': # If the full spin temperature computation is to be performed, a redshift must be provided to which to perform the evolution down to. 
TsCalc_z = 6.0 redshifts = [] z_prime = TsCalc_z*1.0001 while (z_prime < Z_HEAT_MAX): z_prime = ((1.+z_prime)*ZPRIME_STEP_FACTOR - 1.) prev_z_prime = Z_HEAT_MAX z_prime = ((1.+z_prime)/ ZPRIME_STEP_FACTOR - 1.) while (z_prime > TsCalc_z): redshifts.append(z_prime) prev_z_prime = z_prime z_prime = ((1.+prev_z_prime) / ZPRIME_STEP_FACTOR - 1.) redshifts = numpy.array(redshifts) # L_FACTOR taken as set in ANAL_PARAMS.H (Note: These must be changed by hand if any values in the .H files are changed) L_FACTOR = 0.620350491 # R_BUBBLE_MIN taken as set in ANAL_PARAMS.H R_BUBBLE_MIN = L_FACTOR # R_BUBBLE_MAX is the maximum allowed range as set in 21CMMC.py. Could make it a global value in 21CMMC.py to make it a referenceable value... R_BUBBLE_MAX = 50.0 # Minimum and maximum allowed range for Tvir as set in 21CMMC.py. Again, could make it a global value in 21CMMC.py to make it a referenceable value... Tvir_MIN = 4.0 Tvir_MAX = 6.0 IncludeAlpha = False Fiducial_Alpha = 0.0 # Minimum and maximum allowed range for Alpha as set in 21CMMC.py. Again, could make it a global value in 21CMMC.py to make it a referenceable value... EFF_FACTOR_PL_INDEX_MIN = -2.0 EFF_FACTOR_PL_INDEX_MAX = 2.0 R_STEPS = 40 Tvir_STEPS = 2 PL_INDEX_STEPS = 0 cell_length_factor = L_FACTOR # Box length as set in INIT_PARAMS.H (Note: These must be changed by hand if any values in the .H files are changed) BOX_LEN = 75 # BOX_LEN = 300 # BOX_LEN = 150 # HII_DIM: number of voxels, set in INIT_PARAMS.H (Note: These must be changed by hand if any values in the .H files are changed) HII_DIM = 50 # HII_DIM = 200 # HII_DIM = 100 R_MFP_MIN = max(R_BUBBLE_MIN, (cell_length_factor*BOX_LEN/float(HII_DIM))) last_filter_R = max(cell_length_factor*BOX_LEN/float(HII_DIM), R_BUBBLE_MIN) if IncludeAlpha is False: PL_INDEX_STEPS = 1 num_processes = 8 command_list = [] filenames = [] command_file_remove = [] create_file = open("f_coll_lightcone_data_%s_%sMpc.txt"%(HII_DIM,BOX_LEN),"w") create_file.write("R_MFP_UB %s\n"%(R_BUBBLE_MAX)) create_file.write("X_RAY_TVIR_LB %s\n"%(Tvir_MIN)) create_file.write("X_RAY_TVIR_UB %s\n"%(Tvir_MAX)) create_file.write("ZETA_PL_LB %s\n"%(EFF_FACTOR_PL_INDEX_MIN)) create_file.write("ZETA_PL_UB %s\n"%(EFF_FACTOR_PL_INDEX_MAX)) create_file.write("R_MFP_STEPS %s\n"%(R_STEPS)) create_file.write("TVIR_STEPS %s\n"%(Tvir_STEPS)) create_file.write("PL_STEPS %s\n"%(PL_INDEX_STEPS)) create_file.close() R_OPTION = 1 command_list, filenames, command_file_remove = CreateFilestrings_and_Commands(R_OPTION,redshifts,R_STEPS,last_filter_R,Tvir_STEPS,PL_INDEX_STEPS,IncludeAlpha,R_MFP_MIN,R_BUBBLE_MAX, Tvir_MIN,Tvir_MAX,EFF_FACTOR_PL_INDEX_MIN,EFF_FACTOR_PL_INDEX_MAX,Fiducial_Alpha) CreateTable(num_processes,command_list,filenames,command_file_remove,R_OPTION,redshifts,R_STEPS,Tvir_STEPS,PL_INDEX_STEPS,IncludeAlpha,R_BUBBLE_MAX,last_filter_R,Tvir_MIN,Tvir_MAX,EFF_FACTOR_PL_INDEX_MIN,EFF_FACTOR_PL_INDEX_MAX,Fiducial_Alpha) R_OPTION = 0 command_list, filenames, command_file_remove = CreateFilestrings_and_Commands(R_OPTION,redshifts,R_STEPS,last_filter_R,Tvir_STEPS,PL_INDEX_STEPS,IncludeAlpha,R_MFP_MIN,R_BUBBLE_MAX, Tvir_MIN,Tvir_MAX,EFF_FACTOR_PL_INDEX_MIN,EFF_FACTOR_PL_INDEX_MAX,Fiducial_Alpha) CreateTable(num_processes,command_list,filenames,command_file_remove,R_OPTION,redshifts,R_STEPS,Tvir_STEPS,PL_INDEX_STEPS,IncludeAlpha,R_BUBBLE_MAX,last_filter_R,Tvir_MIN,Tvir_MAX,EFF_FACTOR_PL_INDEX_MIN,EFF_FACTOR_PL_INDEX_MAX,Fiducial_Alpha)
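# Hedged illustration, not part of the original script: the flat-index layout
# used when filling f_coll_table_first_step above. The table is stored
# row-major over (redshift, R_MFP, Tvir, alpha), so the value for indices
# (i, j, k, l) lives at l + PL_steps*(k + Tvir_steps*(j + R_steps*i)).
def fcoll_table_index(i, j, k, l, R_steps, Tvir_steps, PL_steps):
    return l + PL_steps * (k + Tvir_steps * (j + R_steps * i))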
test_node.py
import os import sys import logging import requests import time import traceback import random import pytest import ray import threading from datetime import datetime, timedelta from ray.cluster_utils import Cluster from ray.new_dashboard.tests.conftest import * # noqa from ray.test_utils import (format_web_url, wait_until_server_available, wait_for_condition, wait_until_succeeded_without_exception) logger = logging.getLogger(__name__) def test_nodes_update(enable_test_module, ray_start_with_dashboard): assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]) is True) webui_url = ray_start_with_dashboard["webui_url"] webui_url = format_web_url(webui_url) timeout_seconds = 10 start_time = time.time() while True: time.sleep(1) try: response = requests.get(webui_url + "/test/dump") response.raise_for_status() try: dump_info = response.json() except Exception as ex: logger.info("failed response: %s", response.text) raise ex assert dump_info["result"] is True dump_data = dump_info["data"] assert len(dump_data["nodes"]) == 1 assert len(dump_data["agents"]) == 1 assert len(dump_data["nodeIdToIp"]) == 1 assert len(dump_data["nodeIdToHostname"]) == 1 assert dump_data["nodes"].keys() == dump_data[ "nodeIdToHostname"].keys() response = requests.get(webui_url + "/test/notified_agents") response.raise_for_status() try: notified_agents = response.json() except Exception as ex: logger.info("failed response: %s", response.text) raise ex assert notified_agents["result"] is True notified_agents = notified_agents["data"] assert len(notified_agents) == 1 assert notified_agents == dump_data["agents"] break except (AssertionError, requests.exceptions.ConnectionError) as e: logger.info("Retry because of %s", e) finally: if time.time() > start_time + timeout_seconds: raise Exception("Timed out while testing.") def test_node_info(disable_aiohttp_cache, ray_start_with_dashboard): @ray.remote class Actor: def getpid(self): return os.getpid() actors = [Actor.remote(), Actor.remote()] actor_pids = [actor.getpid.remote() for actor in actors] actor_pids = set(ray.get(actor_pids)) assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]) is True) webui_url = ray_start_with_dashboard["webui_url"] webui_url = format_web_url(webui_url) node_id = ray_start_with_dashboard["node_id"] timeout_seconds = 10 start_time = time.time() last_ex = None while True: time.sleep(1) try: response = requests.get(webui_url + "/nodes?view=hostnamelist") response.raise_for_status() hostname_list = response.json() assert hostname_list["result"] is True, hostname_list["msg"] hostname_list = hostname_list["data"]["hostNameList"] assert len(hostname_list) == 1 hostname = hostname_list[0] response = requests.get(webui_url + f"/nodes/{node_id}") response.raise_for_status() detail = response.json() assert detail["result"] is True, detail["msg"] detail = detail["data"]["detail"] assert detail["hostname"] == hostname assert detail["raylet"]["state"] == "ALIVE" assert "raylet" in detail["cmdline"][0] assert len(detail["workers"]) >= 2 assert len(detail["actors"]) == 2, detail["actors"] assert len(detail["raylet"]["viewData"]) > 0 actor_worker_pids = set() for worker in detail["workers"]: if "ray::Actor" in worker["cmdline"][0]: actor_worker_pids.add(worker["pid"]) assert actor_worker_pids == actor_pids response = requests.get(webui_url + "/nodes?view=summary") response.raise_for_status() summary = response.json() assert summary["result"] is True, summary["msg"] assert len(summary["data"]["summary"]) == 1 summary = 
summary["data"]["summary"][0] assert summary["hostname"] == hostname assert summary["raylet"]["state"] == "ALIVE" assert "raylet" in summary["cmdline"][0] assert "workers" not in summary assert "actors" not in summary assert "viewData" not in summary["raylet"] assert "objectStoreAvailableMemory" in summary["raylet"] assert "objectStoreUsedMemory" in summary["raylet"] break except Exception as ex: last_ex = ex finally: if time.time() > start_time + timeout_seconds: ex_stack = traceback.format_exception( type(last_ex), last_ex, last_ex.__traceback__) if last_ex else [] ex_stack = "".join(ex_stack) raise Exception(f"Timed out while testing, {ex_stack}") def test_memory_table(disable_aiohttp_cache, ray_start_with_dashboard): assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])) @ray.remote class ActorWithObjs: def __init__(self): self.obj_ref = ray.put([1, 2, 3]) def get_obj(self): return ray.get(self.obj_ref) my_obj = ray.put([1, 2, 3] * 100) # noqa actors = [ActorWithObjs.remote() for _ in range(2)] # noqa results = ray.get([actor.get_obj.remote() for actor in actors]) # noqa webui_url = format_web_url(ray_start_with_dashboard["webui_url"]) resp = requests.get( webui_url + "/memory/set_fetch", params={"shouldFetch": "true"}) resp.raise_for_status() def check_mem_table(): resp = requests.get(f"{webui_url}/memory/memory_table") resp_data = resp.json() assert resp_data["result"] latest_memory_table = resp_data["data"]["memoryTable"] summary = latest_memory_table["summary"] # 1 ref per handle and per object the actor has a ref to assert summary["totalActorHandles"] == len(actors) * 2 # 1 ref for my_obj assert summary["totalLocalRefCount"] == 1 wait_until_succeeded_without_exception( check_mem_table, (AssertionError, ), timeout_ms=1000) def test_get_all_node_details(disable_aiohttp_cache, ray_start_with_dashboard): assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])) webui_url = format_web_url(ray_start_with_dashboard["webui_url"]) @ray.remote class ActorWithObjs: def __init__(self): print("I also log a line") self.obj_ref = ray.put([1, 2, 3]) def get_obj(self): return ray.get(self.obj_ref) actors = [ActorWithObjs.remote() for _ in range(2)] # noqa timeout_seconds = 20 start_time = time.time() last_ex = None def check_node_details(): resp = requests.get(f"{webui_url}/nodes?view=details") resp_json = resp.json() resp_data = resp_json["data"] clients = resp_data["clients"] node = clients[0] assert len(clients) == 1 assert len(node.get("actors")) == 2 # Workers information should be in the detailed payload assert "workers" in node assert "logCount" in node # Two lines printed by ActorWithObjs # One line printed by autoscaler: monitor.py:118 -- Monitor: Started assert node["logCount"] > 2 print(node["workers"]) assert len(node["workers"]) == 2 assert node["workers"][0]["logCount"] == 1 while True: time.sleep(1) try: check_node_details() break except (AssertionError, KeyError, IndexError) as ex: last_ex = ex finally: if time.time() > start_time + timeout_seconds: ex_stack = traceback.format_exception( type(last_ex), last_ex, last_ex.__traceback__) if last_ex else [] ex_stack = "".join(ex_stack) raise Exception(f"Timed out while testing, {ex_stack}") @pytest.mark.parametrize( "ray_start_cluster_head", [{ "include_dashboard": True }], indirect=True) def test_multi_nodes_info(enable_test_module, disable_aiohttp_cache, ray_start_cluster_head): cluster: Cluster = ray_start_cluster_head assert (wait_until_server_available(cluster.webui_url) is True) webui_url = 
cluster.webui_url webui_url = format_web_url(webui_url) cluster.add_node() cluster.add_node() def _check_nodes(): try: response = requests.get(webui_url + "/nodes?view=summary") response.raise_for_status() summary = response.json() assert summary["result"] is True, summary["msg"] summary = summary["data"]["summary"] assert len(summary) == 3 for node_info in summary: node_id = node_info["raylet"]["nodeId"] response = requests.get(webui_url + f"/nodes/{node_id}") response.raise_for_status() detail = response.json() assert detail["result"] is True, detail["msg"] detail = detail["data"]["detail"] assert detail["raylet"]["state"] == "ALIVE" response = requests.get(webui_url + "/test/dump?key=agents") response.raise_for_status() agents = response.json() assert len(agents["data"]["agents"]) == 3 return True except Exception as ex: logger.info(ex) return False wait_for_condition(_check_nodes, timeout=15) @pytest.mark.parametrize( "ray_start_cluster_head", [{ "include_dashboard": True }], indirect=True) def test_multi_node_churn(enable_test_module, disable_aiohttp_cache, ray_start_cluster_head): cluster: Cluster = ray_start_cluster_head assert (wait_until_server_available(cluster.webui_url) is True) webui_url = format_web_url(cluster.webui_url) def cluster_chaos_monkey(): worker_nodes = [] while True: time.sleep(5) if len(worker_nodes) < 2: worker_nodes.append(cluster.add_node()) continue should_add_node = random.randint(0, 1) if should_add_node: worker_nodes.append(cluster.add_node()) else: node_index = random.randrange(0, len(worker_nodes)) node_to_remove = worker_nodes.pop(node_index) cluster.remove_node(node_to_remove) def get_index(): resp = requests.get(webui_url) resp.raise_for_status() def get_nodes(): resp = requests.get(webui_url + "/nodes?view=summary") resp.raise_for_status() summary = resp.json() assert summary["result"] is True, summary["msg"] assert summary["data"]["summary"] t = threading.Thread(target=cluster_chaos_monkey, daemon=True) t.start() t_st = datetime.now() duration = timedelta(seconds=60) while datetime.now() < t_st + duration: get_index() time.sleep(2) @pytest.mark.parametrize( "ray_start_cluster_head", [{ "include_dashboard": True }], indirect=True) def test_logs(enable_test_module, disable_aiohttp_cache, ray_start_cluster_head): cluster = ray_start_cluster_head assert (wait_until_server_available(cluster.webui_url) is True) webui_url = cluster.webui_url webui_url = format_web_url(webui_url) nodes = ray.nodes() assert len(nodes) == 1 node_ip = nodes[0]["NodeManagerAddress"] @ray.remote class LoggingActor: def go(self, n): i = 0 while i < n: print(f"On number {i}") i += 1 def get_pid(self): return os.getpid() la = LoggingActor.remote() la2 = LoggingActor.remote() la_pid = str(ray.get(la.get_pid.remote())) la2_pid = str(ray.get(la2.get_pid.remote())) ray.get(la.go.remote(4)) ray.get(la2.go.remote(1)) def check_logs(): node_logs_response = requests.get( f"{webui_url}/node_logs", params={"ip": node_ip}) node_logs_response.raise_for_status() node_logs = node_logs_response.json() assert node_logs["result"] assert type(node_logs["data"]["logs"]) is dict assert all( pid in node_logs["data"]["logs"] for pid in (la_pid, la2_pid)) assert len(node_logs["data"]["logs"][la2_pid]) == 1 actor_one_logs_response = requests.get( f"{webui_url}/node_logs", params={ "ip": node_ip, "pid": str(la_pid) }) actor_one_logs_response.raise_for_status() actor_one_logs = actor_one_logs_response.json() assert actor_one_logs["result"] assert type(actor_one_logs["data"]["logs"]) is dict assert 
len(actor_one_logs["data"]["logs"][la_pid]) == 4 wait_until_succeeded_without_exception( check_logs, (AssertionError), timeout_ms=1000) @pytest.mark.parametrize( "ray_start_cluster_head", [{ "include_dashboard": True }], indirect=True) def test_errors(enable_test_module, disable_aiohttp_cache, ray_start_cluster_head): cluster = ray_start_cluster_head assert (wait_until_server_available(cluster.webui_url) is True) webui_url = cluster.webui_url webui_url = format_web_url(webui_url) nodes = ray.nodes() assert len(nodes) == 1 node_ip = nodes[0]["NodeManagerAddress"] @ray.remote class ErrorActor(): def go(self): raise ValueError("This is an error") def get_pid(self): return os.getpid() ea = ErrorActor.remote() ea_pid = ea.get_pid.remote() ea.go.remote() def check_errs(): node_errs_response = requests.get( f"{webui_url}/node_logs", params={"ip": node_ip}) node_errs_response.raise_for_status() node_errs = node_errs_response.json() assert node_errs["result"] assert type(node_errs["data"]["errors"]) is dict assert ea_pid in node_errs["data"]["errors"] assert len(node_errs["data"]["errors"][ea_pid]) == 1 actor_err_response = requests.get( f"{webui_url}/node_logs", params={ "ip": node_ip, "pid": str(ea_pid) }) actor_err_response.raise_for_status() actor_errs = actor_err_response.json() assert actor_errs["result"] assert type(actor_errs["data"]["errors"]) is dict assert len(actor_errs["data"]["errors"][ea_pid]) == 4 wait_until_succeeded_without_exception( check_errs, (AssertionError), timeout_ms=1000) if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__]))
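# Hedged sketch, not part of the original tests: the polling pattern repeated
# by hand above (hit a dashboard endpoint, swallow transient errors, fail only
# after a deadline), factored into one helper. The url and predicate arguments
# are placeholders.
def poll_endpoint(url, predicate, timeout_seconds=10):
    deadline = time.time() + timeout_seconds
    last_ex = None
    while time.time() < deadline:
        try:
            response = requests.get(url)
            response.raise_for_status()
            assert predicate(response.json())
            return True
        except (AssertionError, requests.exceptions.ConnectionError) as ex:
            last_ex = ex
            time.sleep(1)
    raise Exception(f"Timed out while polling {url}: {last_ex}")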
message_server.py
# Copyright 2017 Google Inc. All rights reserved. # Use of this source code is governed by the Apache 2.0 license that can be # found in the LICENSE file. import Queue import logging import threading import time import tornado.ioloop import tornado.web MESSAGE_SERVER = None BLANK_PAGE = """<html> <head> <title>Blank</title> <style type="text/css">body {background-color: #FFF;}</style> </head> <body> </body> </html>""" ORANGE_PAGE = """<html> <head> <title>Orange</title> <style> body {background-color: white; margin: 0;} #wptorange {width:100%; height: 100%; background-color: #DE640D;} </style> </head> <body><div id='wptorange'></div></body> </html>""" class TornadoRequestHandler(tornado.web.RequestHandler): """Request handler for when we are using tornado""" def get(self): """Handle GET requests""" import ujson as json response = None content_type = 'text/plain' if self.request.uri == '/ping': response = 'pong' elif self.request.uri == '/blank.html': content_type = 'text/html' response = BLANK_PAGE elif self.request.uri == '/orange.html': content_type = 'text/html' response = ORANGE_PAGE elif self.request.uri == '/config': # JSON config data content_type = 'application/json' response = '{}' if MESSAGE_SERVER.config is not None: response = json.dumps(MESSAGE_SERVER.config) elif self.request.uri == '/config.html': # Orange HTML page that can be queried from the extension for config data content_type = 'text/html' response = "<html><head>\n" response += "<style>\n" response += "body {background-color: white; margin: 0;}\n" response += "#wptorange {width:100%; height: 100%; background-color: #DE640D;}\n" response += "</style>\n" response += "</head><body><div id='wptorange'></div>\n" if MESSAGE_SERVER.config is not None: import cgi response += '<div id="wptagentConfig" style="display: none;">' response += cgi.escape(json.dumps(MESSAGE_SERVER.config)) response += '</div>' response += "</body></html>" if response is not None: self.set_status(200) self.set_header("Content-Type", content_type) self.set_header("Referrer-Policy", "no-referrer") self.write(response) def post(self): """Handle POST messages""" import ujson as json try: messages = self.request.body if messages is not None and len(messages): if self.request.uri == '/log': logging.debug(messages) else: for line in messages.splitlines(): line = line.strip() if len(line): message = json.loads(line) if 'body' not in message and self.request.uri != '/etw': message['body'] = None MESSAGE_SERVER.handle_message(message) except Exception: pass self.set_status(200) class MessageServer(object): """Local HTTP server for interacting with the extension""" def __init__(self): global MESSAGE_SERVER MESSAGE_SERVER = self self.thread = None self.messages = Queue.Queue() self.config = None self.__is_started = threading.Event() def get_message(self, timeout): """Get a single message from the queue""" message = self.messages.get(block=True, timeout=timeout) self.messages.task_done() return message def flush_messages(self): """Flush all of the pending messages""" try: while True: self.messages.get_nowait() self.messages.task_done() except Exception: pass def handle_message(self, message): """Add a received message to the queue""" self.messages.put(message) def start(self): """Start running the server in a background thread""" self.__is_started.clear() self.thread = threading.Thread(target=self.run) self.thread.daemon = True self.thread.start() self.__is_started.wait(timeout=30) def stop(self): """Stop running the server""" logging.debug("Shutting down 
extension server") self.must_exit = True if self.thread is not None: ioloop = tornado.ioloop.IOLoop.instance() ioloop.add_callback(ioloop.stop) self.thread.join() self.thread = None logging.debug("Extension server stopped") def is_ok(self): """Check that the server is responding and restart it if necessary""" import requests import monotonic end_time = monotonic.monotonic() + 30 server_ok = False proxies = {"http": None, "https": None} while not server_ok and monotonic.monotonic() < end_time: try: response = requests.get('http://127.0.0.1:8888/ping', timeout=10, proxies=proxies) if response.text == 'pong': server_ok = True except Exception: pass if not server_ok: time.sleep(5) return server_ok def run(self): """Main server loop""" logging.debug('Starting extension server on port 8888') application = tornado.web.Application([(r"/.*", TornadoRequestHandler)]) application.listen(8888, '127.0.0.1') self.__is_started.set() tornado.ioloop.IOLoop.instance().start()
keep_alive.py
from flask import Flask
from threading import Thread

app = Flask('')


@app.route('/')
def main():
    return '<meta http-equiv="refresh" content="0; URL=https://phantomcodes.ga/support"/>'


def run():
    app.run(host="0.0.0.0", port=8080)


def keep_alive():
    server = Thread(target=run)
    server.start()
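# Hedged usage sketch, not part of the original file: keep_alive() starts the
# Flask server on a background thread so a hosting platform's uptime pinger
# can reach port 8080 while the main program keeps running. The sleep loop
# below is a placeholder for the real workload (for example a bot's event
# loop).
if __name__ == '__main__':
    import time

    keep_alive()
    while True:
        time.sleep(60)  # placeholder for the real long-running work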
async_version.py
import threading

import colorama

from services import get_weather


def main():
    print(colorama.Fore.GREEN +
          "-------------------------------------------\n"
          "| App created by alexfrunza               |\n"
          "| Welcome to weather app (async version)  |\n"
          "| Enter a city name to get weather        |\n"
          "| Or type x to close it                   |\n"
          "-------------------------------------------\n")

    while True:
        city_name = input()

        if city_name == "x":
            print("App is closing...")
            break

        t = threading.Thread(target=get_weather, args=(city_name,), daemon=True)
        t.start()


if __name__ == '__main__':
    main()
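# Hedged stand-in, not part of the original project: services.get_weather is
# imported above but not shown, so this hypothetical stub only illustrates the
# expected signature and why the daemon-thread pattern works - results are
# printed asynchronously while the main loop keeps reading input.
def get_weather_stub(city_name):
    import time
    time.sleep(1)  # simulate the latency of a weather API call
    print("Weather for " + city_name + ": <fetched asynchronously>")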
iftop.py
#!/usr/bin/python from time import sleep, time from threading import Thread from subprocess import call, Popen from deepstream import post import subprocess, re print("Loading: iftop") recordName = "speed" interface = "eth0" # Number in seconds to switch to autonomous/manual Mode elapsedTimout = 16 obj = {} success = "" while success == "": try: success = post(obj, recordName) #success = post({"mode": "manual"}, "mode") except: print("Not connected to deepstream") sleep(1) ipAddress = " NOREC" upload = 0 download = 0 elapsedTime = 0 def getUpDownData(): global obj, ipAddress, upload, download, elapsedTime while True: try: # command: sudo iftop -o 10s -t -s 10 -L 1 -i wlp3s0 elapsedTime = 0 p = Popen([ "/usr/sbin/iftop", "-o", "10s", "-t", "-s", "10", "-L", "1", "-i", interface ], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() out = p[0] err = p[1] uploadArr = re.findall(r"Total send rate:\s+(\d{1,}\.{0,1}\d{0,})(\w+)", out) downloadArr = re.findall(r"Total receive rate:\s+(\d{1,}\.{0,1}\d{0,})(\w+)", out) ipAddressArr = re.findall(r"IP address is:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", err) if ipAddressArr is not []: ipAddress = ipAddressArr[0] if uploadArr is not [] and downloadArr is not []: upload = float(uploadArr[0][0]) download = float(downloadArr[0][0]) if uploadArr[0][1] == "Kb": upload = upload * 1000 if downloadArr[0][1] == "Kb": download = download * 1000 if uploadArr[0][1] == "Mb": upload = upload * 1000000 if downloadArr[0][1] == "Mb": download = download * 1000000 obj = {"upload": upload, "download": download, "ip": ipAddress} #print "upload: {} {} download: {} {} ip: {}".format(upload, uploadArr[0][1], download , downloadArr[0][1], ipAddress) dsSuccess = post(obj, recordName) print(obj) uploadArr = [] downloadArr = [] except: try: post({}, "speed") print("No data from interface: " + interface) sleep(1) except: print("cannot connect to deepstream.") def checkElapsedTime(): global elapsedTime while True: elapsedTime = elapsedTime + 1 if(elapsedTime > 16): try: post({ "mode": "autonomanual" }, "mode") upload = 0 download = 0 post({"upload": upload, "download": download, "ip": ipAddress, "elapsed": elapsedTime}, recordName) except: print("cannot post to deepstream") sleep(1) t1 = Thread(target=getUpDownData) #t2 = Thread(target=checkElapsedTime) t1.start() #t2.start()
test_process.py
# -*- coding: utf-8 -*- # Import python libs from __future__ import absolute_import, print_function, unicode_literals import io import os import sys import threading import time import signal import multiprocessing import functools import datetime import warnings # Import Salt Testing libs from tests.support.unit import TestCase, skipIf from tests.support.mock import ( patch, NO_MOCK, NO_MOCK_REASON ) # Import salt libs import salt.utils.platform import salt.utils.process from salt.utils.versions import warn_until_date # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin import psutil def die(func): ''' Add proc title ''' @functools.wraps(func) def wrapper(self): # Strip off the "test_" from the function name name = func.__name__[5:] def _die(): salt.utils.process.appendproctitle('test_{0}'.format(name)) setattr(self, 'die_' + name, _die) return wrapper def incr(func): ''' Increment counter ''' @functools.wraps(func) def wrapper(self): # Strip off the "test_" from the function name name = func.__name__[5:] def _incr(counter, num): salt.utils.process.appendproctitle('test_{0}'.format(name)) for _ in range(0, num): counter.value += 1 setattr(self, 'incr_' + name, _incr) return wrapper def spin(func): ''' Spin indefinitely ''' @functools.wraps(func) def wrapper(self): # Strip off the "test_" from the function name name = func.__name__[5:] def _spin(): salt.utils.process.appendproctitle('test_{0}'.format(name)) while True: time.sleep(1) setattr(self, 'spin_' + name, _spin) return wrapper class TestProcessManager(TestCase): @spin def test_basic(self): ''' Make sure that the process is alive 2s later ''' process_manager = salt.utils.process.ProcessManager() process_manager.add_process(self.spin_basic) initial_pid = next(six.iterkeys(process_manager._process_map)) time.sleep(2) process_manager.check_children() try: assert initial_pid == next(six.iterkeys(process_manager._process_map)) finally: process_manager.stop_restarting() process_manager.kill_children() time.sleep(0.5) # Are there child processes still running? if process_manager._process_map.keys(): process_manager.send_signal_to_processes(signal.SIGKILL) process_manager.stop_restarting() process_manager.kill_children() @spin def test_kill(self): process_manager = salt.utils.process.ProcessManager() process_manager.add_process(self.spin_kill) initial_pid = next(six.iterkeys(process_manager._process_map)) # kill the child if salt.utils.platform.is_windows(): os.kill(initial_pid, signal.SIGTERM) else: os.kill(initial_pid, signal.SIGKILL) # give the OS time to give the signal... time.sleep(0.1) process_manager.check_children() try: assert initial_pid != next(six.iterkeys(process_manager._process_map)) finally: process_manager.stop_restarting() process_manager.kill_children() time.sleep(0.5) # Are there child processes still running? 
if process_manager._process_map.keys(): process_manager.send_signal_to_processes(signal.SIGKILL) process_manager.stop_restarting() process_manager.kill_children() @die def test_restarting(self): ''' Make sure that the process is alive 2s later ''' process_manager = salt.utils.process.ProcessManager() process_manager.add_process(self.die_restarting) initial_pid = next(six.iterkeys(process_manager._process_map)) time.sleep(2) process_manager.check_children() try: assert initial_pid != next(six.iterkeys(process_manager._process_map)) finally: process_manager.stop_restarting() process_manager.kill_children() time.sleep(0.5) # Are there child processes still running? if process_manager._process_map.keys(): process_manager.send_signal_to_processes(signal.SIGKILL) process_manager.stop_restarting() process_manager.kill_children() @skipIf(sys.version_info < (2, 7), 'Needs > Py 2.7 due to bug in stdlib') @incr def test_counter(self): counter = multiprocessing.Value('i', 0) process_manager = salt.utils.process.ProcessManager() process_manager.add_process(self.incr_counter, args=(counter, 2)) time.sleep(1) process_manager.check_children() time.sleep(1) # we should have had 2 processes go at it try: assert counter.value == 4 finally: process_manager.stop_restarting() process_manager.kill_children() time.sleep(0.5) # Are there child processes still running? if process_manager._process_map.keys(): process_manager.send_signal_to_processes(signal.SIGKILL) process_manager.stop_restarting() process_manager.kill_children() class TestThreadPool(TestCase): def test_basic(self): ''' Make sure the threadpool can do things ''' def incr_counter(counter): counter.value += 1 counter = multiprocessing.Value('i', 0) pool = salt.utils.process.ThreadPool() sent = pool.fire_async(incr_counter, args=(counter,)) self.assertTrue(sent) time.sleep(1) # Sleep to let the threads do things self.assertEqual(counter.value, 1) self.assertEqual(pool._job_queue.qsize(), 0) def test_full_queue(self): ''' Make sure that a full threadpool acts as we expect ''' def incr_counter(counter): counter.value += 1 counter = multiprocessing.Value('i', 0) # Create a pool with no workers and 1 queue size pool = salt.utils.process.ThreadPool(0, 1) # make sure we can put the one item in sent = pool.fire_async(incr_counter, args=(counter,)) self.assertTrue(sent) # make sure we can't put more in sent = pool.fire_async(incr_counter, args=(counter,)) self.assertFalse(sent) time.sleep(1) # Sleep to let the threads do things # make sure no one updated the counter self.assertEqual(counter.value, 0) # make sure the queue is still full self.assertEqual(pool._job_queue.qsize(), 1) class TestProcess(TestCase): @skipIf(NO_MOCK, NO_MOCK_REASON) def test_daemonize_if(self): # pylint: disable=assignment-from-none with patch('sys.argv', ['salt-call']): ret = salt.utils.process.daemonize_if({}) self.assertEqual(None, ret) ret = salt.utils.process.daemonize_if({'multiprocessing': False}) self.assertEqual(None, ret) with patch('sys.platform', 'win'): ret = salt.utils.process.daemonize_if({}) self.assertEqual(None, ret) with patch('salt.utils.process.daemonize'), \ patch('sys.platform', 'linux2'): salt.utils.process.daemonize_if({}) self.assertTrue(salt.utils.process.daemonize.called) # pylint: enable=assignment-from-none class TestSignalHandlingProcess(TestCase): @classmethod def Process(cls, pid): raise psutil.NoSuchProcess(pid) @classmethod def target(cls): os.kill(os.getpid(), signal.SIGTERM) @classmethod def children(cls, *args, **kwargs): raise 
psutil.NoSuchProcess(1) @skipIf(NO_MOCK, NO_MOCK_REASON) def test_process_does_not_exist(self): try: with patch('psutil.Process', self.Process): proc = salt.utils.process.SignalHandlingProcess(target=self.target) proc.start() except psutil.NoSuchProcess: assert False, "psutil.NoSuchProcess raised" @skipIf(NO_MOCK, NO_MOCK_REASON) def test_process_children_do_not_exist(self): try: with patch('psutil.Process.children', self.children): proc = salt.utils.process.SignalHandlingProcess(target=self.target) proc.start() except psutil.NoSuchProcess: assert False, "psutil.NoSuchProcess raised" @staticmethod def run_forever_sub_target(evt): 'Used by run_forever_target to create a sub-process' while not evt.is_set(): time.sleep(1) @staticmethod def run_forever_target(sub_target, evt): 'A target that will run forever or until an event is set' p = multiprocessing.Process(target=sub_target, args=(evt,)) p.start() p.join() @staticmethod def kill_target_sub_proc(): pid = os.fork() if pid == 0: return pid = os.fork() if pid == 0: return time.sleep(.1) try: os.kill(os.getpid(), signal.SIGINT) except KeyboardInterrupt: pass @skipIf(sys.platform.startswith('win'), 'No os.fork on Windows') def test_signal_processing_regression_test(self): evt = multiprocessing.Event() sh_proc = salt.utils.process.SignalHandlingProcess( target=self.run_forever_target, args=(self.run_forever_sub_target, evt) ) sh_proc.start() proc = multiprocessing.Process(target=self.kill_target_sub_proc) proc.start() proc.join() # When the bug exists, the kill_target_sub_proc signal will kill both # processes. sh_proc will be alive if the bug is fixed try: assert sh_proc.is_alive() finally: evt.set() sh_proc.join() @staticmethod def no_op_target(): pass @skipIf(NO_MOCK, NO_MOCK_REASON) def test_signal_processing_test_after_fork_called(self): 'Validate Process and sub classes call after fork methods' evt = multiprocessing.Event() sig_to_mock = 'salt.utils.process.SignalHandlingProcess._setup_signals' log_to_mock = 'salt.utils.process.Process._setup_process_logging' with patch(sig_to_mock) as ma, patch(log_to_mock) as mb: self.sh_proc = salt.utils.process.SignalHandlingProcess(target=self.no_op_target) self.sh_proc.run() ma.assert_called() mb.assert_called() @skipIf(NO_MOCK, NO_MOCK_REASON) def test_signal_processing_test_final_methods_called(self): 'Validate Process and sub classes call finalize methods' evt = multiprocessing.Event() teardown_to_mock = 'salt.log.setup.shutdown_multiprocessing_logging' log_to_mock = 'salt.utils.process.Process._setup_process_logging' sig_to_mock = 'salt.utils.process.SignalHandlingProcess._setup_signals' # Mock _setup_signals so we do not register one for this process. 
with patch(sig_to_mock): with patch(teardown_to_mock) as ma, patch(log_to_mock) as mb: self.sh_proc = salt.utils.process.SignalHandlingProcess(target=self.no_op_target) self.sh_proc.run() ma.assert_called() mb.assert_called() @staticmethod def pid_setting_target(sub_target, val, evt): val.value = os.getpid() p = multiprocessing.Process(target=sub_target, args=(evt,)) p.start() p.join() @skipIf(sys.platform.startswith('win'), 'Required signals not supported on windows') def test_signal_processing_handle_signals_called(self): 'Validate SignalHandlingProcess handles signals' # Gloobal event to stop all processes we're creating evt = multiprocessing.Event() # Create a process to test signal handler val = multiprocessing.Value('i', 0) proc = salt.utils.process.SignalHandlingProcess( target=self.pid_setting_target, args=(self.run_forever_sub_target, val, evt), ) proc.start() # Create a second process that should not respond to SIGINT or SIGTERM proc2 = multiprocessing.Process( target=self.run_forever_target, args=(self.run_forever_sub_target, evt), ) proc2.start() # Wait for the sub process to set it's pid while not val.value: time.sleep(.3) assert not proc.signal_handled() # Send a signal that should get handled by the subprocess os.kill(val.value, signal.SIGTERM) # wait up to 10 seconds for signal handler: start = time.time() while time.time() - start < 10: if proc.signal_handled(): break time.sleep(.3) try: # Allow some time for the signal handler to do it's thing assert proc.signal_handled() # Reap the signaled process proc.join(1) assert proc2.is_alive() finally: evt.set() proc2.join(30) proc.join(30) class TestDup2(TestCase): def test_dup2_no_fileno(self): 'The dup2 method does not fail on streams without fileno support' f1 = io.StringIO("some initial text data") f2 = io.StringIO("some initial other text data") with self.assertRaises(io.UnsupportedOperation): f1.fileno() with patch('os.dup2') as dup_mock: try: salt.utils.process.dup2(f1, f2) except io.UnsupportedOperation: assert False, 'io.UnsupportedOperation was raised' assert not dup_mock.called def null_target(): pass def event_target(event): while True: if event.wait(5): break class TestProcessList(TestCase): @staticmethod def wait_for_proc(proc, timeout=10): start = time.time() while proc.is_alive(): if time.time() - start > timeout: raise Exception("Process did not finishe before timeout") time.sleep(.3) def test_process_list_process(self): plist = salt.utils.process.SubprocessList() proc = multiprocessing.Process(target=null_target) proc.start() plist.add(proc) assert proc in plist.processes self.wait_for_proc(proc) assert not proc.is_alive() plist.cleanup() assert proc not in plist.processes def test_process_list_thread(self): plist = salt.utils.process.SubprocessList() thread = threading.Thread(target=null_target) thread.start() plist.add(thread) assert thread in plist.processes self.wait_for_proc(thread) assert not thread.is_alive() plist.cleanup() assert thread not in plist.processes def test_process_list_cleanup(self): plist = salt.utils.process.SubprocessList() event = multiprocessing.Event() proc = multiprocessing.Process(target=event_target, args=[event]) proc.start() plist.add(proc) assert proc in plist.processes plist.cleanup() event.set() assert proc in plist.processes self.wait_for_proc(proc) assert not proc.is_alive() plist.cleanup() assert proc not in plist.processes class TestDeprecatedClassNames(TestCase): @staticmethod def process_target(): pass @staticmethod def patched_warn_until_date(current_date): def 
_patched_warn_until_date(date, message, category=DeprecationWarning, stacklevel=None, _current_date=current_date, _dont_call_warnings=False): # Because we add another function in between, the stacklevel # set in salt.utils.process, 3, needs to now be 4 stacklevel = 4 return warn_until_date(date, message, category=category, stacklevel=stacklevel, _current_date=_current_date, _dont_call_warnings=_dont_call_warnings) return _patched_warn_until_date def test_multiprocessing_process_warning(self): # We *always* want *all* warnings thrown on this module warnings.filterwarnings('always', '', DeprecationWarning, __name__) fake_utcnow = datetime.date(2021, 1, 1) proc = None try: with patch('salt.utils.versions.warn_until_date', self.patched_warn_until_date(fake_utcnow)): # Test warning with warnings.catch_warnings(record=True) as recorded_warnings: proc = salt.utils.process.MultiprocessingProcess(target=self.process_target) self.assertEqual( 'Please stop using \'salt.utils.process.MultiprocessingProcess\' ' 'and instead use \'salt.utils.process.Process\'. ' '\'salt.utils.process.MultiprocessingProcess\' will go away ' 'after 2022-01-01.', six.text_type(recorded_warnings[0].message) ) finally: if proc is not None: del proc def test_multiprocessing_process_runtime_error(self): fake_utcnow = datetime.date(2022, 1, 1) proc = None try: with patch('salt.utils.versions.warn_until_date', self.patched_warn_until_date(fake_utcnow)): with self.assertRaisesRegex( RuntimeError, r"Please stop using 'salt.utils.process.MultiprocessingProcess' " r"and instead use 'salt.utils.process.Process'. " r"'salt.utils.process.MultiprocessingProcess' will go away " r'after 2022-01-01. ' r'This warning\(now exception\) triggered on ' r"filename '(.*)test_process.py', line number ([\d]+), is " r'supposed to be shown until ([\d-]+). Today is ([\d-]+). ' r'Please remove the warning.'): proc = salt.utils.process.MultiprocessingProcess(target=self.process_target) finally: if proc is not None: del proc def test_signal_handling_multiprocessing_process_warning(self): # We *always* want *all* warnings thrown on this module warnings.filterwarnings('always', '', DeprecationWarning, __name__) fake_utcnow = datetime.date(2021, 1, 1) proc = None try: with patch('salt.utils.versions.warn_until_date', self.patched_warn_until_date(fake_utcnow)): # Test warning with warnings.catch_warnings(record=True) as recorded_warnings: proc = salt.utils.process.SignalHandlingMultiprocessingProcess(target=self.process_target) self.assertEqual( 'Please stop using \'salt.utils.process.SignalHandlingMultiprocessingProcess\' ' 'and instead use \'salt.utils.process.SignalHandlingProcess\'. ' '\'salt.utils.process.SignalHandlingMultiprocessingProcess\' will go away ' 'after 2022-01-01.', six.text_type(recorded_warnings[0].message) ) finally: if proc is not None: del proc def test_signal_handling_multiprocessing_process_runtime_error(self): fake_utcnow = datetime.date(2022, 1, 1) proc = None try: with patch('salt.utils.versions.warn_until_date', self.patched_warn_until_date(fake_utcnow)): with self.assertRaisesRegex( RuntimeError, r"Please stop using 'salt.utils.process.SignalHandlingMultiprocessingProcess' " r"and instead use 'salt.utils.process.SignalHandlingProcess'. " r"'salt.utils.process.SignalHandlingMultiprocessingProcess' will go away " r'after 2022-01-01. ' r'This warning\(now exception\) triggered on ' r"filename '(.*)test_process.py', line number ([\d]+), is " r'supposed to be shown until ([\d-]+). Today is ([\d-]+). 
' r'Please remove the warning.'): proc = salt.utils.process.SignalHandlingMultiprocessingProcess(target=self.process_target) finally: if proc is not None: del proc
multi-proc1.py
# reference : https://python.flowdas.com/library/multiprocessing.html
from multiprocessing import current_process, Process, Value, Array

double_value = 0
num_array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]


def f(n, a):
    global double_value
    global num_array

    double_value = 3.1415927
    num_array = [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]

    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

    print(current_process().name)


if __name__ == '__main__':
    print(current_process().name)

    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print(double_value)
    print(num_array)

    print(num.value)
    print(arr[:])
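# Hedged companion sketch, not part of the original example, reusing the
# multiprocessing names imported at the top of this file. It shows the flip
# side of the demo above: the plain globals are only changed in the child's
# copy (the parent still prints 0 and [0..9]), while a shared Value is visible
# to every process and its built-in lock prevents lost updates under
# concurrent increments.
def add_many(counter, n):
    for _ in range(n):
        with counter.get_lock():  # Value('i', ...) is a synchronized wrapper
            counter.value += 1


def locked_counter_demo():
    counter = Value('i', 0)
    workers = [Process(target=add_many, args=(counter, 1000)) for _ in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return counter.value  # 4000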
dispatch.py
# Copyright 2017, Inderpreet Singh, All rights reserved.

from enum import Enum
from typing import List
import queue
import logging
import os
import threading
import time
from abc import ABC, abstractmethod
import re

from .extract import Extract, ExtractError
from model import ModelFile
from common import AppError


class ExtractDispatchError(AppError):
    pass


class ExtractListener(ABC):
    @abstractmethod
    def extract_completed(self, name: str, is_dir: bool):
        pass

    @abstractmethod
    def extract_failed(self, name: str, is_dir: bool):
        pass


class ExtractStatus:
    """
    Represents the status of a single extraction request
    """
    class State(Enum):
        EXTRACTING = 0

    def __init__(self, name: str, is_dir: bool, state: State):
        self.__name = name
        self.__is_dir = is_dir
        self.__state = state

    @property
    def name(self) -> str:
        return self.__name

    @property
    def is_dir(self) -> bool:
        return self.__is_dir

    @property
    def state(self) -> State:
        return self.__state

    def __eq__(self, other):
        return self.__dict__ == other.__dict__


class ExtractDispatch:
    __WORKER_SLEEP_INTERVAL_IN_SECS = 0.5

    class _Task:
        def __init__(self, root_name: str, root_is_dir: bool):
            self.root_name = root_name
            self.root_is_dir = root_is_dir
            self.archive_paths = []  # list of (archive path, out path) pairs

        def add_archive(self, archive_path: str, out_dir_path: str):
            self.archive_paths.append((archive_path, out_dir_path))

    def __init__(self, out_dir_path: str, local_path: str):
        self.__out_dir_path = out_dir_path
        self.__local_path = local_path
        self.__task_queue = queue.Queue()
        self.__worker = threading.Thread(name="ExtractWorker", target=self.__worker)
        self.__worker_shutdown = threading.Event()
        self.__listeners = []
        self.__listeners_lock = threading.Lock()
        self.logger = logging.getLogger(self.__class__.__name__)

    def set_base_logger(self, base_logger: logging.Logger):
        self.logger = base_logger.getChild(self.__class__.__name__)

    def start(self):
        self.__worker.start()

    def stop(self):
        self.__worker_shutdown.set()
        self.__worker.join()

    def add_listener(self, listener: ExtractListener):
        self.__listeners_lock.acquire()
        self.__listeners.append(listener)
        self.__listeners_lock.release()

    def status(self) -> List[ExtractStatus]:
        tasks = list(self.__task_queue.queue)
        statuses = []
        for task in tasks:
            status = ExtractStatus(name=task.root_name,
                                   is_dir=task.root_is_dir,
                                   state=ExtractStatus.State.EXTRACTING)
            statuses.append(status)
        return statuses

    def extract(self, model_file: ModelFile):
        self.logger.debug("Received extract for {}".format(model_file.name))
        for task in self.__task_queue.queue:
            if task.root_name == model_file.name:
                self.logger.info("Ignoring extract for {}, already exists".format(model_file.name))
                return

        # noinspection PyProtectedMember
        task = ExtractDispatch._Task(model_file.name, model_file.is_dir)

        if model_file.is_dir:
            # For a directory, try and find all archives
            # Loop through all directories using BFS
            frontier = [model_file]
            while frontier:
                curr_file = frontier.pop(0)
                if curr_file.is_dir:
                    frontier += curr_file.get_children()
                else:
                    archive_full_path = os.path.join(self.__local_path, curr_file.full_path)
                    out_dir_path = os.path.join(self.__out_dir_path,
                                                os.path.dirname(curr_file.full_path))
                    if curr_file.local_size is not None \
                            and curr_file.local_size > 0 \
                            and Extract.is_archive(archive_full_path):
                        task.add_archive(archive_path=archive_full_path, out_dir_path=out_dir_path)

            # Coalesce extractions
            ExtractDispatch.__coalesce_extractions(task)

            # Verify that there was at least one archive file
            if len(task.archive_paths) > 0:
                self.__task_queue.put(task)
            else:
                raise ExtractDispatchError(
                    "Directory does not contain any archives: {}".format(model_file.name)
                )
        else:
            # For a single file, it must exist locally and must be an archive
            if model_file.local_size in (None, 0):
                raise ExtractDispatchError("File does not exist locally: {}".format(model_file.name))
            archive_full_path = os.path.join(self.__local_path, model_file.name)
            if not Extract.is_archive(archive_full_path):
                raise ExtractDispatchError("File is not an archive: {}".format(model_file.name))
            task.add_archive(archive_path=archive_full_path, out_dir_path=self.__out_dir_path)
            self.__task_queue.put(task)

    def __worker(self):
        self.logger.debug("Started worker thread")
        while not self.__worker_shutdown.is_set():
            # Try to grab next task
            # Do another check for shutdown
            while len(self.__task_queue.queue) > 0 and not self.__worker_shutdown.is_set():
                # peek the task
                task = self.__task_queue.queue[0]

                # We have a task, extract archives one by one
                completed = True
                try:
                    for archive_path, out_dir_path in task.archive_paths:
                        if self.__worker_shutdown.is_set():
                            # exit early
                            self.logger.warning("Extraction failed, shutdown requested")
                            completed = False
                            break
                        self.logger.debug("Extracting {}".format(archive_path))
                        Extract.extract_archive(
                            archive_path=archive_path,
                            out_dir_path=out_dir_path
                        )
                except ExtractError:
                    self.logger.exception("Caught an extraction error")
                    completed = False
                finally:
                    # pop the task
                    self.__task_queue.get(block=False)

                    # Send notification to listeners
                    self.__listeners_lock.acquire()
                    for listener in self.__listeners:
                        if completed:
                            listener.extract_completed(task.root_name, task.root_is_dir)
                        else:
                            listener.extract_failed(task.root_name, task.root_is_dir)
                    self.__listeners_lock.release()

            time.sleep(ExtractDispatch.__WORKER_SLEEP_INTERVAL_IN_SECS)

        self.logger.debug("Stopped worker thread")

    @staticmethod
    def __coalesce_extractions(task: _Task):
        """
        Remove duplicate extractions due to split files
        :param task:
        :return:
        """
        # Filter out any rxx files for a split rar
        filtered_paths = []
        for archive_path, out_path in task.archive_paths:
            file_ext = os.path.splitext(os.path.basename(archive_path))[1]
            if not re.match(r"^\.r\d{2,}$", file_ext):
                filtered_paths.append((archive_path, out_path))
        task.archive_paths = filtered_paths
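
# Note: the __coalesce_extractions filter above drops the numbered volumes of a split RAR
# set (.r00, .r01, ...) so only the leading part is queued once. A minimal standalone
# sketch of that filtering rule follows; the file names are made up for illustration.

import os
import re

sample_paths = ["show.s01/show.rar", "show.s01/show.r00", "show.s01/show.r01", "extras/art.zip"]

def coalesce(archive_paths):
    # Keep a path unless its extension is a split-rar volume such as .r00, .r01, ...
    return [p for p in archive_paths
            if not re.match(r"^\.r\d{2,}$", os.path.splitext(os.path.basename(p))[1])]

print(coalesce(sample_paths))  # ['show.s01/show.rar', 'extras/art.zip']
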
old_heft_scheduler.py
import sys import os sys.path.append(os.getcwd()) import pyschedcl as fw import logging import argparse import json #import sys import time import datetime #import plotly.plotly as py from os import listdir from os.path import isfile, join import matplotlib.pyplot as plt import networkx as nx import csv import random import time import threading import numpy as np logging.basicConfig(level=logging.DEBUG) fw.logging.basicConfig(level=logging.DEBUG) def parse_arg(args=None): parser = argparse.ArgumentParser(description='Schedule set of independent OpenCL Task directed acyclic grpah on CPU-GPU heterogeneous multicores') parser.add_argument('-f', '--file', help='Input task file containing list of <json filename, partition class, dataset> tuples', default ='dag_info/dag_transformer/') parser.add_argument('-ng', '--nGPU', help='Number of GPUs', default='1') parser.add_argument('-nc', '--nCPU', help='Number of CPUs', default='1') parser.add_argument('-l', '--log', help='Flag for turning on LOG', action="store_true") parser.add_argument('-g', '--graph', help='Flag for plotting GANTT chart for execution', action="store_true") parser.add_argument('-df', '--dump_output_file', help='Flag for dumping output file for a kernel', action="store_true") parser.add_argument('-t', '--task', help='reduce everything to a single task', action="store_true", default=False) parser.add_argument('-ag', '--all_gpu', help='if --task/-t flag is enabled all kernels are moved to gpu if -ag is on, else cpu', action="store_true", default=False) parser.add_argument('-rc','--recreate_dag', help = 'recreate the dag json file from dag.graph specification', action = "store_true", default=False ) parser.add_argument('-nchk','--num_chunks', help = 'number of chunks to split the kernels', default='1' ) parser.add_argument('-ce','--check_error', help = 'print error for some kernels', action = "store_true", default = False) parser.add_argument('-thd','--use_thread', help = "Use threaded scheduler", action = "store_true", default = False) parser.add_argument('-fdp','--full_dump_path', help = "Specify Full Dump Path for profiling results", default='None') return parser.parse_args(args) def all_done(): for dag in all_dags: if not dag.finished(): return False return True def random_selector(Q,start_sched): fw.frontier_Q_lock.acquire() while ((not Q) and (not all_done())): fw.frontier_Q_lock.wait() if all_done(): total_time_in_multiple_dag_devices = time.time()-start_sched print "\t \t Total Time measured by the scheduler - ",total_time_in_multiple_dag_devices fw.frontier_Q_lock.release() return -1 #fw.frontier_Q_lock.release() task = Q[0] del Q[0] fw.frontier_Q_lock.release() return task def priority_selector(Q,start_sched): fw.frontier_Q_lock.acquire() while ((not Q) and (not all_done())): fw.frontier_Q_lock.wait() if all_done(): total_time_in_multiple_dag_devices = time.time()-start_sched print "\t \t Total Time measured by the scheduler - ",total_time_in_multiple_dag_devices fw.frontier_Q_lock.release() return -1 #fw.frontier_Q_lock.release() counter = 0 max_rank = 0.0 max_rank_task_index = 0 for t in Q: print "Comparing ", t.rank, "and ",max_rank if t.rank > max_rank: max_rank_task_index=counter max_rank=t.rank counter +=1 print "SELECTED: Q[",max_rank_task_index,"]",max_rank, task = Q[max_rank_task_index] print task.id del Q[max_rank_task_index] fw.frontier_Q_lock.release() return task if __name__ == '__main__': args = parse_arg(sys.argv[1:]) if args.recreate_dag: 
fw.create_dag("./database/info/","./dag_info/dag_transformer/dag.graph","./dag_info/dag_transformer/t1.json"\ ,partition=10) num_chunks = int(args.num_chunks) fw.just_for_testing_num_chunks = num_chunks info_file = args.file cmd_qs, ctxs, gpus, cpus = fw.host_initialize(int(args.nGPU), int(args.nCPU),use_mul_queues = True) #Dags_folder = list() all_dags = [] #list of all the DAGs finished_task_Dag = dict() deleted_task_dag = list() all_dags_jsons = [join(info_file,f) for f in listdir(info_file)] #list of json files - each json file corresponds to a single DAG gantt_label = [(info_file + f) for f in listdir(info_file)] gantt = 0 # count = 0 # count1 = 0 # task_dag_id = 0 frontier_Q = fw.frontier_Q ex_stats ="logs/transformer_profiling_128_128_128.json" for i,dag_json_file in enumerate(all_dags_jsons): if dag_json_file.endswith('json'): logging.debug("main : Reading json file "+ dag_json_file) with open(dag_json_file,"r") as f: info = json.loads(f.read()) logging.debug("main : prepraing task dag number "+str(i)) all_dags.append(fw.TaskDAG(info,dag_number = i ,dataset = 1024,map_all=args.task,all_map_to_gpu=args.all_gpu,\ gpus=gpus,cpus=cpus,ctxs=ctxs,cmd_qs=cmd_qs,ex_stats_file=ex_stats)) #create dag for info file (ex :- dag_test1/t1.json) logging.debug("main : prepared task dag number "+str(i)+"\n\n") fw.frontier_Q_lock.acquire() frontier_Q.extend(all_dags[-1].free_tasks) for task in all_dags[-1].free_tasks: task.has_enqueued = True fw.frontier_Q_lock.release() for dag in all_dags: dag.compute_blevel_ranks() print "Printing initial frontier_Q tasks\n\n" for i,task in enumerate(fw.frontier_Q): print "task number "+str(i+1)+ " rank value: "+ str(task.rank) # logging.debug("it's free kernels "+str([k.id for k in task.free_kernels])) # logging.debug("it's all kernels "+str([k.id for k in task.kernels])) # logging.debug("it's dag id "+str(task.task_dag_object.id)) # logging.debug("it's optm device is "+str(task.optm_device)) #sys.exit(-1) start_sched = time.time() while True: logging.debug("before selection length of frontier_Q : "+str(len(frontier_Q))) next_task = random_selector(frontier_Q,start_sched) if next_task == -1: logging.debug("all dags are finished ") break logging.debug("task selected "+str(next_task.id)) optm_device = next_task.optm_device if int(optm_device) == 10: optm_device = "gpu" logging.debug("gpu selected") elif int(optm_device) == 0: optm_device = "cpu" logging.debug("cpu selected") else: raise logging.debug("after selection length of frontier_Q : "+str(len(frontier_Q))+"\n") fw.rqLock.acquire() while not (len(fw.ready_queue[optm_device]) > 0): fw.rqLock.wait() #now device is free and time to schedule task logging.debug("current ready queue "+str(fw.ready_queue[optm_device])) #print(list(fw.ready_queue[optm_device])) next_task.allocate_devices(list(fw.ready_queue[optm_device])) logging.debug(str(fw.ready_queue[optm_device])) fw.rqLock.release() if args.use_thread: dispatch_thread = threading.Thread(target=next_task.dispatch_all,args=()) dispatch_thread.start() else: next_task.dispatch_all(gpus, cpus, ctxs, cmd_qs) #next_task.dispatch_all(gpus, cpus, ctxs,cmd_qs) #fw.rqLock.acquire() #logging.debug("Number of threads running are "+str(threading.active_count())) logging.debug("Profiling the execution") ref = {'cpu' : None, 'gpu' : None} for dag in all_dags: for kernel_id,kernel in dag.kernels.items(): #print "\t Kernel ",kernel.name dev = kernel.task_object.device for ev in kernel.write_events: ev.wait() start_time = ev.profile.START *1e-9 end_time = ev.profile.END *1e-9 if 
not ref[dev]: ref[dev] = start_time else: ref[dev] = min(ref[dev],start_time) for ev in kernel.nd_range_event: ev.wait() start_time = ev.profile.START*1e-9 if not ref[dev]: ref[dev] = start_time else: ref[dev] = min(ref[dev],start_time) for ev in kernel.read_events: ev.wait() start_time = ev.profile.START *1e-9 end_time = ev.profile.END *1e-9 if not ref[dev]: ref[dev] = start_time else: ref[dev] = min(ref[dev],start_time) host_st = None host_en = None timestamps = {} for dag in all_dags: print "dag number : ",dag.id for kernel_id,kernel in dag.kernels.items(): timestamps[kernel.name+str(kernel_id)] = {} dev = kernel.task_object.device fin = ref[dev] kernel_timestamps = timestamps[kernel.name+str(kernel_id)] print "\t Kernel ",kernel.name, " ",kernel.id, " ",dev kernel_timestamps["write"] = {"host_queued":kernel.host_events[0].write_start,\ "device_queued":-1,"device_start":-1,"device_end":-1} kernel_timestamps["device"] = dev st = None for ev in kernel.write_events: ev.wait() queue_time = ev.profile.QUEUED*1e-9 start_time = ev.profile.START *1e-9 end_time = ev.profile.END *1e-9 if kernel_timestamps["write"]["device_queued"] == -1: kernel_timestamps["write"]["device_queued"] = queue_time else: kernel_timestamps["write"]["device_queued"] = min(queue_time,kernel_timestamps["write"]["device_queued"]) if kernel_timestamps["write"]["device_start"] == -1: kernel_timestamps["write"]["device_start"] = start_time else: kernel_timestamps["write"]["device_start"] = min(start_time,kernel_timestamps["write"]["device_start"]) if kernel_timestamps["write"]["device_end"] == -1: kernel_timestamps["write"]["device_end"] = end_time else: kernel_timestamps["write"]["device_end"] = max(end_time,kernel_timestamps["write"]["device_end"]) print "\t\t Write event | Start time ",start_time-ref[dev], " | End time ", end_time-ref[dev] #kernel_timestamps["write"].append([start_time-ref[dev],end_time-ref[dev]]) if st == None: st = start_time else: st = min(st,start_time) fin = max(fin,end_time) kernel_timestamps["nd_range"] = {"device_start":-1,"device_end":-1} # ev = kernel.nd_range_event for ev in kernel.nd_range_event: ev.wait() start_time = ev.profile.START*1e-9 end_time = ev.profile.END*1e-9 print "\t\t ND range | Start time ",start_time-ref[dev], " | End time ", end_time-ref[dev] #kernel_timestamps["nd_range"].append([start_time-ref[dev],end_time-ref[dev]]) if st==None: st = start_time else: st = min(st,start_time) fin = max(fin,end_time) if kernel_timestamps["nd_range"]["device_start"] == -1: kernel_timestamps["nd_range"]["device_start"] = start_time else: kernel_timestamps["nd_range"]["device_start"] = min(start_time,kernel_timestamps["nd_range"]["device_start"]) if kernel_timestamps["nd_range"]["device_end"] == -1: kernel_timestamps["nd_range"]["device_end"] = end_time else: kernel_timestamps["nd_range"]["device_end"] = max(end_time,kernel_timestamps["nd_range"]["device_end"]) kernel_timestamps["read"] = {"device_start":-1,"device_end":-1} for ev in kernel.read_events: ev.wait() start_time = ev.profile.START*1e-9 end_time = ev.profile.END*1e-9 print "\t\t Read event | Start time ",start_time-ref[dev], " | End time ", end_time-ref[dev] #kernel_timestamps["read"].append([start_time-ref[dev],end_time-ref[dev]]) if st==None: st = start_time else: st = min(st,start_time) fin = max(fin,end_time) if kernel_timestamps["read"]["device_start"] == -1: kernel_timestamps["read"]["device_start"] = start_time else: kernel_timestamps["read"]["device_start"] = min(start_time,kernel_timestamps["read"]["device_start"]) if 
kernel_timestamps["read"]["device_end"] == -1: kernel_timestamps["read"]["device_end"] = end_time else: kernel_timestamps["read"]["device_end"] = max(end_time,kernel_timestamps["read"]["device_end"]) err = 0 if args.check_error: if kernel.name.endswith("copy"): if kernel.name[:4] == "coal": m = kernel.global_work_size[1] n = kernel.global_work_size[0] else: m = kernel.global_work_size[0] n = kernel.global_work_size[1] inp = kernel.data["input"][0] out = kernel.data["output"][0] inp = inp.reshape(m,n) out = out.reshape(m,n) err = np.mean((out-inp)**2) if kernel.name.endswith("transpose"): if kernel.name[:4] == "coal": m = kernel.global_work_size[1] n = kernel.global_work_size[0] else: m = kernel.global_work_size[0] n = kernel.global_work_size[1] inp = kernel.data["input"][0] out = kernel.data["output"][0] inp = inp.reshape(m,n) out = out.reshape(n,m) err = np.mean((out-inp.T)**2) if "gemm" in kernel.name.lower(): m = kernel.symbolic_variables["m1"] p = kernel.symbolic_variables["p1"] n = kernel.symbolic_variables["n1"] inp1 = kernel.data["input"][0] inp2 = kernel.data["input"][1] bias = kernel.data["input"][2] out = kernel.data["output"][0] inp1 = inp1.reshape(m,p) inp2 = inp2.reshape(p,n) out = out.reshape(m,n) err = np.mean((inp1.dot(inp2)+bias-out)**2) #print "\t \t "+str(kernel_timestamps["write"]["host_queued"]-kernel_timestamps["write"]["device_queued"]) # print "\t \t Time taken(measured by device times stamps)", fin-st # if kernel.host_events[0].read_end and kernel.host_events[0].write_start: # host_total_time = kernel.host_events[0].read_end-kernel.host_events[0].write_start # print "\t \t Time Taken (measured by host time stamps) : ",host_total_time # # # # if host_en == None: # host_en = kernel.host_events[0].read_end # else: # host_en = max(host_en,kernel.host_events[0].read_end) # # if host_st == None: # host_st = kernel.host_events[0].write_start # else: # host_st = min(host_st,kernel.host_events[0].write_start) # # # total_host_overhead = host_total_time - (fin-st) # print "\t \t Measured Host overhead :",total_host_overhead # print "\t \t Percentage overhead:",total_host_overhead*100/host_total_time # if args.check_error: # print "\t \t Error :- ",err # print "\n" # # # else: # print "\t \t Host Profiling data not available, continuing..\n" # # en = host_en-host_st # # print "Total Time as measured by Host read callback threads is ",en # #print "Total Time as measured by scheduler is ",total_time_in_multiple_dag_devices # # #print timestamps print "\n" #print json.dumps(timestamps,sort_keys=True,indent=2) #print timestamps if args.full_dump_path == "None": if args.use_thread: with open("./scheduling/dumps/thread.json","wb") as f: print "saving to thread.json" json.dump(timestamps,f) else: with open("./scheduling/dumps/non_thread.json","wb") as f: print "saving to non_thread.json" json.dump(timestamps,f) else: with open(args.full_dump_path,"wb") as f: print "saving to ",args.full_dump_path json.dump(timestamps,f) time.sleep(2)
custom.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import threading import time import ast try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse # pylint: disable=import-error from binascii import hexlify from os import urandom import datetime import json import ssl import sys import uuid from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports import OpenSSL.crypto from fabric import Connection from knack.prompting import prompt_pass, NoTTYException from knack.util import CLIError from knack.log import get_logger from msrestazure.azure_exceptions import CloudError from msrestazure.tools import is_valid_resource_id, parse_resource_id from azure.mgmt.storage import StorageManagementClient from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient from azure.mgmt.relay.models import AccessRights from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory from azure.cli.command_modules.network._client_factory import network_client_factory from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.cli.core.commands import LongRunningOperation from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \ ConfiguredDefaultSetter, sdk_no_wait from azure.cli.core.util import get_az_user_agent from azure.cli.core.profiles import ResourceType, get_sdk from .tunnel import TunnelServer from .vsts_cd_provider import VstsContinuousDeliveryProvider from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES from ._client_factory import web_client_factory, ex_handler_factory from ._appservice_utils import _generic_site_operation from .utils import _normalize_sku, get_sku_name, retryable_method from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details, should_create_new_rg, set_location, does_app_already_exist, get_profile_username, get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use, detect_os_form_src) from ._constants import (FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION, FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION, FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS, NODE_VERSION_DEFAULT, DOTNET_RUNTIME_VERSION_TO_DOTNET_LINUX_FX_VERSION) logger = get_logger(__name__) # pylint:disable=no-member,too-many-lines,too-many-locals # region "Common routines shared with quick-start extensions." 
# Please maintain compatibility in both interfaces and functionalities" def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master', deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None, multicontainer_config_type=None, multicontainer_config_file=None, tags=None, using_webapp_up=False, language=None): SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models( 'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair') if deployment_source_url and deployment_local_git: raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git') docker_registry_server_url = parse_docker_image_name(deployment_container_image_name) client = web_client_factory(cmd.cli_ctx) if is_valid_resource_id(plan): parse_result = parse_resource_id(plan) plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name']) else: plan_info = client.app_service_plans.get(resource_group_name, plan) if not plan_info: raise CLIError("The plan '{}' doesn't exist".format(plan)) is_linux = plan_info.reserved node_default_version = NODE_VERSION_DEFAULT location = plan_info.location # This is to keep the existing appsettings for a newly created webapp on existing webapp name. name_validation = client.check_name_availability(name, 'Site') if not name_validation.name_available: existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings') settings = [] for k, v in existing_app_settings.properties.items(): settings.append(NameValuePair(name=k, value=v)) site_config = SiteConfig(app_settings=settings) else: site_config = SiteConfig(app_settings=[]) if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1', 'B1', 'B2', 'B3', 'BASIC']: site_config.always_on = True webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags, https_only=using_webapp_up) helper = _StackRuntimeHelper(cmd, client, linux=is_linux) if is_linux: if not validate_container_app_create_options(runtime, deployment_container_image_name, multicontainer_config_type, multicontainer_config_file): raise CLIError("usage error: --runtime | --deployment-container-image-name |" " --multicontainer-config-type TYPE --multicontainer-config-file FILE") if startup_file: site_config.app_command_line = startup_file if runtime: site_config.linux_fx_version = runtime match = helper.resolve(runtime) if not match: raise CLIError("Linux Runtime '{}' is not supported." 
"Please invoke 'list-runtimes' to cross check".format(runtime)) elif deployment_container_image_name: site_config.linux_fx_version = _format_fx_version(deployment_container_image_name) if name_validation.name_available: site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE", value="false")) elif multicontainer_config_type and multicontainer_config_file: encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file) site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type) elif plan_info.is_xenon: # windows container webapp site_config.windows_fx_version = _format_fx_version(deployment_container_image_name) elif runtime: # windows webapp with runtime specified if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]): raise CLIError("usage error: --startup-file or --deployment-container-image-name or " "--multicontainer-config-type and --multicontainer-config-file is " "only appliable on linux webapp") match = helper.resolve(runtime) if not match: raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long match['setter'](cmd=cmd, stack=match, site_config=site_config) # Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack if not match['displayName'].startswith('node'): if name_validation.name_available: site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION", value=node_default_version)) else: # windows webapp without runtime specified if name_validation.name_available: site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION", value=node_default_version)) if site_config.app_settings: for setting in site_config.app_settings: logger.info('Will set appsetting %s', setting) if using_webapp_up: # when the routine is invoked as a help method for webapp up if name_validation.name_available: logger.info("will set appsetting for enabling build") site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True)) if language is not None and language.lower() == 'dotnetcore': if name_validation.name_available: site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK', value='https://{}.scm.azurewebsites.net/detectors' .format(name))) poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def) webapp = LongRunningOperation(cmd.cli_ctx)(poller) # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url, deployment_source_branch, deployment_local_git) _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name) if deployment_container_image_name: update_container_settings(cmd, resource_group_name, name, docker_registry_server_url, deployment_container_image_name, docker_registry_server_user, docker_registry_server_password=docker_registry_server_password) return webapp def validate_container_app_create_options(runtime=None, deployment_container_image_name=None, multicontainer_config_type=None, multicontainer_config_file=None): if bool(multicontainer_config_type) != bool(multicontainer_config_file): return False opts = [runtime, deployment_container_image_name, multicontainer_config_type] return len([x for x in opts if x]) == 1 # you can only specify one out 
the combinations def parse_docker_image_name(deployment_container_image_name): if not deployment_container_image_name: return None slash_ix = deployment_container_image_name.rfind('/') docker_registry_server_url = deployment_container_image_name[0:slash_ix] if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url): return None return docker_registry_server_url def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None): if not settings and not slot_settings: raise CLIError('Usage Error: --settings |--slot-settings') settings = settings or [] slot_settings = slot_settings or [] app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot) result, slot_result = {}, {} # pylint: disable=too-many-nested-blocks for src, dest in [(settings, result), (slot_settings, slot_result)]: for s in src: try: temp = shell_safe_json_parse(s) if isinstance(temp, list): # a bit messy, but we'd like accept the output of the "list" command for t in temp: if t.get('slotSetting', True): slot_result[t['name']] = t['value'] # Mark each setting as the slot setting else: result[t['name']] = t['value'] else: dest.update(temp) except CLIError: setting_name, value = s.split('=', 1) dest[setting_name] = value result.update(slot_result) for setting_name, value in result.items(): app_settings.properties[setting_name] = value client = web_client_factory(cmd.cli_ctx) result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name, 'update_application_settings', app_settings.properties, slot, client) app_settings_slot_cfg_names = [] if slot_result: new_slot_setting_names = slot_result.keys() slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or [] slot_cfg_names.app_setting_names += new_slot_setting_names app_settings_slot_cfg_names = slot_cfg_names.app_setting_names client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) return _build_app_settings_output(result.properties, app_settings_slot_cfg_names) def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name, share_name, access_key, mount_path=None, slot=None, slot_setting=False): AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue') azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_azure_storage_accounts', slot) if custom_id in azure_storage_accounts.properties: raise CLIError("Site already configured with an Azure storage account with the id '{}'. 
" "Use 'az webapp config storage-account update' to update an existing " "Azure storage account configuration.".format(custom_id)) azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name, share_name=share_name, access_key=access_key, mount_path=mount_path) client = web_client_factory(cmd.cli_ctx) result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name, 'update_azure_storage_accounts', azure_storage_accounts.properties, slot, client) if slot_setting: slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or [] if custom_id not in slot_cfg_names.azure_storage_config_names: slot_cfg_names.azure_storage_config_names.append(custom_id) client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) return result.properties def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None, share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False): AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue') azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_azure_storage_accounts', slot) existing_account_config = azure_storage_accounts.properties.pop(custom_id, None) if not existing_account_config: raise CLIError("No Azure storage account configuration found with the id '{}'. " "Use 'az webapp config storage-account add' to add a new " "Azure storage account configuration.".format(custom_id)) new_account_config = AzureStorageInfoValue( type=storage_type or existing_account_config.type, account_name=account_name or existing_account_config.account_name, share_name=share_name or existing_account_config.share_name, access_key=access_key or existing_account_config.access_key, mount_path=mount_path or existing_account_config.mount_path ) azure_storage_accounts.properties[custom_id] = new_account_config client = web_client_factory(cmd.cli_ctx) result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name, 'update_azure_storage_accounts', azure_storage_accounts.properties, slot, client) if slot_setting: slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or [] if custom_id not in slot_cfg_names.azure_storage_config_names: slot_cfg_names.azure_storage_config_names.append(custom_id) client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) return result.properties def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None): client = web_client_factory(cmd.cli_ctx) app = client.web_apps.get(resource_group_name, name) if app is None: raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. 
' 'Please make sure these values are correct.'.format(name, resource_group_name)) parse_plan_id = parse_resource_id(app.server_farm_id) plan_info = None retry_delay = 10 # seconds # We need to retry getting the plan because sometimes if the plan is created as part of function app, # it can take a couple of tries before it gets the plan for _ in range(5): plan_info = client.app_service_plans.get(parse_plan_id['resource_group'], parse_plan_id['name']) if plan_info is not None: break time.sleep(retry_delay) if build_remote and not app.reserved: raise CLIError('Remote build is only available on Linux function apps') is_consumption = is_plan_consumption(cmd, plan_info) if (not build_remote) and is_consumption and app.reserved: return upload_zip_to_storage(cmd, resource_group_name, name, src, slot) if build_remote: add_remote_build_app_settings(cmd, resource_group_name, name, slot) else: remove_remote_build_app_settings(cmd, resource_group_name, name, slot) return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot) def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None): return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot) def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None): logger.warning("Getting scm site credentials for zip deployment") user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot) try: scm_url = _get_scm_url(cmd, resource_group_name, name, slot) except ValueError: raise CLIError('Failed to fetch scm url for function app') zip_url = scm_url + '/api/zipdeploy?isAsync=true' deployment_status_url = scm_url + '/api/deployments/latest' import urllib3 authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password)) headers = authorization headers['Content-Type'] = 'application/octet-stream' headers['Cache-Control'] = 'no-cache' headers['User-Agent'] = get_az_user_agent() import requests import os from azure.cli.core.util import should_disable_connection_verify # Read file content with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs: zip_content = fs.read() logger.warning("Starting zip deployment. This operation can take a while to complete ...") res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify()) logger.warning("Deployment endpoint responded with status code %d", res.status_code) # check if there's an ongoing process if res.status_code == 409: raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. 
" "Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting " "is removed.".format(deployment_status_url)) # check the status of async deployment response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url, authorization, timeout) return response def add_remote_build_app_settings(cmd, resource_group_name, name, slot): settings = get_app_settings(cmd, resource_group_name, name, slot) scm_do_build_during_deployment = None website_run_from_package = None enable_oryx_build = None app_settings_should_not_have = [] app_settings_should_contain = {} for keyval in settings: value = keyval['value'].lower() if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT': scm_do_build_during_deployment = value in ('true', '1') if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE': website_run_from_package = value if keyval['name'] == 'ENABLE_ORYX_BUILD': enable_oryx_build = value if scm_do_build_during_deployment is not True: logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true") update_app_settings(cmd, resource_group_name, name, [ "SCM_DO_BUILD_DURING_DEPLOYMENT=true" ], slot) app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true' if website_run_from_package: logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting") delete_app_settings(cmd, resource_group_name, name, [ "WEBSITE_RUN_FROM_PACKAGE" ], slot) app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE') if enable_oryx_build: logger.warning("Removing ENABLE_ORYX_BUILD app setting") delete_app_settings(cmd, resource_group_name, name, [ "ENABLE_ORYX_BUILD" ], slot) app_settings_should_not_have.append('ENABLE_ORYX_BUILD') # Wait for scm site to get the latest app settings if app_settings_should_not_have or app_settings_should_contain: logger.warning("Waiting SCM site to be updated with the latest app settings") scm_is_up_to_date = False retries = 10 while not scm_is_up_to_date and retries >= 0: scm_is_up_to_date = validate_app_settings_in_scm( cmd, resource_group_name, name, slot, should_contain=app_settings_should_contain, should_not_have=app_settings_should_not_have) retries -= 1 time.sleep(5) if retries < 0: logger.warning("App settings may not be propagated to the SCM site.") def remove_remote_build_app_settings(cmd, resource_group_name, name, slot): settings = get_app_settings(cmd, resource_group_name, name, slot) scm_do_build_during_deployment = None app_settings_should_contain = {} for keyval in settings: value = keyval['value'].lower() if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT': scm_do_build_during_deployment = value in ('true', '1') if scm_do_build_during_deployment is not False: logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false") update_app_settings(cmd, resource_group_name, name, [ "SCM_DO_BUILD_DURING_DEPLOYMENT=false" ], slot) app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false' # Wait for scm site to get the latest app settings if app_settings_should_contain: logger.warning("Waiting SCM site to be updated with the latest app settings") scm_is_up_to_date = False retries = 10 while not scm_is_up_to_date and retries >= 0: scm_is_up_to_date = validate_app_settings_in_scm( cmd, resource_group_name, name, slot, should_contain=app_settings_should_contain) retries -= 1 time.sleep(5) if retries < 0: logger.warning("App settings may not be propagated to the SCM site") def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None): settings = get_app_settings(cmd, resource_group_name, name, slot) 
storage_connection = None for keyval in settings: if keyval['name'] == 'AzureWebJobsStorage': storage_connection = str(keyval['value']) if storage_connection is None: raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting') container_name = "function-releases" blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4())) BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService') block_blob_service = BlockBlobService(connection_string=storage_connection) if not block_blob_service.exists(container_name): block_blob_service.create_container(container_name) # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3 def progress_callback(current, total): total_length = 30 filled_length = int(round(total_length * current) / float(total)) percents = round(100.0 * current / float(total), 1) progress_bar = '=' * filled_length + '-' * (total_length - filled_length) progress_message = 'Uploading {} {}%'.format(progress_bar, percents) cmd.cli_ctx.get_progress_controller().add(message=progress_message) block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True, progress_callback=progress_callback) now = datetime.datetime.now() blob_start = now - datetime.timedelta(minutes=10) blob_end = now + datetime.timedelta(weeks=520) BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions') blob_token = block_blob_service.generate_blob_shared_access_signature(container_name, blob_name, permission=BlobPermissions(read=True), expiry=blob_end, start=blob_start) blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token) website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri) update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting]) client = web_client_factory(cmd.cli_ctx) try: logger.info('\nSyncing Triggers...') if slot is not None: client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot) else: client.web_apps.sync_function_triggers(resource_group_name, name) except CloudError as ce: # This SDK function throws an error if Status Code is 200 if ce.status_code != 200: raise ce def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name, setting_properties, slot=None, client=None): client = client or web_client_factory(cli_ctx) operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot') if slot is None: return operation(resource_group_name, name, str, setting_properties) return operation(resource_group_name, name, slot, str, setting_properties) def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None): webapp = app_instance if not app_instance: # when the routine is invoked as a help method, not through commands webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot) if not webapp: raise CLIError("'{}' app doesn't exist".format(name)) _rename_server_farm_props(webapp) _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot) return webapp # for generic updater def get_webapp(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot) def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None, skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs): instance = kwargs['parameters'] client = 
web_client_factory(cmd.cli_ctx) updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance, skip_dns_registration=skip_dns_registration, skip_custom_domain_verification=skip_custom_domain_verification, force_dns_registration=force_dns_registration, ttl_in_seconds=ttl_in_seconds) if slot: kwargs['slot'] = slot return updater(**kwargs) def update_webapp(instance, client_affinity_enabled=None, https_only=None): if 'function' in instance.kind: raise CLIError("please use 'az functionapp update' to update this function app") if client_affinity_enabled is not None: instance.client_affinity_enabled = client_affinity_enabled == 'true' if https_only is not None: instance.https_only = https_only == 'true' return instance def update_functionapp(cmd, instance, plan=None): client = web_client_factory(cmd.cli_ctx) if plan is not None: if is_valid_resource_id(plan): dest_parse_result = parse_resource_id(plan) dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'], dest_parse_result['name']) else: dest_plan_info = client.app_service_plans.get(instance.resource_group, plan) if dest_plan_info is None: raise CLIError("The plan '{}' doesn't exist".format(plan)) validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info) instance.server_farm_id = dest_plan_info.id return instance def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance): general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.' src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id) src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'], src_parse_result['name']) if src_plan_info is None: raise CLIError('Could not determine the current plan of the functionapp') if not (is_plan_consumption(cmd, src_plan_info) or is_plan_elastic_premium(cmd, src_plan_info)): raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg) if not (is_plan_consumption(cmd, dest_plan_instance) or is_plan_elastic_premium(cmd, dest_plan_instance)): raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. 
' + general_switch_msg) def set_functionapp(cmd, resource_group_name, name, **kwargs): instance = kwargs['parameters'] if 'function' not in instance.kind: raise CLIError('Not a function app to update') client = web_client_factory(cmd.cli_ctx) return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance) def list_webapp(cmd, resource_group_name=None): result = _list_app(cmd.cli_ctx, resource_group_name) return [r for r in result if 'function' not in r.kind] def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None): result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot) return sorted(result, key=lambda site: site.deleted_site_id) def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None): DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest') request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only) return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request) def list_function_app(cmd, resource_group_name=None): result = _list_app(cmd.cli_ctx, resource_group_name) return [r for r in result if 'function' in r.kind] def _list_app(cli_ctx, resource_group_name=None): client = web_client_factory(cli_ctx) if resource_group_name: result = list(client.web_apps.list_by_resource_group(resource_group_name)) else: result = list(client.web_apps.list()) for webapp in result: _rename_server_farm_props(webapp) return result def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None): client = web_client_factory(cli_ctx) locations = _get_deleted_apps_locations(cli_ctx) result = list() for location in locations: result = result + list(client.deleted_web_apps.list_by_location(location)) if resource_group_name: result = [r for r in result if r.resource_group == resource_group_name] if name: result = [r for r in result if r.deleted_site_name.lower() == name.lower()] if slot: result = [r for r in result if r.slot.lower() == slot.lower()] return result def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None): ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity') def getter(): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot) def setter(webapp): webapp.identity = ManagedServiceIdentity(type='SystemAssigned') poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp) return LongRunningOperation(cmd.cli_ctx)(poller) from azure.cli.core.commands.arm import assign_identity as _assign_identity webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope) return webapp.identity def show_identity(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity def remove_identity(cmd, resource_group_name, name, slot=None): ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity') def getter(): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot) def setter(webapp): webapp.identity = ManagedServiceIdentity(type='None') poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp) return LongRunningOperation(cmd.cli_ctx)(poller) from azure.cli.core.commands.arm import assign_identity as _assign_identity webapp = _assign_identity(cmd.cli_ctx, getter, setter) return webapp.identity def 
get_auth_settings(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot) def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument client_id=None, token_store_enabled=None, # pylint: disable=unused-argument token_refresh_extension_hours=None, # pylint: disable=unused-argument allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument microsoft_account_client_secret=None, # pylint: disable=unused-argument microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument auth_settings = get_auth_settings(cmd, resource_group_name, name, slot) UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction') if action == 'AllowAnonymous': auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous elif action: auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page auth_settings.default_provider = AUTH_TYPES[action] import inspect frame = inspect.currentframe() bool_flags = ['enabled', 'token_store_enabled'] # note: getargvalues is used already in azure.cli.core.commands. # and no simple functional replacement for this deprecating method for 3.5 args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method for arg in args[2:]: if values.get(arg, None): setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true') return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings) def list_runtimes(cmd, linux=False): client = web_client_factory(cmd.cli_ctx) runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux) return [s['displayName'] for s in runtime_helper.stacks] def _rename_server_farm_props(webapp): # Should be renamed in SDK in a future release setattr(webapp, 'app_service_plan_id', webapp.server_farm_id) del webapp.server_farm_id return webapp def delete_function_app(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot) def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None, keep_dns_registration=None, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: client.web_apps.delete_slot(resource_group_name, name, slot, delete_metrics=False if keep_metrics else None, delete_empty_server_farm=False if keep_empty_plan else None, skip_dns_registration=False if keep_dns_registration else None) else: client.web_apps.delete(resource_group_name, name, delete_metrics=False if keep_metrics else None, delete_empty_server_farm=False if keep_empty_plan else None, skip_dns_registration=False if keep_dns_registration else None) def stop_webapp(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot) def start_webapp(cmd, resource_group_name, name, slot=None): return 
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot) def restart_webapp(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot) def get_site_configs(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot) def get_app_settings(cmd, resource_group_name, name, slot=None): result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot) client = web_client_factory(cmd.cli_ctx) slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names return _build_app_settings_output(result.properties, slot_app_setting_names) # Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint # should_have [] is a list of app settings which are expected to be set # should_not_have [] is a list of app settings which are expected to be absent # should_contain {} is a dictionary of app settings which are expected to be set with precise values # Return True if validation succeeded def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None, should_have=None, should_not_have=None, should_contain=None): scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot) scm_setting_keys = set(scm_settings.keys()) if should_have and not set(should_have).issubset(scm_setting_keys): return False if should_not_have and set(should_not_have).intersection(scm_setting_keys): return False temp_setting = scm_settings.copy() temp_setting.update(should_contain or {}) if temp_setting != scm_settings: return False return True @retryable_method(3, 5) def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None): scm_url = _get_scm_url(cmd, resource_group_name, name, slot) settings_url = '{}/api/settings'.format(scm_url) username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot) headers = { 'Content-Type': 'application/octet-stream', 'Cache-Control': 'no-cache', 'User-Agent': get_az_user_agent() } import requests response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3) return response.json() or {} def get_connection_strings(cmd, resource_group_name, name, slot=None): result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot) client = web_client_factory(cmd.cli_ctx) slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \ .connection_string_names or [] result = [{'name': p, 'value': result.properties[p].value, 'type':result.properties[p].type, 'slotSetting': p in slot_constr_names} for p in result.properties] return result def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None): client = web_client_factory(cmd.cli_ctx) result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_azure_storage_accounts', slot) slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \ .azure_storage_config_names or [] return [{'name': p, 'value': result.properties[p], 'slotSetting': p in slot_azure_storage_config_names} for p in result.properties] def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None): profiles = list_publish_profiles(cmd, resource_group_name, name, slot) url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 
'FTP') setattr(webapp, 'ftpPublishingUrl', url) return webapp def _format_fx_version(custom_image_name, container_config_type=None): fx_version = custom_image_name.strip() fx_version_lower = fx_version.lower() # handles case of only spaces if fx_version: if container_config_type: fx_version = '{}|{}'.format(container_config_type, custom_image_name) elif not fx_version_lower.startswith('docker|'): fx_version = '{}|{}'.format('DOCKER', custom_image_name) else: fx_version = ' ' return fx_version def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None): fx_version = _format_fx_version(custom_image_name) web_app = get_webapp(cmd, resource_group_name, name, slot) linux_fx = fx_version if web_app.reserved else None windows_fx = fx_version if web_app.is_xenon else None return update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot) def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None): return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot) def _get_fx_version(cmd, resource_group_name, name, slot=None): site_config = get_site_configs(cmd, resource_group_name, name, slot) return site_config.linux_fx_version or site_config.windows_fx_version or '' def url_validator(url): try: result = urlparse(url) return all([result.scheme, result.netloc, result.path]) except ValueError: return False def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None): from base64 import b64decode linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot) if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]): raise CLIError("Cannot decode config that is not one of the" " following types: {}".format(','.join(MULTI_CONTAINER_TYPES))) return b64decode(linux_fx_version.split('|')[1].encode('utf-8')) def _get_linux_multicontainer_encoded_config_from_file(file_name): from base64 import b64encode config_file_bytes = None if url_validator(file_name): response = urlopen(file_name, context=_ssl_context()) config_file_bytes = response.read() else: with open(file_name, 'rb') as f: config_file_bytes = f.read() # Decode base64 encoded byte array into string return b64encode(config_file_bytes).decode('utf-8') # for any modifications to the non-optional parameters, adjust the reflection logic accordingly # in the method # pylint: disable=unused-argument def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None, windows_fx_version=None, pre_warmed_instance_count=None, php_version=None, python_version=None, net_framework_version=None, java_version=None, java_container=None, java_container_version=None, remote_debugging_enabled=None, web_sockets_enabled=None, always_on=None, auto_heal_enabled=None, use32_bit_worker_process=None, min_tls_version=None, http20_enabled=None, app_command_line=None, ftps_state=None, generic_configurations=None): configs = get_site_configs(cmd, resource_group_name, name, slot) if number_of_workers is not None: number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20) if linux_fx_version: if linux_fx_version.strip().lower().startswith('docker|'): update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"]) else: delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"]) if pre_warmed_instance_count is not None: pre_warmed_instance_count 
= validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count, min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on', 'auto_heal_enabled',
                  'use32_bit_worker_process', 'http20_enabled']
    int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands, and there is no simple
    # functional replacement for this deprecated method on Python 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')

    generic_configurations = generic_configurations or []
    result = {}
    for s in generic_configurations:
        try:
            result.update(get_json_object(s))
        except CLIError:
            config_name, value = s.split('=', 1)
            result[config_name] = value

    for config_name, value in result.items():
        setattr(configs, config_name, value)

    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)


def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)

    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    is_slot_settings = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
            slot_cfg_names.app_setting_names.remove(setting_name)
            is_slot_settings = True

    if is_slot_settings:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)

    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)


def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)

    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    is_slot_settings = False
    azure_storage_accounts.properties.pop(custom_id, None)
    if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
        slot_cfg_names.azure_storage_config_names.remove(custom_id)
        is_slot_settings = True

    if is_slot_settings:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts',
                                         azure_storage_accounts.properties, slot, client)

    return result.properties


def _ssl_context():
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)

    return ssl.create_default_context()


def _build_app_settings_output(app_settings, slot_cfg_names):
    slot_cfg_names = slot_cfg_names or []
    return [{'name': p,
             'value': app_settings[p],
             'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]


def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings | --slot-settings')

    settings = settings or []
    slot_settings = slot_settings or []

    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=', connection string should not have '=' in the name
        conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)

    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)

    if slot_settings:
        new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names += new_slot_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return result.properties


def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)

    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    is_slot_settings = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
            slot_cfg_names.connection_string_names.remove(setting_name)
            is_slot_settings = True

    if is_slot_settings:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)

    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)


CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']


def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)

    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry.
Trying to look up...') parsed = urlparse(docker_registry_server_url) registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name) except Exception as ex: # pylint: disable=broad-except logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed if docker_registry_server_user is not None: settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user) if docker_registry_server_password is not None: settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password) if docker_custom_image_name is not None: _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot) if websites_enable_app_service_storage: settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage) if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long update_app_settings(cmd, resource_group_name, name, settings, slot) settings = get_app_settings(cmd, resource_group_name, name, slot) if multicontainer_config_file and multicontainer_config_type: encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file) linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type) update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot) elif multicontainer_config_file or multicontainer_config_type: logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE') return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings, slot=slot)) def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None, docker_custom_image_name=None, docker_registry_server_user=None, docker_registry_server_password=None, slot=None): return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url, docker_custom_image_name, docker_registry_server_user, None, docker_registry_server_password, multicontainer_config_type=None, multicontainer_config_file=None, slot=slot) def _get_acr_cred(cli_ctx, registry_name): from azure.mgmt.containerregistry import ContainerRegistryManagementClient from azure.cli.core.commands.parameters import get_resources_in_subscription client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries') result = [item for item in result if item.name.lower() == registry_name] if not result or len(result) > 1: raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name)) resource_group_name = parse_resource_id(result[0].id)['resource_group'] registry = client.get(resource_group_name, registry_name) if registry.admin_user_enabled: # pylint: disable=no-member cred = client.list_credentials(resource_group_name, registry_name) return cred.username, cred.passwords[0].value raise CLIError("Failed to retrieve container registry credentials. 
Please either provide the " "credentials or run 'az acr update -n {} --admin-enabled true' to enable " "admin first.".format(registry_name)) def delete_container_settings(cmd, resource_group_name, name, slot=None): _delete_linux_fx_version(cmd, resource_group_name, name, slot) delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot) def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None): settings = get_app_settings(cmd, resource_group_name, name, slot) return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings, show_multicontainer_config, slot)) def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None): return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot) def _filter_for_container_settings(cmd, resource_group_name, name, settings, show_multicontainer_config=None, slot=None): result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES] fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip() if fx_version: added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME', 'value': fx_version} result.append(added_image_name) if show_multicontainer_config: decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot) decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED', 'value': decoded_value} result.append(decoded_image_name) return result # TODO: remove this when #3660(service tracking issue) is resolved def _mask_creds_related_appsettings(settings): for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]: settings[x] = None return settings def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None): HostNameBinding = cmd.get_models('HostNameBinding') client = web_client_factory(cmd.cli_ctx) webapp = client.web_apps.get(resource_group_name, webapp_name) if not webapp: raise CLIError("'{}' app doesn't exist".format(webapp_name)) binding = HostNameBinding(location=webapp.location, site_name=webapp.name) if slot is None: return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding) return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding, slot) def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None): client = web_client_factory(cmd.cli_ctx) if slot is None: return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname) return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname) def list_hostnames(cmd, resource_group_name, webapp_name, slot=None): result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_host_name_bindings', slot)) for r in result: r.name = r.name.split('/')[-1] return result def get_external_ip(cmd, resource_group_name, webapp_name): SslState = cmd.get_models('SslState') # logics here are ported from portal client = web_client_factory(cmd.cli_ctx) webapp = client.web_apps.get(resource_group_name, webapp_name) if not webapp: raise CLIError("'{}' app doesn't exist".format(webapp_name)) if webapp.hosting_environment_profile: address = client.app_service_environments.list_vips( resource_group_name, webapp.hosting_environment_profile.name) if address.internal_ip_address: ip_address = address.internal_ip_address else: vip = next((s for s in 
webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None) ip_address = vip.virtual_ip if vip else address.service_ip_address else: ip_address = _resolve_hostname_through_dns(webapp.default_host_name) return {'ip': ip_address} def _resolve_hostname_through_dns(hostname): import socket return socket.gethostbyname(hostname) def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None): Site, SiteConfig = cmd.get_models('Site', 'SiteConfig') client = web_client_factory(cmd.cli_ctx) site = client.web_apps.get(resource_group_name, webapp) if not site: raise CLIError("'{}' app doesn't exist".format(webapp)) if 'functionapp' in site.kind: raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp)) location = site.location slot_def = Site(server_farm_id=site.server_farm_id, location=location) slot_def.site_config = SiteConfig() poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot) result = LongRunningOperation(cmd.cli_ctx)(poller) if configuration_source: update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source) result.name = result.name.split('/')[-1] return result def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None): Site = cmd.get_models('Site') client = web_client_factory(cmd.cli_ctx) site = client.web_apps.get(resource_group_name, name) if not site: raise CLIError("'{}' function app doesn't exist".format(name)) location = site.location slot_def = Site(server_farm_id=site.server_farm_id, location=location) poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot) result = LongRunningOperation(cmd.cli_ctx)(poller) if configuration_source: update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source) result.name = result.name.split('/')[-1] return result def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None): clone_from_prod = configuration_source.lower() == webapp.lower() site_config = get_site_configs(cmd, resource_group_name, webapp, None if clone_from_prod else configuration_source) _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_configuration', slot, site_config) # slot create doesn't clone over the app-settings and connection-strings, so we do it here # also make sure slot settings don't get propagated. 
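    # copy app settings and connection strings from the configuration source, skipping any entries
    # marked as slot settings, since slot-sticky values should stay with their original slot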
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp) src_slot = None if clone_from_prod else configuration_source app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'list_application_settings', src_slot) for a in slot_cfg_names.app_setting_names or []: app_settings.properties.pop(a, None) connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'list_connection_strings', src_slot) for a in slot_cfg_names.connection_string_names or []: connection_strings.properties.pop(a, None) _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_application_settings', app_settings.properties, slot, client) _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_connection_strings', connection_strings.properties, slot, client) def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals manual_integration=None, git_token=None, slot=None, cd_app_type=None, app_working_dir=None, nodejs_task_runner=None, python_framework=None, python_version=None, cd_account_create=None, cd_project_url=None, test=None, slot_swap=None, private_repo_username=None, private_repo_password=None): client = web_client_factory(cmd.cli_ctx) location = _get_location_from_webapp(client, resource_group_name, name) if cd_project_url: # Add default values cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type python_framework = 'Django' if python_framework is None else python_framework python_version = 'Python 3.5.3 x86' if python_version is None else python_version webapp_list = None if test is None else list_webapp(resource_group_name) vsts_provider = VstsContinuousDeliveryProvider() cd_app_type_details = { 'cd_app_type': cd_app_type, 'app_working_dir': app_working_dir, 'nodejs_task_runner': nodejs_task_runner, 'python_framework': python_framework, 'python_version': python_version } try: status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url, branch, git_token, slot_swap, cd_app_type_details, cd_project_url, cd_account_create, location, test, private_repo_username, private_repo_password, webapp_list) except RuntimeError as ex: raise CLIError(ex) logger.warning(status.status_message) return status non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework, python_version, cd_account_create, test, slot_swap] if any(non_vsts_params): raise CLIError('Following parameters are of no use when cd_project_url is None: ' + 'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' + 'python_version, cd_account_create, test, slot_swap') from azure.mgmt.web.models import SiteSourceControl, SourceControl if git_token: sc = SourceControl(location=location, source_control_name='GitHub', token=git_token) client.update_source_control('GitHub', sc) source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch, is_manual_integration=manual_integration, is_mercurial=(repository_type != 'git')) # SCC config can fail if previous commands caused SCMSite shutdown, so retry here. 
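    # up to 5 attempts, 5 seconds apart; only 50x responses are retried, any other error is re-raised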
for i in range(5): try: poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_source_control', slot, source_control) return LongRunningOperation(cmd.cli_ctx)(poller) except Exception as ex: # pylint: disable=broad-except import re ex = ex_handler_factory(no_throw=True)(ex) # for non server errors(50x), just throw; otherwise retry 4 times if i == 4 or not re.findall(r'\(50\d\)', str(ex)): raise logger.warning('retrying %s/4', i + 1) time.sleep(5) # retry in a moment def update_git_token(cmd, git_token=None): ''' Update source control token cached in Azure app service. If no token is provided, the command will clean up existing token. ''' client = web_client_factory(cmd.cli_ctx) from azure.mgmt.web.models import SourceControl sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '') return client.update_source_control('GitHub', sc) def show_source_control(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot) def delete_source_control(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot) def enable_local_git(cmd, resource_group_name, name, slot=None): SiteConfigResource = cmd.get_models('SiteConfigResource') client = web_client_factory(cmd.cli_ctx) location = _get_location_from_webapp(client, resource_group_name, name) site_config = SiteConfigResource(location=location) site_config.scm_type = 'LocalGit' if slot is None: client.web_apps.create_or_update_configuration(resource_group_name, name, site_config) else: client.web_apps.create_or_update_configuration_slot(resource_group_name, name, site_config, slot) return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)} def sync_site_repo(cmd, resource_group_name, name, slot=None): try: return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot) except CloudError as ex: # Because of bad spec, sdk throws on 200. 
We capture it here if ex.status_code not in [200, 204]: raise ex def list_app_service_plans(cmd, resource_group_name=None): client = web_client_factory(cmd.cli_ctx) if resource_group_name is None: plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites" else: plans = list(client.app_service_plans.list_by_resource_group(resource_group_name)) for plan in plans: # prune a few useless fields del plan.geo_region del plan.subscription return plans def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False, app_service_environment=None, sku='B1', number_of_workers=None, location=None, tags=None, no_wait=False): HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models( 'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan') sku = _normalize_sku(sku) _validate_asp_sku(app_service_environment, sku) if is_linux and hyper_v: raise CLIError('usage error: --is-linux | --hyper-v') client = web_client_factory(cmd.cli_ctx) if app_service_environment: if hyper_v: raise CLIError('Windows containers is not yet supported in app service environment') ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name) ase_def = HostingEnvironmentProfile(id=ase_id) ase_list = client.app_service_environments.list() ase_found = False for ase in ase_list: if ase.id.lower() == ase_id.lower(): location = ase.location ase_found = True break if not ase_found: raise CLIError("App service environment '{}' not found in subscription.".format(ase_id)) else: # Non-ASE ase_def = None if location is None: location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name) # the api is odd on parameter naming, have to live with it for now sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers) plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def, reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name, per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def) return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name, resource_group_name=resource_group_name, app_service_plan=plan_def) def update_app_service_plan(instance, sku=None, number_of_workers=None): if number_of_workers is None and sku is None: logger.warning('No update is done. 
Specify --sku and/or --number-of-workers.') sku_def = instance.sku if sku is not None: sku = _normalize_sku(sku) sku_def.tier = get_sku_name(sku) sku_def.name = sku if number_of_workers is not None: sku_def.capacity = number_of_workers instance.sku = sku_def return instance def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None): instance = update_app_service_plan(instance, sku, number_of_workers) if max_burst is not None: if not is_plan_elastic_premium(cmd, instance): raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans") max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20) instance.maximum_elastic_worker_count = max_burst if number_of_workers is not None: number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances', number_of_workers, min_val=0, max_val=20) return update_app_service_plan(instance, sku, number_of_workers) def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None): try: return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'get_backup_configuration', slot) except Exception: # pylint: disable=broad-except raise CLIError('Backup configuration not found') def list_backups(cmd, resource_group_name, webapp_name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot) def create_backup(cmd, resource_group_name, webapp_name, storage_account_url, db_name=None, db_type=None, db_connection_string=None, backup_name=None, slot=None): BackupRequest = cmd.get_models('BackupRequest') client = web_client_factory(cmd.cli_ctx) if backup_name and backup_name.lower().endswith('.zip'): backup_name = backup_name[:-4] db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string) backup_request = BackupRequest(backup_name=backup_name, storage_account_url=storage_account_url, databases=db_setting) if slot: return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot) return client.web_apps.backup(resource_group_name, webapp_name, backup_request) def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None, frequency=None, keep_at_least_one_backup=None, retention_period_in_days=None, db_name=None, db_connection_string=None, db_type=None, backup_name=None, slot=None): DefaultErrorResponseException, BackupSchedule, BackupRequest = cmd.get_models( 'DefaultErrorResponseException', 'BackupSchedule', 'BackupRequest') configuration = None if backup_name and backup_name.lower().endswith('.zip'): backup_name = backup_name[:-4] if not backup_name: backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M')) try: configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'get_backup_configuration', slot) except DefaultErrorResponseException: # No configuration set yet if not all([storage_account_url, frequency, retention_period_in_days, keep_at_least_one_backup]): raise CLIError('No backup configuration found. A configuration must be created. 
' + 'Usage: --container-url URL --frequency TIME --retention DAYS ' + '--retain-one TRUE/FALSE') # If arguments were not specified, use the values in the current backup schedule if storage_account_url is None: storage_account_url = configuration.storage_account_url if retention_period_in_days is None: retention_period_in_days = configuration.backup_schedule.retention_period_in_days if keep_at_least_one_backup is None: keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup else: keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true' if frequency: # Parse schedule frequency frequency_num, frequency_unit = _parse_frequency(cmd, frequency) else: frequency_num = configuration.backup_schedule.frequency_interval frequency_unit = configuration.backup_schedule.frequency_unit if configuration and configuration.databases: db = configuration.databases[0] db_type = db_type or db.database_type db_name = db_name or db.name db_connection_string = db_connection_string or db.connection_string db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string) backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name, keep_at_least_one_backup=keep_at_least_one_backup, retention_period_in_days=retention_period_in_days) backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule, enabled=True, storage_account_url=storage_account_url, databases=db_setting) return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration', slot, backup_request) def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name, db_name=None, db_type=None, db_connection_string=None, target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None): RestoreRequest = cmd.get_models('RestoreRequest') client = web_client_factory(cmd.cli_ctx) storage_blob_name = backup_name if not storage_blob_name.lower().endswith('.zip'): storage_blob_name += '.zip' db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string) restore_request = RestoreRequest(storage_account_url=storage_account_url, blob_name=storage_blob_name, overwrite=overwrite, site_name=target_name, databases=db_setting, ignore_conflicting_host_names=ignore_hostname_conflict) if slot: return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot) return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request) def list_snapshots(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots', slot) def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name source_resource_group=None, source_name=None, source_slot=None): from azure.cli.core.commands.client_factory import get_subscription_id SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest') client = web_client_factory(cmd.cli_ctx) recover_config = not restore_content_only if all([source_resource_group, source_name]): # Restore from source app to target app sub_id = get_subscription_id(cmd.cli_ctx) source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \ "/providers/Microsoft.Web/sites/" + source_name if source_slot: source_id = source_id + "/slots/" + source_slot 
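        # restore onto the target app from another app/slot's snapshot, identified by its full resource ID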
source = SnapshotRecoverySource(id=source_id) request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source, recover_configuration=recover_config) if slot: return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot) return client.web_apps.restore_snapshot(resource_group_name, name, request) if any([source_resource_group, source_name]): raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used') # Overwrite app with its own snapshot request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config) if slot: return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot) return client.web_apps.restore_snapshot(resource_group_name, name, request) # pylint: disable=inconsistent-return-statements def _create_db_setting(cmd, db_name, db_type, db_connection_string): DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting') if all([db_name, db_type, db_connection_string]): return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)] if any([db_name, db_type, db_connection_string]): raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING') def _parse_frequency(cmd, frequency): FrequencyUnit = cmd.get_models('FrequencyUnit') unit_part = frequency.lower()[-1] if unit_part == 'd': frequency_unit = FrequencyUnit.day elif unit_part == 'h': frequency_unit = FrequencyUnit.hour else: raise CLIError('Frequency must end with d or h for "day" or "hour"') try: frequency_num = int(frequency[:-1]) except ValueError: raise CLIError('Frequency must start with a number') if frequency_num < 0: raise CLIError('Frequency must be positive') return frequency_num, frequency_unit def _get_location_from_resource_group(cli_ctx, resource_group_name): client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) group = client.resource_groups.get(resource_group_name) return group.location def _get_location_from_webapp(client, resource_group_name, webapp): webapp = client.web_apps.get(resource_group_name, webapp) if not webapp: raise CLIError("'{}' app doesn't exist".format(webapp)) return webapp.location def _get_deleted_apps_locations(cli_ctx): client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) web_provider = client.providers.get('Microsoft.Web') del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None) if del_sites_resource: return del_sites_resource.locations return [] def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None): user = client.get_publishing_user() result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot) parsed = urlparse(result.repo_url) return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name, parsed.netloc, name) def _get_scm_url(cmd, resource_group_name, name, slot=None): from azure.mgmt.web.models import HostType webapp = show_webapp(cmd, resource_group_name, name, slot=slot) for host in webapp.host_name_ssl_states or []: if host.host_type == HostType.repository: return "https://{}".format(host.name) # this should not happen, but throw anyway raise ValueError('Failed to retrieve Scm Uri') def get_publishing_user(cmd): client = web_client_factory(cmd.cli_ctx) return client.get_publishing_user() def set_deployment_user(cmd, user_name, password=None): ''' Update deployment 
credentials.(Note, all webapps in your subscription will be impacted) ''' User = cmd.get_models('User') client = web_client_factory(cmd.cli_ctx) user = User(publishing_user_name=user_name) if password is None: try: password = prompt_pass(msg='Password: ', confirm=True) except NoTTYException: raise CLIError('Please specify both username and password in non-interactive mode.') user.publishing_password = password return client.update_publishing_user(user) def list_publishing_credentials(cmd, resource_group_name, name, slot=None): content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot) return content.result() def list_publish_profiles(cmd, resource_group_name, name, slot=None): import xmltodict content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_publishing_profile_xml_with_secrets', slot) full_xml = '' for f in content: full_xml += f.decode() profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile'] converted = [] for profile in profiles: new = {} for key in profile: # strip the leading '@' xmltodict put in for attributes new[key.lstrip('@')] = profile[key] converted.append(new) return converted def enable_cd(cmd, resource_group_name, name, enable, slot=None): settings = [] settings.append("DOCKER_ENABLE_CI=" + enable) update_app_settings(cmd, resource_group_name, name, settings, slot) return show_container_cd_url(cmd, resource_group_name, name, slot) def show_container_cd_url(cmd, resource_group_name, name, slot=None): settings = get_app_settings(cmd, resource_group_name, name, slot) docker_enabled = False for setting in settings: if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true': docker_enabled = True break cd_settings = {} cd_settings['DOCKER_ENABLE_CI'] = docker_enabled if docker_enabled: credentials = list_publishing_credentials(cmd, resource_group_name, name, slot) if credentials: cd_url = credentials.scm_uri + '/docker/hook' cd_settings['CI_CD_URL'] = cd_url else: cd_settings['CI_CD_URL'] = '' return cd_settings def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False): url = _get_url(cmd, resource_group_name, name, slot) open_page_in_browser(url) if logs: get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot) def _get_url(cmd, resource_group_name, name, slot=None): SslState = cmd.get_models('SslState') site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot) if not site: raise CLIError("'{}' app doesn't exist".format(name)) url = site.enabled_host_names[0] # picks the custom domain URL incase a domain is assigned ssl_host = next((h for h in site.host_name_ssl_states if h.ssl_state != SslState.disabled), None) return ('https' if ssl_host else 'http') + '://' + url # TODO: expose new blob suport def config_diagnostics(cmd, resource_group_name, name, level=None, application_logging=None, web_server_logging=None, docker_container_logging=None, detailed_error_messages=None, failed_request_tracing=None, slot=None): from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig, SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig, EnabledConfig) client = web_client_factory(cmd.cli_ctx) # TODO: ensure we call get_site only once site = client.web_apps.get(resource_group_name, name) if not site: raise CLIError("'{}' app doesn't exist".format(name)) location = site.location application_logs = None if application_logging is not None: if not application_logging: level = 
'Off' elif level is None: level = 'Error' fs_log = FileSystemApplicationLogsConfig(level=level) application_logs = ApplicationLogsConfig(file_system=fs_log) http_logs = None server_logging_option = web_server_logging or docker_container_logging if server_logging_option: # TODO: az blob storage log config currently not in use, will be impelemented later. # Tracked as Issue: #4764 on Github filesystem_log_config = None turned_on = server_logging_option != 'off' if server_logging_option in ['filesystem', 'off']: # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3, enabled=turned_on) http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None) detailed_error_messages_logs = (None if detailed_error_messages is None else EnabledConfig(enabled=detailed_error_messages)) failed_request_tracing_logs = (None if failed_request_tracing is None else EnabledConfig(enabled=failed_request_tracing)) site_log_config = SiteLogsConfig(location=location, application_logs=application_logs, http_logs=http_logs, failed_requests_tracing=failed_request_tracing_logs, detailed_error_messages=detailed_error_messages_logs) return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config', slot, site_log_config) def show_diagnostic_settings(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot) def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None): client = web_client_factory(cmd.cli_ctx) site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot) site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production') return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot) def list_slots(cmd, resource_group_name, webapp): client = web_client_factory(cmd.cli_ctx) slots = list(client.web_apps.list_slots(resource_group_name, webapp)) for slot in slots: slot.name = slot.name.split('/')[-1] setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name']) del slot.server_farm_id return slots def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'): client = web_client_factory(cmd.cli_ctx) if action == 'swap': poller = client.web_apps.swap_slot_slot(resource_group_name, webapp, slot, (target_slot or 'production'), True) return poller if action == 'preview': if target_slot is None: result = client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot, True) else: result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, target_slot, True) return result # we will reset both source slot and target slot if target_slot is None: client.web_apps.reset_production_slot_config(resource_group_name, webapp) else: client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot) return None def delete_slot(cmd, resource_group_name, webapp, slot): client = web_client_factory(cmd.cli_ctx) # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc... 
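    # this removes only the specified slot; the production site itself is left untouched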
client.web_apps.delete_slot(resource_group_name, webapp, slot) def set_traffic_routing(cmd, resource_group_name, name, distribution): RampUpRule = cmd.get_models('RampUpRule') client = web_client_factory(cmd.cli_ctx) site = client.web_apps.get(resource_group_name, name) if not site: raise CLIError("'{}' app doesn't exist".format(name)) configs = get_site_configs(cmd, resource_group_name, name) host_name_split = site.default_host_name.split('.', 1) host_name_suffix = '.' + host_name_split[1] host_name_val = host_name_split[0] configs.experiments.ramp_up_rules = [] for r in distribution: slot, percentage = r.split('=') action_host_name_slot = host_name_val + "-" + slot configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix, reroute_percentage=float(percentage), name=slot)) _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs) return configs.experiments.ramp_up_rules def show_traffic_routing(cmd, resource_group_name, name): configs = get_site_configs(cmd, resource_group_name, name) return configs.experiments.ramp_up_rules def clear_traffic_routing(cmd, resource_group_name, name): set_traffic_routing(cmd, resource_group_name, name, []) def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None): from azure.mgmt.web.models import CorsSettings configs = get_site_configs(cmd, resource_group_name, name, slot) if not configs.cors: configs.cors = CorsSettings() configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs) return result.cors def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None): configs = get_site_configs(cmd, resource_group_name, name, slot) if configs.cors: if allowed_origins: configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins] else: configs.cors.allowed_origins = [] configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs) return configs.cors def show_cors(cmd, resource_group_name, name, slot=None): configs = get_site_configs(cmd, resource_group_name, name, slot) return configs.cors def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None): scm_url = _get_scm_url(cmd, resource_group_name, name, slot) streaming_url = scm_url + '/logstream' if provider: streaming_url += ('/' + provider.lstrip('/')) user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot) t = threading.Thread(target=_get_log, args=(streaming_url, user, password)) t.daemon = True t.start() while True: time.sleep(100) # so that ctrl+c can stop the command def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None): scm_url = _get_scm_url(cmd, resource_group_name, name, slot) url = scm_url.rstrip('/') + '/dump' user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot) _get_log(url, user_name, password, log_file) logger.warning('Downloaded logs to %s', log_file) def _get_site_credential(cli_ctx, resource_group_name, name, slot=None): creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot) creds = creds.result() return (creds.publishing_user_name, creds.publishing_password) def _get_log(url, user_name, password, log_file=None): import certifi import urllib3 try: import 
urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass

    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    if log_file:  # download logs
        with open(log_file, 'wb') as f:
            while True:
                data = r.read(1024)
                if not data:
                    break
                f.write(data)
    else:  # streaming
        std_encoding = sys.stdout.encoding
        for chunk in r.stream():
            if chunk:
                # Extra encode() and decode() for stdout, which does not support 'utf-8'
                print(chunk.decode(encoding='utf-8', errors='replace')
                      .encode(std_encoding, errors='replace')
                      .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    r.release_conn()


def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param, webapp.location,
                                    resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)


def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
    return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)


def _get_cert(certificate_password, certificate_file):
    ''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint


def list_ssl_certs(cmd, resource_group_name):
    client = web_client_factory(cmd.cli_ctx)
    return client.certificates.list_by_resource_group(resource_group_name)


def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    client = web_client_factory(cmd.cli_ctx)
    webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
    for webapp_cert in webapp_certs:
        if webapp_cert.thumbprint == certificate_thumbprint:
            return client.certificates.delete(resource_group_name, webapp_cert.name)
    raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))


def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    kv_id = None
    if not is_valid_resource_id(key_vault):
        kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
        key_vaults = kv_client.vaults.list_by_subscription()
        for kv in key_vaults:
            if key_vault
== kv.name: kv_id = kv.id break else: kv_id = key_vault if kv_id is None: kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \ 'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \ '\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \ '--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \ 'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name) logger.warning(kv_msg) return kv_id_parts = parse_resource_id(kv_id) kv_name = kv_id_parts['name'] kv_resource_group_name = kv_id_parts['resource_group'] kv_subscription = kv_id_parts['subscription'] cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name) lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html' lnk_msg = 'Find more details here: {}'.format(lnk) if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription): logger.warning('Unable to verify Key Vault permissions.') logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission') logger.warning(lnk_msg) kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='', key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id) return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name, certificate_envelope=kv_cert_def) def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None): Certificate = cmd.get_models('Certificate') hostname = hostname.lower() client = web_client_factory(cmd.cli_ctx) webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot) if not webapp: slot_text = "Deployment slot {} in ".format(slot) if slot else '' raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name)) parsed_plan_id = parse_resource_id(webapp.server_farm_id) plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name']) if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED': raise CLIError('Managed Certificate is not supported on Free and Shared tier.') if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot): slot_text = " --slot {}".format(slot) if slot else "" raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. 
" "Use 'az webapp config hostname add --resource-group {2} " "--webapp-name {1}{3} --hostname {0}' " "to register the hostname.".format(hostname, name, resource_group_name, slot_text)) server_farm_id = webapp.server_farm_id location = webapp.location easy_cert_def = Certificate(location=location, canonical_name=hostname, server_farm_id=server_farm_id, password='') return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name, certificate_envelope=easy_cert_def) def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription): from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory from azure.cli.command_modules.role._client_factory import _graph_client_factory from azure.graphrbac.models import GraphErrorException from azure.cli.core.commands.client_factory import get_subscription_id subscription = get_subscription_id(cmd.cli_ctx) # Cannot check if key vault is in another subscription if subscription != key_vault_subscription: return False kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None) vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name) # Check for Microsoft.Azure.WebSites app registration AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd' AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714' graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals for policy in vault.properties.access_policies: try: sp = graph_sp_client.get(policy.object_id) if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID: for perm in policy.permissions.secrets: if perm == "Get": return True except GraphErrorException: pass # Lookup will fail for non service principals (users, groups, etc.) 
return False def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp, host_name, ssl_state, thumbprint, slot=None): Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState') updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name, ssl_state=ssl_state, thumbprint=thumbprint, to_update=True)], location=webapp.location, tags=webapp.tags) return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update', slot, updated_webapp) def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None): client = web_client_factory(cmd.cli_ctx) webapp = client.web_apps.get(resource_group_name, name) if not webapp: raise CLIError("'{}' app doesn't exist".format(name)) cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group'] webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name) for webapp_cert in webapp_certs: if webapp_cert.thumbprint == certificate_thumbprint: if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'): return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp, webapp_cert.host_names[0], ssl_type, certificate_thumbprint, slot) query_result = list_hostnames(cmd, resource_group_name, name, slot) hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result] to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp) for h in to_update: _update_host_name_ssl_state(cmd, resource_group_name, name, webapp, h, ssl_type, certificate_thumbprint, slot) return show_webapp(cmd, resource_group_name, name, slot) raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint)) def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None): SslState = cmd.get_models('SslState') return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot) def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None): SslState = cmd.get_models('SslState') return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, SslState.disabled, slot) def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp): # the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc matched = set() for hostname in hostnames_from_cert: if hostname.startswith('*'): for h in hostnames_in_webapp: if hostname[hostname.find('.'):] == h[h.find('.'):]: matched.add(h) elif hostname in hostnames_in_webapp: matched.add(hostname) return matched # help class handles runtime stack in format like 'node|6.1', 'php|5.5' class _StackRuntimeHelper(object): def __init__(self, cmd, client, linux=False): self._cmd = cmd self._client = client self._linux = linux self._stacks = [] def resolve(self, display_name): self._load_stacks() return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()), None) @property def stacks(self): self._load_stacks() return self._stacks @staticmethod def update_site_config(stack, site_config, cmd=None): for k, v in stack['configs'].items(): setattr(site_config, k, v) return site_config @staticmethod def update_site_appsettings(cmd, stack, site_config): NameValuePair = cmd.get_models('NameValuePair') if site_config.app_settings is None: site_config.app_settings = [] site_config.app_settings += [NameValuePair(name=k, 
value=v) for k, v in stack['configs'].items()] return site_config def _load_stacks(self): if self._stacks: return os_type = ('Linux' if self._linux else 'Windows') raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True) bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access json_value = bytes_value.decode('utf8') json_stacks = json.loads(json_value) stacks = json_stacks['value'] result = [] if self._linux: for properties in [(s['properties']) for s in stacks]: for major in properties['majorVersions']: default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']), None) result.append({ 'displayName': (default_minor['runtimeVersion'] if default_minor else major['runtimeVersion']) }) else: # Windows stacks config_mappings = { 'node': 'WEBSITE_NODE_DEFAULT_VERSION', 'python': 'python_version', 'php': 'php_version', 'aspnet': 'net_framework_version' } # get all stack version except 'java' for stack in stacks: if stack['name'] not in config_mappings: continue name, properties = stack['name'], stack['properties'] for major in properties['majorVersions']: default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']), None) result.append({ 'displayName': name + '|' + major['displayVersion'], 'configs': { config_mappings[name]: (default_minor['runtimeVersion'] if default_minor else major['runtimeVersion']) } }) # deal with java, which pairs with java container version java_stack = next((s for s in stacks if s['name'] == 'java')) java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers')) for java_version in java_stack['properties']['majorVersions']: for fx in java_container_stack['properties']['frameworks']: for fx_version in fx['majorVersions']: result.append({ 'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'], fx['display'], fx_version['displayVersion']), 'configs': { 'java_version': java_version['runtimeVersion'], 'java_container': fx['name'], 'java_container_version': fx_version['runtimeVersion'] } }) for r in result: r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in r['displayName'] else _StackRuntimeHelper.update_site_config) self._stacks = result def get_app_insights_key(cli_ctx, resource_group, name): appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient) appinsights = appinsights_client.components.get(resource_group, name) if appinsights is None or appinsights.instrumentation_key is None: raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group)) return appinsights.instrumentation_key def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku, number_of_workers=None, max_burst=None, location=None, tags=None): SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan') sku = _normalize_sku(sku) tier = get_sku_name(sku) if max_burst is not None: if tier.lower() != "elasticpremium": raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans") max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20) if number_of_workers is not None: number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count', number_of_workers, min_val=0, max_val=20) client = web_client_factory(cmd.cli_ctx) if location is None: location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name) sku_def = 
SkuDescription(tier=tier, name=sku, capacity=number_of_workers) plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def, reserved=(is_linux or None), maximum_elastic_worker_count=max_burst, hyper_v=None, name=name) return client.app_service_plans.create_or_update(resource_group_name, name, plan_def) def is_plan_consumption(cmd, plan_info): SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan') if isinstance(plan_info, AppServicePlan): if isinstance(plan_info.sku, SkuDescription): return plan_info.sku.tier.lower() == 'dynamic' return False def is_plan_elastic_premium(cmd, plan_info): SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan') if isinstance(plan_info, AppServicePlan): if isinstance(plan_info.sku, SkuDescription): return plan_info.sku.tier == 'ElasticPremium' return False def validate_and_convert_to_int(flag, val): try: return int(val) except ValueError: raise CLIError("Usage error: {} is expected to have an int value.".format(flag)) def validate_range_of_int_flag(flag_name, value, min_val, max_val): value = validate_and_convert_to_int(flag_name, value) if min_val > value or value > max_val: raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val, max_val)) return value def create_function(cmd, resource_group_name, name, storage_account, plan=None, os_type=None, functions_version=None, runtime=None, runtime_version=None, consumption_plan_location=None, app_insights=None, app_insights_key=None, disable_app_insights=None, deployment_source_url=None, deployment_source_branch='master', deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None, deployment_container_image_name=None, tags=None): # pylint: disable=too-many-statements, too-many-branches if functions_version is None: logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will " "be required. To create a 2.x function you would pass in the flag `--functions-version 2`") functions_version = '2' if deployment_source_url and deployment_local_git: raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git') if bool(plan) == bool(consumption_plan_location): raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION") SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair') docker_registry_server_url = parse_docker_image_name(deployment_container_image_name) site_config = SiteConfig(app_settings=[]) functionapp_def = Site(location=None, site_config=site_config, tags=tags) client = web_client_factory(cmd.cli_ctx) plan_info = None if runtime is not None: runtime = runtime.lower() if consumption_plan_location: locations = list_consumption_locations(cmd) location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None) if location is None: raise CLIError("Location is invalid. 
Use: az functionapp list-consumption-locations") functionapp_def.location = consumption_plan_location functionapp_def.kind = 'functionapp' # if os_type is None, the os type is windows is_linux = os_type and os_type.lower() == 'linux' else: # apps with SKU based plan if is_valid_resource_id(plan): parse_result = parse_resource_id(plan) plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name']) else: plan_info = client.app_service_plans.get(resource_group_name, plan) if not plan_info: raise CLIError("The plan '{}' doesn't exist".format(plan)) location = plan_info.location is_linux = plan_info.reserved functionapp_def.server_farm_id = plan functionapp_def.location = location if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name): raise CLIError( "usage error: --runtime RUNTIME required for linux functions apps without custom image.") if runtime: if is_linux and runtime not in LINUX_RUNTIMES: raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}." .format(', '.join(LINUX_RUNTIMES))) if not is_linux and runtime not in WINDOWS_RUNTIMES: raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}." .format(', '.join(WINDOWS_RUNTIMES))) site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime)) if runtime_version is not None: if runtime is None: raise CLIError('Must specify --runtime to use --runtime-version') allowed_versions = FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS[functions_version][runtime] if runtime_version not in allowed_versions: if runtime == 'dotnet': raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined ' 'by --functions-version. Dotnet version {} is not supported by Functions version {}.' .format(runtime_version, functions_version)) raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and ' '--functions-version {}. Supported versions are: {}.' .format(runtime_version, runtime, functions_version, ', '.join(allowed_versions))) if runtime == 'dotnet': logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by ' '--functions-version. 
Dotnet version will be %s for this function app.', FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION[functions_version][runtime]) con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account) if is_linux: functionapp_def.kind = 'functionapp,linux' functionapp_def.reserved = True is_consumption = consumption_plan_location is not None if not is_consumption: site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey', value=str(hexlify(urandom(32)).decode()).upper())) if deployment_container_image_name: functionapp_def.kind = 'functionapp,linux,container' site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME', value=deployment_container_image_name)) site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly')) site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value='false')) site_config.linux_fx_version = _format_fx_version(deployment_container_image_name) else: site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value='true')) if runtime not in FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS[functions_version]: raise CLIError("An appropriate linux image for runtime:'{}', " "functions_version: '{}' was not found".format(runtime, functions_version)) if deployment_container_image_name is None: site_config.linux_fx_version = _get_linux_fx_functionapp(functions_version, runtime, runtime_version) else: functionapp_def.kind = 'functionapp' if runtime == "java": site_config.java_version = _get_java_version_functionapp(functions_version, runtime_version) # adding appsetting to site to make it a function site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value=_get_extension_version_functionapp(functions_version))) site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string)) site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value=_get_website_node_version_functionapp(functions_version, runtime, runtime_version))) if disable_app_insights: site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string)) # If plan is not consumption or elastic premium, we need to set always on if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info): site_config.always_on = True # If plan is elastic premium or windows consumption, we need these app settings is_windows_consumption = consumption_plan_location is not None and not is_linux if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption: site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', value=con_string)) site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower())) create_app_insights = False if app_insights_key is not None: site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY', value=app_insights_key)) elif app_insights is not None: instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights) site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY', value=instrumentation_key)) elif not disable_app_insights: create_app_insights = True poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def) functionapp = LongRunningOperation(cmd.cli_ctx)(poller) if consumption_plan_location and is_linux: logger.warning("Your Linux function app 
'%s', that uses a consumption plan has been successfully " "created but is not active until content is published using " "Azure Portal or the Functions Core Tools.", name) else: _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url, deployment_source_branch, deployment_local_git) if create_app_insights: try: try_create_application_insights(cmd, functionapp) except Exception: # pylint: disable=broad-except logger.warning('Error while trying to create and configure an Application Insights for the Function App. ' 'Please use the Azure Portal to create and configure the Application Insights, if needed.') update_app_settings(cmd, functionapp.resource_group, functionapp.name, ['AzureWebJobsDashboard={}'.format(con_string)]) if deployment_container_image_name: update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url, deployment_container_image_name, docker_registry_server_user, docker_registry_server_password) return functionapp def _get_extension_version_functionapp(functions_version): if functions_version is not None: return '~{}'.format(functions_version) return '~2' def _get_linux_fx_functionapp(functions_version, runtime, runtime_version): if runtime_version is None: runtime_version = FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION[functions_version][runtime] if runtime == 'dotnet': runtime_version = DOTNET_RUNTIME_VERSION_TO_DOTNET_LINUX_FX_VERSION[runtime_version] else: runtime = runtime.upper() return '{}|{}'.format(runtime, runtime_version) def _get_website_node_version_functionapp(functions_version, runtime, runtime_version): if runtime is None or runtime != 'node': return FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION[functions_version] if runtime_version is not None: return '~{}'.format(runtime_version) return FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION[functions_version] def _get_java_version_functionapp(functions_version, runtime_version): if runtime_version is None: runtime_version = FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION[functions_version]['java'] if runtime_version == '8': return '1.8' return runtime_version def try_create_application_insights(cmd, functionapp): creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \ 'Please use the Azure Portal to manually create and configure the Application Insights, ' \ 'if needed.' ai_resource_group_name = functionapp.resource_group ai_name = functionapp.name ai_location = functionapp.location app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient) ai_properties = { "name": ai_name, "location": ai_location, "kind": "web", "properties": { "Application_Type": "web" } } appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties) if appinsights is None or appinsights.instrumentation_key is None: logger.warning(creation_failed_warn) return # We make this success message as a warning to no interfere with regular JSON output in stdout logger.warning('Application Insights \"%s\" was created for this Function App. 
' 'You can visit https://portal.azure.com/#resource%s/overview to view your ' 'Application Insights component', appinsights.name, appinsights.id) update_app_settings(cmd, functionapp.resource_group, functionapp.name, ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)]) def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None, deployment_source_branch='master', deployment_local_git=None): if deployment_source_url: logger.warning("Linking to git repository '%s'", deployment_source_url) try: config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git', deployment_source_branch, manual_integration=True) except Exception as ex: # pylint: disable=broad-except ex = ex_handler_factory(no_throw=True)(ex) logger.warning("Link to git repository failed due to error '%s'", ex) if deployment_local_git: local_git_info = enable_local_git(cmd, resource_group_name, name) logger.warning("Local git is configured with url of '%s'", local_git_info['url']) setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url']) def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account): sa_resource_group = resource_group_name if is_valid_resource_id(storage_account): sa_resource_group = parse_resource_id(storage_account)['resource_group'] storage_account = parse_resource_id(storage_account)['name'] storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient) storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group, storage_account) error_message = '' endpoints = storage_properties.primary_endpoints sku = storage_properties.sku.name allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS'] for e in ['blob', 'queue', 'table']: if not getattr(endpoints, e, None): error_message = "Storage account '{}' has no '{}' endpoint. 
It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long if sku not in allowed_storage_types: error_message += 'Storage type {} is not allowed'.format(sku) if error_message: raise CLIError(error_message) obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member try: keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member except AttributeError: # Older API versions have a slightly different structure keys = [obj.key1, obj.key2] # pylint: disable=no-member endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format( "https", endpoint_suffix, storage_account, keys[0]) # pylint: disable=no-member return connection_string def list_consumption_locations(cmd): client = web_client_factory(cmd.cli_ctx) regions = client.list_geo_regions(sku='Dynamic') return [{'name': x.name.lower().replace(' ', '')} for x in regions] def list_locations(cmd, sku, linux_workers_enabled=None): client = web_client_factory(cmd.cli_ctx) full_sku = get_sku_name(sku) return client.list_geo_regions(full_sku, linux_workers_enabled) def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None): import requests from azure.cli.core.util import should_disable_connection_verify total_trials = (int(timeout) // 2) if timeout else 450 num_trials = 0 while num_trials < total_trials: time.sleep(2) response = requests.get(deployment_status_url, headers=authorization, verify=not should_disable_connection_verify()) try: res_dict = response.json() except json.decoder.JSONDecodeError: logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url) res_dict = {} finally: num_trials = num_trials + 1 if res_dict.get('status', 0) == 3: _configure_default_logging(cmd, rg_name, name) raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail -n {} -g {}""".format(res_dict, name, rg_name)) if res_dict.get('status', 0) == 4: break if 'progress' in res_dict: logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing # if the deployment is taking longer than expected if res_dict.get('status', 0) != 4: _configure_default_logging(cmd, rg_name, name) raise CLIError("""Timeout reached by the command, however, the deployment operation is still on-going. 
Navigate to your scm site to check the deployment status""") return res_dict def list_continuous_webjobs(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot) def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name) return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name) def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name) return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name) def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name) def list_triggered_webjobs(cmd, resource_group_name, name, slot=None): return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot) def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot) client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name) return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name) def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name) def list_hc(cmd, name, resource_group_name, slot=None): linux_webapp = show_webapp(cmd, resource_group_name, name, slot) is_linux = linux_webapp.reserved if is_linux: return logger.warning("hybrid connections not supported on a linux app.") client = web_client_factory(cmd.cli_ctx) if slot is None: listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name) else: listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot) # reformats hybrid connection, to prune unnecessary fields mod_list = [] for x in listed_vals.additional_properties["value"]: properties = x["properties"] resourceGroup = x["id"].split("/") mod_hc = { "id": x["id"], "location": x["location"], "name": x["name"], "properties": { "hostname": properties["hostname"], "port": properties["port"], "relayArmUri": properties["relayArmUri"], "relayName": properties["relayName"], "serviceBusNamespace": properties["serviceBusNamespace"], "serviceBusSuffix": properties["serviceBusSuffix"] }, 
"resourceGroup": resourceGroup[4], "type": x["type"] } mod_list.append(mod_hc) return mod_list def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None): HybridConnection = cmd.get_models('HybridConnection') linux_webapp = show_webapp(cmd, resource_group_name, name, slot) is_linux = linux_webapp.reserved if is_linux: return logger.warning("hybrid connections not supported on a linux app.") web_client = web_client_factory(cmd.cli_ctx) hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx) namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx) hy_co_id = '' for n in namespace_client.list(): if n.name == namespace: hy_co_id = n.id i = 0 hy_co_resource_group = '' hy_co_split = hy_co_id.split("/") for z in hy_co_split: if z == "resourceGroups": hy_co_resource_group = hy_co_split[i + 1] i = i + 1 # calling the relay API to get information about the hybrid connection hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection) # if the hybrid connection does not have a default sender authorization # rule, create it hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection) has_default_sender_key = False for r in hy_co_rules: if r.name.lower() == "defaultsender": for z in r.rights: if z == z.send: has_default_sender_key = True if not has_default_sender_key: rights = [AccessRights.send] hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection, "defaultSender", rights) hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender") hy_co_info = hy_co.id hy_co_metadata = ast.literal_eval(hy_co.user_metadata) hy_co_hostname = '' for x in hy_co_metadata: if x["key"] == "endpoint": hy_co_hostname = x["value"] hostname_parts = hy_co_hostname.split(":") hostname = hostname_parts[0] port = hostname_parts[1] id_parameters = hy_co_info.split("/") # populate object with information from the hybrid connection, and set it # on webapp hc = HybridConnection(service_bus_namespace=id_parameters[8], relay_name=hybrid_connection, relay_arm_uri=hy_co_info, hostname=hostname, port=port, send_key_name="defaultSender", send_key_value=hy_co_keys.primary_key, service_bus_suffix=".servicebus.windows.net") if slot is None: return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace, hybrid_connection, hc) else: return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace, hybrid_connection, hc, slot) # reformats hybrid connection, to prune unnecessary fields resourceGroup = return_hc.id.split("/") mod_hc = { "hostname": return_hc.hostname, "id": return_hc.id, "location": return_hc.additional_properties["location"], "name": return_hc.name, "port": return_hc.port, "relayArmUri": return_hc.relay_arm_uri, "resourceGroup": resourceGroup[4], "serviceBusNamespace": return_hc.service_bus_namespace, "serviceBusSuffix": return_hc.service_bus_suffix } return mod_hc # set the key the apps use to connect with the hybrid connection def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type): HybridConnection = cmd.get_models('HybridConnection') web_client = web_client_factory(cmd.cli_ctx) # extract the hybrid connection resource group asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan, namespace, hybrid_connection) arm_uri = asp_hy_co.relay_arm_uri split_uri = 
arm_uri.split("resourceGroups/") resource_group_strings = split_uri[1].split('/') relay_resource_group = resource_group_strings[0] hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx) # calling the relay function to obtain information about the hc in question hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection) # if the hybrid connection does not have a default sender authorization # rule, create it hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection) has_default_sender_key = False for r in hy_co_rules: if r.name.lower() == "defaultsender": for z in r.rights: if z == z.send: has_default_sender_key = True if not has_default_sender_key: rights = [AccessRights.send] hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection, "defaultSender", rights) hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender") hy_co_metadata = ast.literal_eval(hy_co.user_metadata) hy_co_hostname = 0 for x in hy_co_metadata: if x["key"] == "endpoint": hy_co_hostname = x["value"] hostname_parts = hy_co_hostname.split(":") hostname = hostname_parts[0] port = hostname_parts[1] key = "empty" if key_type.lower() == "primary": key = hy_co_keys.primary_key elif key_type.lower() == "secondary": key = hy_co_keys.secondary_key # enures input is correct if key == "empty": logger.warning("Key type is invalid - must be primary or secondary") return apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace, hybrid_connection) # changes the key for every app that uses that hybrid connection for x in apps: app_info = ast.literal_eval(x) app_name = app_info["name"] app_id = app_info["id"] id_split = app_id.split("/") app_resource_group = id_split[4] hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection, relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender", send_key_value=key) web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace, hybrid_connection, hc) return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace, hybrid_connection) def appservice_list_vnet(cmd, resource_group_name, plan): web_client = web_client_factory(cmd.cli_ctx) return web_client.app_service_plans.list_vnets(resource_group_name, plan) def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None): linux_webapp = show_webapp(cmd, resource_group_name, name, slot) is_linux = linux_webapp.reserved if is_linux: return logger.warning("hybrid connections not supported on a linux app.") client = web_client_factory(cmd.cli_ctx) if slot is None: return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection) else: return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace, hybrid_connection, slot) return return_hc def list_vnet_integration(cmd, name, resource_group_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot is None: result = list(client.web_apps.list_vnet_connections(resource_group_name, name)) else: result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot)) mod_list = [] # reformats the vnet entry, removing unecessary information for x in result: # removes GUIDs from name and id longName = x.name if '_' in longName: usIndex = longName.index('_') 
shortName = longName[usIndex + 1:] else: shortName = longName v_id = x.id lastSlash = v_id.rindex('/') shortId = v_id[:lastSlash] + '/' + shortName # extracts desired fields certThumbprint = x.cert_thumbprint location = x.additional_properties["location"] v_type = x.type vnet_resource_id = x.vnet_resource_id id_strings = v_id.split('/') resourceGroup = id_strings[4] routes = x.routes vnet_mod = {"certThumbprint": certThumbprint, "id": shortId, "location": location, "name": shortName, "resourceGroup": resourceGroup, "routes": routes, "type": v_type, "vnetResourceId": vnet_resource_id} mod_list.append(vnet_mod) return mod_list def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None): SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork') Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK) client = web_client_factory(cmd.cli_ctx) vnet_client = network_client_factory(cmd.cli_ctx) list_all_vnets = vnet_client.virtual_networks.list_all() vnet_id = '' for v in list_all_vnets: if v.name == vnet: vnet_id = v.id # parsing the arm uri in order to extract vnet_name and vnet_resource_group vnet_id_strings = vnet_id.split('/') vnet_resource_group = '' i = 0 for z in vnet_id_strings: if z.lower() == "resourcegroups": vnet_resource_group = vnet_id_strings[i + 1] i = i + 1 if slot is None: swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name) else: swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name, name, slot) # check to see if the connection would be supported if swift_connection_info.swift_supported is not True: return logger.warning("""Your app must be in an Azure App Service deployment that is capable of scaling up to Premium v2\nLearn more: https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""") subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet) delegations = subnetObj.delegations delegated = False for d in delegations: if d.service_name.lower() == "microsoft.web/serverfarms".lower(): delegated = True if not delegated: subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")] vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet, subnet_parameters=subnetObj) id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet) subnet_resource_id = id_subnet.id swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id, swift_supported=True) if slot is None: return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name, swiftVnet) else: return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name, swiftVnet, slot) # reformats the vnet entry, removing unecessary information id_strings = return_vnet.id.split('/') resourceGroup = id_strings[4] mod_vnet = { "id": return_vnet.id, "location": return_vnet.additional_properties["location"], "name": return_vnet.name, "resourceGroup": resourceGroup, "subnetResourceId": return_vnet.subnet_resource_id } return mod_vnet def remove_vnet_integration(cmd, name, resource_group_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot is None: return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name) else: return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot) return return_vnet def get_history_triggered_webjob(cmd, resource_group_name, name, 
webjob_name, slot=None): client = web_client_factory(cmd.cli_ctx) if slot: return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot) return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name) def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False, # pylint: disable=too-many-statements, launch_browser=False, html=False): import os AppServicePlan = cmd.get_models('AppServicePlan') src_dir = os.getcwd() _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep)) client = web_client_factory(cmd.cli_ctx) user = get_profile_username() _create_new_rg = False _create_new_app = does_app_already_exist(cmd, name) os_name = detect_os_form_src(src_dir, html) lang_details = get_lang_from_content(src_dir, html) language = lang_details.get('language') # detect the version data = get_runtime_version_details(lang_details.get('file_loc'), language) version_used_create = data.get('to_create') detected_version = data.get('detected') runtime_version = "{}|{}".format(language, version_used_create) if \ version_used_create != "-" else version_used_create site_config = None if not _create_new_app: # App exists # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those logger.warning("Webapp %s already exists. The command will deploy contents to the existing app.", name) app_details = get_app_details(cmd, name) if app_details is None: raise CLIError("Unable to retrieve details of the existing app {}. Please check that the app is a part of " "the current subscription".format(name)) current_rg = app_details.resource_group if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()): raise CLIError("The webapp {} exists in ResourceGroup {} and does not match the value entered {}. Please " "re-run command with the correct parameters.". format(name, current_rg, resource_group_name)) rg_name = resource_group_name or current_rg if location is None: loc = app_details.location.replace(" ", "").lower() else: loc = location.replace(" ", "").lower() plan_details = parse_resource_id(app_details.server_farm_id) current_plan = plan_details['name'] if plan is not None and current_plan.lower() != plan.lower(): raise CLIError("The plan name entered {} does not match the plan name that the webapp is hosted in {}." "Please check if you have configured defaults for plan name and re-run command." .format(plan, current_plan)) plan = plan or plan_details['name'] plan_info = client.app_service_plans.get(rg_name, plan) sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free' current_os = 'Linux' if plan_info.reserved else 'Windows' # Raise error if current OS of the app is different from the current one if current_os.lower() != os_name.lower(): raise CLIError("The webapp {} is a {} app. The code detected at '{}' will default to " "'{}'. 
" "Please create a new app to continue this operation.".format(name, current_os, src_dir, os)) _is_linux = plan_info.reserved # for an existing app check if the runtime version needs to be updated # Get site config to check the runtime version site_config = client.web_apps.get_configuration(rg_name, name) else: # need to create new app, check if we need to use default RG or use user entered values logger.warning("webapp %s doesn't exist", name) sku = get_sku_to_use(src_dir, html, sku) loc = set_location(cmd, sku, location) rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name) _is_linux = os_name.lower() == 'linux' _create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux) plan = get_plan_to_use(cmd, user, os_name, loc, sku, rg_name, _create_new_rg, plan) dry_run_str = r""" { "name" : "%s", "appserviceplan" : "%s", "resourcegroup" : "%s", "sku": "%s", "os": "%s", "location" : "%s", "src_path" : "%s", "runtime_version_detected": "%s", "runtime_version": "%s" } """ % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version, runtime_version) create_json = json.loads(dry_run_str) if dryrun: logger.warning("Web app will be created with the below configuration,re-run command " "without the --dryrun flag to create & deploy a new app") return create_json if _create_new_rg: logger.warning("Creating Resource group '%s' ...", rg_name) create_resource_group(cmd, rg_name, loc) logger.warning("Resource group creation complete") # create ASP logger.warning("Creating AppServicePlan '%s' ...", plan) # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are # updated we update those create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku, number_of_workers=1 if _is_linux else None, location=loc) if _create_new_app: logger.warning("Creating webapp '%s' ...", name) create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None, using_webapp_up=True, language=language) _configure_default_logging(cmd, rg_name, name) else: # for existing app if we might need to update the stack runtime settings if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version: logger.warning('Updating runtime version from %s to %s', site_config.linux_fx_version, runtime_version) update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version) elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version: logger.warning('Updating runtime version from %s to %s', site_config.windows_fx_version, runtime_version) update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version) create_json['runtime_version'] = runtime_version # Zip contents & Deploy logger.warning("Creating zip with contents of dir %s ...", src_dir) # zip contents & deploy zip_file_path = zip_contents_from_dir(src_dir, language) enable_zip_deploy(cmd, rg_name, name, zip_file_path) # Remove the file after deployment, handling exception if user removed the file manually try: os.remove(zip_file_path) except OSError: pass if launch_browser: logger.warning("Launching app using default browser") view_in_browser(cmd, rg_name, name, None, logs) else: _url = _get_url(cmd, rg_name, name) logger.warning("You can launch the app at %s", _url) create_json.update({'URL': _url}) if logs: _configure_default_logging(cmd, rg_name, name) return get_streaming_log(cmd, rg_name, name) with ConfiguredDefaultSetter(cmd.cli_ctx.config, True): 
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name) cmd.cli_ctx.config.set_value('defaults', 'sku', sku) cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan) cmd.cli_ctx.config.set_value('defaults', 'location', loc) cmd.cli_ctx.config.set_value('defaults', 'web', name) return create_json def _ping_scm_site(cmd, resource_group, name): from azure.cli.core.util import should_disable_connection_verify # wake up kudu, by making an SCM call import requests # work around until the timeout limits issue for linux is investigated & fixed user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name) scm_url = _get_scm_url(cmd, resource_group, name) import urllib3 authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password)) requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify()) def is_webapp_up(tunnel_server): return tunnel_server.is_webapp_up() def get_tunnel(cmd, resource_group_name, name, port=None, slot=None): webapp = show_webapp(cmd, resource_group_name, name, slot) is_linux = webapp.reserved if not is_linux: raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan") profiles = list_publish_profiles(cmd, resource_group_name, name, slot) profile_user_name = next(p['userName'] for p in profiles) profile_user_password = next(p['userPWD'] for p in profiles) if port is None: port = 0 # Will auto-select a free port from 1024-65535 logger.info('No port defined, creating on random free port') scm_url = _get_scm_url(cmd, resource_group_name, name, slot) tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password) _ping_scm_site(cmd, resource_group_name, name) _wait_for_webapp(tunnel_server) return tunnel_server def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None): tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot) t = threading.Thread(target=_start_tunnel, args=(tunnel_server,)) t.daemon = True t.start() logger.warning('Opening tunnel on port: %s', tunnel_server.local_port) config = get_site_configs(cmd, resource_group_name, name, slot) if config.remote_debugging_enabled: logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port) else: ssh_user_name = 'root' ssh_user_password = 'Docker!' logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password) logger.warning('Ctrl + C to close') if timeout: time.sleep(int(timeout)) else: while t.isAlive(): time.sleep(5) def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None): tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot) t = threading.Thread(target=_start_tunnel, args=(tunnel_server,)) t.daemon = True t.start() ssh_user_name = 'root' ssh_user_password = 'Docker!' 
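    # informational note: 'root' / 'Docker!' are the well-known fixed credentials of the SSH
    # server baked into App Service's built-in Linux images; they are only reachable through
    # the local tunnel started above.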
s = threading.Thread(target=_start_ssh_session, args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password)) s.daemon = True s.start() if timeout: time.sleep(int(timeout)) else: while s.isAlive() and t.isAlive(): time.sleep(5) def _wait_for_webapp(tunnel_server): tries = 0 while True: if is_webapp_up(tunnel_server): break if tries == 0: logger.warning('Connection is not ready yet, please wait') if tries == 60: raise CLIError("Timeout Error, Unable to establish a connection") tries = tries + 1 logger.warning('.') time.sleep(1) def _start_tunnel(tunnel_server): tunnel_server.start_server() def _start_ssh_session(hostname, port, username, password): tries = 0 while True: try: c = Connection(host=hostname, port=port, user=username, # connect_timeout=60*10, connect_kwargs={"password": password}) break except Exception as ex: # pylint: disable=broad-except logger.info(ex) if tries == 0: logger.warning('Connection is not ready yet, please wait') if tries == 60: raise CLIError("Timeout Error, Unable to establish a connection") tries = tries + 1 logger.warning('.') time.sleep(1) try: c.run('cat /etc/motd', pty=True) c.run('source /etc/profile; exec $SHELL -l', pty=True) except Exception as ex: # pylint: disable=broad-except logger.info(ex) finally: c.close() def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None): # pylint: disable=too-many-statements import platform if platform.system() == "Windows": raise CLIError('webapp ssh is only supported on linux and mac') config = get_site_configs(cmd, resource_group_name, name, slot) if config.remote_debugging_enabled: raise CLIError('remote debugging is enabled, please disable') create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout) def create_devops_pipeline( cmd, functionapp_name=None, organization_name=None, project_name=None, repository_name=None, overwrite_yaml=None, allow_force_push=None, github_pat=None, github_repository=None ): from .azure_devops_build_interactive import AzureDevopsBuildInteractive azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name, organization_name, project_name, repository_name, overwrite_yaml, allow_force_push, github_pat, github_repository) return azure_devops_build_interactive.interactive_azure_devops_build() def _configure_default_logging(cmd, rg_name, name): logger.warning("Configuring default logging for the app, if not already enabled") return config_diagnostics(cmd, rg_name, name, application_logging=True, web_server_logging='filesystem', docker_container_logging='true') def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name): ase_is_id = is_valid_resource_id(ase) if ase_is_id: return ase from msrestazure.tools import resource_id from azure.cli.core.commands.client_factory import get_subscription_id return resource_id( subscription=get_subscription_id(cli_ctx), resource_group=resource_group_name, namespace='Microsoft.Web', type='hostingEnvironments', name=ase) def _validate_asp_sku(app_service_environment, sku): # Isolated SKU is supported only for ASE if sku in ['I1', 'I2', 'I3']: if not app_service_environment: raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to " "learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans") else: if app_service_environment: raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. 
Use this link to " "learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans") def _format_key_vault_id(cli_ctx, key_vault, resource_group_name): key_vault_is_id = is_valid_resource_id(key_vault) if key_vault_is_id: return key_vault from msrestazure.tools import resource_id from azure.cli.core.commands.client_factory import get_subscription_id return resource_id( subscription=get_subscription_id(cli_ctx), resource_group=resource_group_name, namespace='Microsoft.KeyVault', type='vaults', name=key_vault) def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None): hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_host_name_bindings', slot) verified_hostname_found = False for hostname_binding in hostname_bindings: binding_name = hostname_binding.name.split('/')[-1] if binding_name.lower() == hostname and hostname_binding.host_name_type == 'Verified': verified_hostname_found = True return verified_hostname_found
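

# --- Illustrative sketch (editorial addition, not part of the original azure-cli module) ---
# A minimal, hedged example of the wildcard matching performed by _match_host_names_from_cert
# defined earlier in this file; the hostnames below are purely hypothetical.
if __name__ == '__main__':
    _matched = _match_host_names_from_cert(
        {'*.contoso.com', 'www.fabrikam.com'},
        ['admin.contoso.com', 'logs.contoso.com', 'www.fabrikam.com'])
    # '*.contoso.com' matches any host sharing the '.contoso.com' suffix; exact names must match verbatim
    assert _matched == {'admin.contoso.com', 'logs.contoso.com', 'www.fabrikam.com'}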
__init__.py
from __future__ import print_function import argparse import itertools import os import random import re import shlex import string import sys import traceback import warnings from collections import OrderedDict from fnmatch import fnmatchcase from subprocess import list2cmdline from threading import Thread import importlib_metadata import pluggy import py import toml from packaging import requirements from packaging.utils import canonicalize_name import tox from tox.constants import INFO from tox.exception import MissingDependency from tox.interpreters import Interpreters, NoInterpreterInfo from tox.reporter import ( REPORTER_TIMESTAMP_ON_ENV, error, update_default_reporter, using, verbosity1, ) from tox.util.path import ensure_empty_dir from .parallel import ENV_VAR_KEY as PARALLEL_ENV_VAR_KEY from .parallel import add_parallel_config, add_parallel_flags from .reporter import add_verbosity_commands try: from shlex import quote as shlex_quote except ImportError: from pipes import quote as shlex_quote hookimpl = tox.hookimpl """DEPRECATED - REMOVE - left for compatibility with plugins importing from here. Import hookimpl directly from tox instead. """ default_factors = tox.PYTHON.DEFAULT_FACTORS """DEPRECATED MOVE - please update to new location.""" WITHIN_PROVISION = os.environ.get(str("TOX_PROVISION")) == "1" def get_plugin_manager(plugins=()): # initialize plugin manager import tox.venv pm = pluggy.PluginManager("tox") pm.add_hookspecs(tox.hookspecs) pm.register(tox.config) pm.register(tox.interpreters) pm.register(tox.venv) pm.register(tox.session) from tox import package pm.register(package) pm.load_setuptools_entrypoints("tox") for plugin in plugins: pm.register(plugin) pm.check_pending() return pm class Parser: """Command line and ini-parser control object.""" def __init__(self): class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter): def __init__(self, prog): super(HelpFormatter, self).__init__(prog, max_help_position=35, width=190) self.argparser = argparse.ArgumentParser( description="tox options", add_help=False, prog="tox", formatter_class=HelpFormatter ) self._testenv_attr = [] def add_argument(self, *args, **kwargs): """ add argument to command line parser. This takes the same arguments that ``argparse.ArgumentParser.add_argument``. """ return self.argparser.add_argument(*args, **kwargs) def add_testenv_attribute(self, name, type, help, default=None, postprocess=None): """ add an ini-file variable for "testenv" section. Types are specified as strings like "bool", "line-list", "string", "argv", "path", "argvlist". The ``postprocess`` function will be called for each testenv like ``postprocess(testenv_config=testenv_config, value=value)`` where ``value`` is the value as read from the ini (or the default value) and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance which will receive all ini-variables as object attributes. Any postprocess function must return a value which will then be set as the final value in the testenv section. """ self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess)) def add_testenv_attribute_obj(self, obj): """ add an ini-file variable as an object. This works as the ``add_testenv_attribute`` function but expects "name", "type", "help", and "postprocess" attributes on the object. 
""" assert hasattr(obj, "name") assert hasattr(obj, "type") assert hasattr(obj, "help") assert hasattr(obj, "postprocess") self._testenv_attr.append(obj) def parse_cli(self, args, strict=False): args, argv = self.argparser.parse_known_args(args) if argv and (strict or WITHIN_PROVISION): self.argparser.error("unrecognized arguments: {}".format(" ".join(argv))) return args def _format_help(self): return self.argparser.format_help() class VenvAttribute: def __init__(self, name, type, default, help, postprocess): self.name = name self.type = type self.default = default self.help = help self.postprocess = postprocess class DepOption: name = "deps" type = "line-list" help = "each line specifies a dependency in pip/setuptools format." default = () def postprocess(self, testenv_config, value): deps = [] config = testenv_config.config for depline in value: m = re.match(r":(\w+):\s*(\S+)", depline) if m: iname, name = m.groups() ixserver = config.indexserver[iname] else: name = depline.strip() ixserver = None # we need to process options, in case they contain a space, # as the subprocess call to pip install will otherwise fail. # in case of a short option, we remove the space for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT: if name.startswith(option): name = "{}{}".format(option, name[len(option) :].strip()) # in case of a long option, we add an equal sign for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT: name_start = "{} ".format(option) if name.startswith(name_start): name = "{}={}".format(option, name[len(option) :].strip()) name = self._cut_off_dep_comment(name) name = self._replace_forced_dep(name, config) deps.append(DepConfig(name, ixserver)) return deps def _replace_forced_dep(self, name, config): """Override given dependency config name. Take ``--force-dep-version`` option into account. :param name: dep config, for example ["pkg==1.0", "other==2.0"]. :param config: ``Config`` instance :return: the new dependency that should be used for virtual environments """ if not config.option.force_dep: return name for forced_dep in config.option.force_dep: if self._is_same_dep(forced_dep, name): return forced_dep return name @staticmethod def _cut_off_dep_comment(name): return re.sub(r"\s+#.*", "", name).strip() @classmethod def _is_same_dep(cls, dep1, dep2): """Definitions are the same if they refer to the same package, even if versions differ.""" dep1_name = canonicalize_name(requirements.Requirement(dep1).name) try: dep2_name = canonicalize_name(requirements.Requirement(dep2).name) except requirements.InvalidRequirement: # we couldn't parse a version, probably a URL return False return dep1_name == dep2_name class PosargsOption: name = "args_are_paths" type = "bool" default = True help = "treat positional args in commands as paths" def postprocess(self, testenv_config, value): config = testenv_config.config args = config.option.args if args: if value: args = [] for arg in config.option.args: if arg and not os.path.isabs(arg): origpath = os.path.join(config.invocationcwd.strpath, arg) if os.path.exists(origpath): arg = os.path.relpath(origpath, testenv_config.changedir.strpath) args.append(arg) testenv_config._reader.addsubstitutions(args) return value class InstallcmdOption: name = "install_command" type = "argv" default = "python -m pip install {opts} {packages}" help = "install command for dependencies and package under test." 
def postprocess(self, testenv_config, value): if "{packages}" not in value: raise tox.exception.ConfigError( "'install_command' must contain '{packages}' substitution" ) return value def parseconfig(args, plugins=()): """Parse the configuration file and create a Config object. :param plugins: :param list[str] args: list of arguments. :rtype: :class:`Config` :raise SystemExit: toxinit file is not found """ pm = get_plugin_manager(plugins) config, option = parse_cli(args, pm) update_default_reporter(config.option.quiet_level, config.option.verbose_level) for config_file in propose_configs(option.configfile): config_type = config_file.basename content = None if config_type == "pyproject.toml": toml_content = get_py_project_toml(config_file) try: content = toml_content["tool"]["tox"]["legacy_tox_ini"] except KeyError: continue ParseIni(config, config_file, content) pm.hook.tox_configure(config=config) # post process config object break else: msg = "tox config file (either {}) not found" candidates = ", ".join(INFO.CONFIG_CANDIDATES) feedback(msg.format(candidates), sysexit=not (option.help or option.helpini)) return config def get_py_project_toml(path): with open(str(path)) as file_handler: config_data = toml.load(file_handler) return config_data def propose_configs(cli_config_file): from_folder = py.path.local() if cli_config_file is not None: if os.path.isfile(cli_config_file): yield py.path.local(cli_config_file) return if os.path.isdir(cli_config_file): from_folder = py.path.local(cli_config_file) else: print( "ERROR: {} is neither file or directory".format(cli_config_file), file=sys.stderr ) return for basename in INFO.CONFIG_CANDIDATES: if from_folder.join(basename).isfile(): yield from_folder.join(basename) for path in from_folder.parts(reverse=True): ini_path = path.join(basename) if ini_path.check(): yield ini_path def parse_cli(args, pm): parser = Parser() pm.hook.tox_addoption(parser=parser) option = parser.parse_cli(args) if option.version: print(get_version_info(pm)) raise SystemExit(0) interpreters = Interpreters(hook=pm.hook) config = Config( pluginmanager=pm, option=option, interpreters=interpreters, parser=parser, args=args ) return config, option def feedback(msg, sysexit=False): print("ERROR: {}".format(msg), file=sys.stderr) if sysexit: raise SystemExit(1) def get_version_info(pm): out = ["{} imported from {}".format(tox.__version__, tox.__file__)] plugin_dist_info = pm.list_plugin_distinfo() if plugin_dist_info: out.append("registered plugins:") for mod, egg_info in plugin_dist_info: source = getattr(mod, "__file__", repr(mod)) out.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, source)) return "\n".join(out) class SetenvDict(object): _DUMMY = object() def __init__(self, definitions, reader): self.definitions = definitions self.reader = reader self.resolved = {} self._lookupstack = [] def __repr__(self): return "{}: {}".format(self.__class__.__name__, self.definitions) def __contains__(self, name): return name in self.definitions def get(self, name, default=None): try: return self.resolved[name] except KeyError: try: if name in self._lookupstack: raise KeyError(name) val = self.definitions[name] except KeyError: return os.environ.get(name, default) self._lookupstack.append(name) try: self.resolved[name] = res = self.reader._replace(val) finally: self._lookupstack.pop() return res def __getitem__(self, name): x = self.get(name, self._DUMMY) if x is self._DUMMY: raise KeyError(name) return x def keys(self): return self.definitions.keys() def 
__setitem__(self, name, value): self.definitions[name] = value self.resolved[name] = value @tox.hookimpl def tox_addoption(parser): parser.add_argument( "--version", action="store_true", help="report version information to stdout." ) parser.add_argument("-h", "--help", action="store_true", help="show help about options") parser.add_argument( "--help-ini", "--hi", action="store_true", dest="helpini", help="show help about ini-names" ) add_verbosity_commands(parser) parser.add_argument( "--showconfig", action="store_true", help="show live configuration (by default all env, with -l only default targets," " specific via TOXENV/-e)", ) parser.add_argument( "-l", "--listenvs", action="store_true", help="show list of test environments (with description if verbose)", ) parser.add_argument( "-a", "--listenvs-all", action="store_true", help="show list of all defined environments (with description if verbose)", ) parser.add_argument( "-c", dest="configfile", help="config file name or directory with 'tox.ini' file." ) parser.add_argument( "-e", action="append", dest="env", metavar="envlist", help="work against specified environments (ALL selects all).", ) parser.add_argument( "--devenv", metavar="ENVDIR", help=( "sets up a development environment at ENVDIR based on the env's tox " "configuration specified by `-e` (-e defaults to py)." ), ) parser.add_argument("--notest", action="store_true", help="skip invoking test commands.") parser.add_argument( "--sdistonly", action="store_true", help="only perform the sdist packaging activity." ) add_parallel_flags(parser) parser.add_argument( "--parallel--safe-build", action="store_true", dest="parallel_safe_build", help="(deprecated) ensure two tox builds can run in parallel " "(uses a lock file in the tox workdir with .lock extension)", ) parser.add_argument( "--installpkg", metavar="PATH", help="use specified package for installation into venv, instead of creating an sdist.", ) parser.add_argument( "--develop", action="store_true", help="install package in the venv using 'setup.py develop' via 'pip -e .'", ) parser.add_argument( "-i", "--index-url", action="append", dest="indexurl", metavar="URL", help="set indexserver url (if URL is of form name=url set the " "url for the 'name' indexserver, specifically)", ) parser.add_argument( "--pre", action="store_true", help="install pre-releases and development versions of dependencies. " "This will pass the --pre option to install_command " "(pip by default).", ) parser.add_argument( "-r", "--recreate", action="store_true", help="force recreation of virtual environments" ) parser.add_argument( "--result-json", dest="resultjson", metavar="PATH", help="write a json file with detailed information " "about all commands and results involved.", ) # We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED. parser.add_argument( "--hashseed", metavar="SEED", help="set PYTHONHASHSEED to SEED before running commands. " "Defaults to a random integer in the range [1, 4294967295] " "([1, 1024] on Windows). " "Passing 'noset' suppresses this behavior.", ) parser.add_argument( "--force-dep", action="append", metavar="REQ", help="Forces a certain version of one of the dependencies " "when configuring the virtual environment. 
REQ Examples "
        "'pytest<2.7' or 'django>=1.6'.",
    )
    parser.add_argument(
        "--sitepackages",
        action="store_true",
        help="override sitepackages setting to True in all envs",
    )
    parser.add_argument(
        "--alwayscopy", action="store_true", help="override alwayscopy setting to True in all envs"
    )
    cli_skip_missing_interpreter(parser)
    parser.add_argument("--workdir", metavar="PATH", help="tox working directory")
    parser.add_argument(
        "args", nargs="*", help="additional arguments available to command positional substitution"
    )

    def _set_envdir_from_devenv(testenv_config, value):
        if testenv_config.config.option.devenv is not None:
            return py.path.local(testenv_config.config.option.devenv)
        else:
            return value

    parser.add_testenv_attribute(
        name="envdir",
        type="path",
        default="{toxworkdir}/{envname}",
        help="set venv directory -- be very careful when changing this as tox "
        "will remove this directory when recreating an environment",
        postprocess=_set_envdir_from_devenv,
    )

    # add various core venv interpreter attributes
    def setenv(testenv_config, value):
        setenv = value
        config = testenv_config.config
        if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
            setenv["PYTHONHASHSEED"] = config.hashseed
        setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
        setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
        return setenv

    parser.add_testenv_attribute(
        name="setenv",
        type="dict_setenv",
        postprocess=setenv,
        help="list of X=Y lines with environment variable settings",
    )

    def basepython_default(testenv_config, value):
        """Either user set or proposed from the factor name.

        In both cases we check that the factor-name-implied Python version and
        the resolved Python interpreter version match up; if they don't we warn,
        unless ignore_basepython_conflict is set, in which case the
        factor-name-implied version is forced.
        """
        for factor in testenv_config.factors:
            if factor in tox.PYTHON.DEFAULT_FACTORS:
                implied_python = tox.PYTHON.DEFAULT_FACTORS[factor]
                break
        else:
            implied_python, factor = None, None

        if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
            return implied_python

        proposed_python = (implied_python or sys.executable) if value is None else str(value)
        if implied_python is not None and implied_python != proposed_python:
            testenv_config.basepython = proposed_python
            match = tox.PYTHON.PY_FACTORS_RE.match(factor)
            implied_version = match.group(2) if match else None
            if implied_version is not None:
                python_info_for_proposed = testenv_config.python_info
                if not isinstance(python_info_for_proposed, NoInterpreterInfo):
                    proposed_version = "".join(
                        str(i) for i in python_info_for_proposed.version_info[0:2]
                    )
                    # '27'.startswith('2') or '27'.startswith('27')
                    if not proposed_version.startswith(implied_version):
                        # TODO(stephenfin): Raise an exception here in tox 4.0
                        warnings.warn(
                            "conflicting basepython version (set {}, should be {}) for env '{}'; "
                            "resolve conflict or set ignore_basepython_conflict".format(
                                proposed_version, implied_version, testenv_config.envname
                            )
                        )
        return proposed_python

    parser.add_testenv_attribute(
        name="basepython",
        type="basepython",
        default=None,
        postprocess=basepython_default,
        help="executable name or path of interpreter used to create a virtual test environment.",
    )

    def merge_description(testenv_config, value):
        """The reader by default joins the generated description with newlines;
        replace them with spaces."""
        return value.replace("\n", " ")

    parser.add_testenv_attribute(
        name="description",
        type="string",
        default="",
        postprocess=merge_description,
        help="short description of this 
environment", ) parser.add_testenv_attribute( name="envtmpdir", type="path", default="{envdir}/tmp", help="venv temporary directory" ) parser.add_testenv_attribute( name="envlogdir", type="path", default="{envdir}/log", help="venv log directory" ) parser.add_testenv_attribute( name="downloadcache", type="string", default=None, help="(ignored) has no effect anymore, pip-8 uses local caching by default", ) parser.add_testenv_attribute( name="changedir", type="path", default="{toxinidir}", help="directory to change to when running commands", ) parser.add_testenv_attribute_obj(PosargsOption()) parser.add_testenv_attribute( name="skip_install", type="bool", default=False, help="Do not install the current package. This can be used when you need the virtualenv " "management but do not want to install the current package", ) parser.add_testenv_attribute( name="ignore_errors", type="bool", default=False, help="if set to True all commands will be executed irrespective of their result error " "status.", ) def recreate(testenv_config, value): if testenv_config.config.option.recreate: return True return value parser.add_testenv_attribute( name="recreate", type="bool", default=False, postprocess=recreate, help="always recreate this test environment.", ) def passenv(testenv_config, value): # Flatten the list to deal with space-separated values. value = list(itertools.chain.from_iterable([x.split(" ") for x in value])) passenv = { "PATH", "PIP_INDEX_URL", "LANG", "LANGUAGE", "LD_LIBRARY_PATH", "TOX_WORK_DIR", str(REPORTER_TIMESTAMP_ON_ENV), str(PARALLEL_ENV_VAR_KEY), } # read in global passenv settings p = os.environ.get("TOX_TESTENV_PASSENV", None) if p is not None: env_values = [x for x in p.split() if x] value.extend(env_values) # we ensure that tmp directory settings are passed on # we could also set it to the per-venv "envtmpdir" # but this leads to very long paths when run with jenkins # so we just pass it on by default for now. if tox.INFO.IS_WIN: passenv.add("SYSTEMDRIVE") # needed for pip6 passenv.add("SYSTEMROOT") # needed for python's crypto module passenv.add("PATHEXT") # needed for discovering executables passenv.add("COMSPEC") # needed for distutils cygwincompiler passenv.add("TEMP") passenv.add("TMP") # for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4). passenv.add("NUMBER_OF_PROCESSORS") passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine() passenv.add("USERPROFILE") # needed for `os.path.expanduser()` passenv.add("MSYSTEM") # fixes #429 else: passenv.add("TMPDIR") for spec in value: for name in os.environ: if fnmatchcase(name.upper(), spec.upper()): passenv.add(name) return passenv parser.add_testenv_attribute( name="passenv", type="line-list", postprocess=passenv, help="environment variables needed during executing test commands (taken from invocation " "environment). Note that tox always passes through some basic environment variables " "which are needed for basic functioning of the Python system. See --showconfig for the " "eventual passenv setting.", ) parser.add_testenv_attribute( name="whitelist_externals", type="line-list", help="each lines specifies a path or basename for which tox will not warn " "about it coming from outside the test environment.", ) parser.add_testenv_attribute( name="platform", type="string", default=".*", help="regular expression which must match against ``sys.platform``. 
" "otherwise testenv will be skipped.", ) def sitepackages(testenv_config, value): return testenv_config.config.option.sitepackages or value def alwayscopy(testenv_config, value): return testenv_config.config.option.alwayscopy or value parser.add_testenv_attribute( name="sitepackages", type="bool", default=False, postprocess=sitepackages, help="Set to ``True`` if you want to create virtual environments that also " "have access to globally installed packages.", ) parser.add_testenv_attribute( "download", type="bool", default=False, help="download the latest pip, setuptools and wheel when creating the virtual" "environment (default is to use the one bundled in virtualenv)", ) parser.add_testenv_attribute( name="alwayscopy", type="bool", default=False, postprocess=alwayscopy, help="Set to ``True`` if you want virtualenv to always copy files rather " "than symlinking.", ) def pip_pre(testenv_config, value): return testenv_config.config.option.pre or value parser.add_testenv_attribute( name="pip_pre", type="bool", default=False, postprocess=pip_pre, help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ", ) def develop(testenv_config, value): option = testenv_config.config.option return not option.installpkg and (value or option.develop or option.devenv is not None) parser.add_testenv_attribute( name="usedevelop", type="bool", postprocess=develop, default=False, help="install package in develop/editable mode", ) parser.add_testenv_attribute_obj(InstallcmdOption()) parser.add_testenv_attribute( name="list_dependencies_command", type="argv", default="python -m pip freeze", help="list dependencies for a virtual environment", ) parser.add_testenv_attribute_obj(DepOption()) parser.add_testenv_attribute( name="commands", type="argvlist", default="", help="each line specifies a test command and can use substitution.", ) parser.add_testenv_attribute( name="commands_pre", type="argvlist", default="", help="each line specifies a setup command action and can use substitution.", ) parser.add_testenv_attribute( name="commands_post", type="argvlist", default="", help="each line specifies a teardown command and can use substitution.", ) parser.add_testenv_attribute( "ignore_outcome", type="bool", default=False, help="if set to True a failing result of this testenv will not make " "tox fail, only a warning will be produced", ) parser.add_testenv_attribute( "extras", type="line-list", help="list of extras to install with the source distribution or develop install", ) add_parallel_config(parser) def cli_skip_missing_interpreter(parser): class SkipMissingInterpreterAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): value = "true" if values is None else values if value not in ("config", "true", "false"): raise argparse.ArgumentTypeError("value must be config, true or false") setattr(namespace, self.dest, value) parser.add_argument( "-s", "--skip-missing-interpreters", default="config", metavar="val", nargs="?", action=SkipMissingInterpreterAction, help="don't fail tests for missing interpreters: {config,true,false} choice", ) class Config(object): """Global Tox config object.""" def __init__(self, pluginmanager, option, interpreters, parser, args): self.envconfigs = OrderedDict() """Mapping envname -> envconfig""" self.invocationcwd = py.path.local() self.interpreters = interpreters self.pluginmanager = pluginmanager self.option = option self._parser = parser self._testenv_attr = parser._testenv_attr self.args = args """option namespace containing 
all parsed command line options""" @property def homedir(self): homedir = get_homedir() if homedir is None: homedir = self.toxinidir # FIXME XXX good idea? return homedir class TestenvConfig: """Testenv Configuration object. In addition to some core attributes/properties this config object holds all per-testenv ini attributes as attributes, see "tox --help-ini" for an overview. """ def __init__(self, envname, config, factors, reader): #: test environment name self.envname = envname #: global tox config object self.config = config #: set of factors self.factors = factors self._reader = reader self._missing_subs = [] """Holds substitutions that could not be resolved. Pre 2.8.1 missing substitutions crashed with a ConfigError although this would not be a problem if the env is not part of the current testrun. So we need to remember this and check later when the testenv is actually run and crash only then. """ def get_envbindir(self): """Path to directory where scripts/binaries reside.""" if tox.INFO.IS_WIN and "jython" not in self.basepython and "pypy" not in self.basepython: return self.envdir.join("Scripts") else: return self.envdir.join("bin") @property def envbindir(self): return self.get_envbindir() @property def envpython(self): """Path to python executable.""" return self.get_envpython() def get_envpython(self): """ path to python/jython executable. """ if "jython" in str(self.basepython): name = "jython" else: name = "python" return self.envbindir.join(name) def get_envsitepackagesdir(self): """Return sitepackagesdir of the virtualenv environment. NOTE: Only available during execution, not during parsing. """ x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir) return x @property def python_info(self): """Return sitepackagesdir of the virtualenv environment.""" return self.config.interpreters.get_info(envconfig=self) def getsupportedinterpreter(self): if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython: raise tox.exception.UnsupportedInterpreter( "Jython/Windows does not support installing scripts" ) info = self.config.interpreters.get_info(envconfig=self) if not info.executable: raise tox.exception.InterpreterNotFound(self.basepython) if not info.version_info: raise tox.exception.InvocationError( "Failed to get version_info for {}: {}".format(info.name, info.err) ) return info.executable testenvprefix = "testenv:" def get_homedir(): try: return py.path.local._gethomedir() except Exception: return None def make_hashseed(): max_seed = 4294967295 if tox.INFO.IS_WIN: max_seed = 1024 return str(random.randint(1, max_seed)) class ParseIni(object): def __init__(self, config, ini_path, ini_data): # noqa config.toxinipath = ini_path using("tox.ini: {} (pid {})".format(config.toxinipath, os.getpid())) config.toxinidir = config.toxinipath.dirpath() self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data) previous_line_of = self._cfg.lineof def line_of_default_to_zero(section, name=None): at = previous_line_of(section, name=name) if at is None: at = 0 return at self._cfg.lineof = line_of_default_to_zero config._cfg = self._cfg self.config = config prefix = "tox" if ini_path.basename == "setup.cfg" else None context_name = getcontextname() if context_name == "jenkins": reader = SectionReader( "tox:jenkins", self._cfg, prefix=prefix, fallbacksections=["tox"] ) dist_share_default = "{toxworkdir}/distshare" elif not context_name: reader = SectionReader("tox", self._cfg, prefix=prefix) dist_share_default = "{homedir}/.tox/distshare" 
else: raise ValueError("invalid context") if config.option.hashseed is None: hash_seed = make_hashseed() elif config.option.hashseed == "noset": hash_seed = None else: hash_seed = config.option.hashseed config.hashseed = hash_seed reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir) if config.option.workdir is None: config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox") else: config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True) if os.path.exists(str(config.toxworkdir)): config.toxworkdir = config.toxworkdir.realpath() reader.addsubstitutions(toxworkdir=config.toxworkdir) config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False) config.distdir = reader.getpath("distdir", "{toxworkdir}/dist") reader.addsubstitutions(distdir=config.distdir) config.distshare = reader.getpath("distshare", dist_share_default) config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp") reader.addsubstitutions(distshare=config.distshare) config.sdistsrc = reader.getpath("sdistsrc", None) config.setupdir = reader.getpath("setupdir", "{toxinidir}") config.logdir = config.toxworkdir.join("log") within_parallel = PARALLEL_ENV_VAR_KEY in os.environ if not within_parallel: ensure_empty_dir(config.logdir) # determine indexserver dictionary config.indexserver = {"default": IndexServerConfig("default")} prefix = "indexserver" for line in reader.getlist(prefix): name, url = map(lambda x: x.strip(), line.split("=", 1)) config.indexserver[name] = IndexServerConfig(name, url) if config.option.skip_missing_interpreters == "config": val = reader.getbool("skip_missing_interpreters", False) config.option.skip_missing_interpreters = "true" if val else "false" override = False if config.option.indexurl: for url_def in config.option.indexurl: m = re.match(r"\W*(\w+)=(\S+)", url_def) if m is None: url = url_def name = "default" else: name, url = m.groups() if not url: url = None if name != "ALL": config.indexserver[name].url = url else: override = url # let ALL override all existing entries if override: for name in config.indexserver: config.indexserver[name] = IndexServerConfig(name, override) self.handle_provision(config, reader) self.parse_build_isolation(config, reader) res = self._getenvdata(reader, config) config.envlist, all_envs, config.envlist_default, config.envlist_explicit = res # factors used in config or predefined known_factors = self._list_section_factors("testenv") known_factors.update({"py", "python"}) # factors stated in config envlist stated_envlist = reader.getstring("envlist", replace=False) if stated_envlist: for env in _split_env(stated_envlist): known_factors.update(env.split("-")) # configure testenvs to_do = [] failures = OrderedDict() results = {} cur_self = self def run(name, section, subs, config): try: results[name] = cur_self.make_envconfig(name, section, subs, config) except Exception as exception: failures[name] = (exception, traceback.format_exc()) order = [] for name in all_envs: section = "{}{}".format(testenvprefix, name) factors = set(name.split("-")) if ( section in self._cfg or factors <= known_factors or all( tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors ) ): order.append(name) thread = Thread(target=run, args=(name, section, reader._subs, config)) thread.daemon = True thread.start() to_do.append(thread) for thread in to_do: while thread.is_alive(): thread.join(timeout=20) if failures: raise tox.exception.ConfigError( "\n".join( "{} failed with {} at {}".format(key, 
exc, trace) for key, (exc, trace) in failures.items() ) ) for name in order: config.envconfigs[name] = results[name] all_develop = all( name in config.envconfigs and config.envconfigs[name].usedevelop for name in config.envlist ) config.skipsdist = reader.getbool("skipsdist", all_develop) if config.option.devenv is not None: config.option.notest = True if config.option.devenv is not None and len(config.envlist) != 1: feedback("--devenv requires only a single -e", sysexit=True) def handle_provision(self, config, reader): requires_list = reader.getlist("requires") config.minversion = reader.getstring("minversion", None) config.provision_tox_env = name = reader.getstring("provision_tox_env", ".tox") min_version = "tox >= {}".format(config.minversion or tox.__version__) deps = self.ensure_requires_satisfied(config, requires_list, min_version) if config.run_provision: section_name = "testenv:{}".format(name) if section_name not in self._cfg.sections: self._cfg.sections[section_name] = {} self._cfg.sections[section_name]["description"] = "meta tox" env_config = self.make_envconfig( name, "{}{}".format(testenvprefix, name), reader._subs, config ) env_config.deps = deps config.envconfigs[config.provision_tox_env] = env_config raise tox.exception.MissingRequirement(config) # if provisioning is not on, now we need do a strict argument evaluation # raise on unknown args self.config._parser.parse_cli(args=self.config.args, strict=True) @staticmethod def ensure_requires_satisfied(config, requires, min_version): missing_requirements = [] failed_to_parse = False deps = [] exists = set() for require in requires + [min_version]: # noinspection PyBroadException try: package = requirements.Requirement(require) package_name = canonicalize_name(package.name) if package_name not in exists: deps.append(DepConfig(require, None)) exists.add(package_name) dist = importlib_metadata.distribution(package_name) if not package.specifier.contains(dist.version, prereleases=True): raise MissingDependency(package) except requirements.InvalidRequirement as exception: failed_to_parse = True error("failed to parse {!r}".format(exception)) except Exception as exception: verbosity1("could not satisfy requires {!r}".format(exception)) missing_requirements.append(str(requirements.Requirement(require))) if failed_to_parse: raise tox.exception.BadRequirement() config.run_provision = bool(len(missing_requirements)) return deps def parse_build_isolation(self, config, reader): config.isolated_build = reader.getbool("isolated_build", False) config.isolated_build_env = reader.getstring("isolated_build_env", ".package") if config.isolated_build is True: name = config.isolated_build_env section_name = "testenv:{}".format(name) if section_name not in self._cfg.sections: self._cfg.sections[section_name] = {} self._cfg.sections[section_name]["deps"] = "" self._cfg.sections[section_name]["sitepackages"] = "False" self._cfg.sections[section_name]["description"] = "isolated packaging environment" config.envconfigs[name] = self.make_envconfig( name, "{}{}".format(testenvprefix, name), reader._subs, config ) def _list_section_factors(self, section): factors = set() if section in self._cfg: for _, value in self._cfg[section].items(): exprs = re.findall(r"^([\w{}\.!,-]+)\:\s+", value, re.M) factors.update(*mapcat(_split_factor_expr_all, exprs)) return factors def make_envconfig(self, name, section, subs, config, replace=True): factors = set(name.split("-")) reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors) tc 
= TestenvConfig(name, config, factors, reader) reader.addsubstitutions( envname=name, envbindir=tc.get_envbindir, envsitepackagesdir=tc.get_envsitepackagesdir, envpython=tc.get_envpython, **subs ) for env_attr in config._testenv_attr: atype = env_attr.type try: if atype in ("bool", "path", "string", "dict", "dict_setenv", "argv", "argvlist"): meth = getattr(reader, "get{}".format(atype)) res = meth(env_attr.name, env_attr.default, replace=replace) elif atype == "basepython": no_fallback = name in (config.provision_tox_env,) res = reader.getstring( env_attr.name, env_attr.default, replace=replace, no_fallback=no_fallback ) elif atype == "space-separated-list": res = reader.getlist(env_attr.name, sep=" ") elif atype == "line-list": res = reader.getlist(env_attr.name, sep="\n") elif atype == "env-list": res = reader.getstring(env_attr.name, replace=False) res = tuple(_split_env(res)) else: raise ValueError("unknown type {!r}".format(atype)) if env_attr.postprocess: res = env_attr.postprocess(testenv_config=tc, value=res) except tox.exception.MissingSubstitution as e: tc._missing_subs.append(e.name) res = e.FLAG setattr(tc, env_attr.name, res) if atype in ("path", "string", "basepython"): reader.addsubstitutions(**{env_attr.name: res}) return tc def _getallenvs(self, reader, extra_env_list=None): extra_env_list = extra_env_list or [] env_str = reader.getstring("envlist", replace=False) env_list = _split_env(env_str) for env in extra_env_list: if env not in env_list: env_list.append(env) all_envs = OrderedDict((i, None) for i in env_list) for section in self._cfg: if section.name.startswith(testenvprefix): all_envs[section.name[len(testenvprefix) :]] = None if not all_envs: all_envs["python"] = None return list(all_envs.keys()) def _getenvdata(self, reader, config): from_option = self.config.option.env from_environ = os.environ.get("TOXENV") from_config = reader.getstring("envlist", replace=False) env_list = [] envlist_explicit = False if (from_option and "ALL" in from_option) or ( not from_option and from_environ and "ALL" in from_environ.split(",") ): all_envs = self._getallenvs(reader) else: candidates = ( (os.environ.get(PARALLEL_ENV_VAR_KEY), True), (from_option, True), (from_environ, True), ("py" if self.config.option.devenv is not None else None, False), (from_config, False), ) env_str, envlist_explicit = next(((i, e) for i, e in candidates if i), ([], False)) env_list = _split_env(env_str) all_envs = self._getallenvs(reader, env_list) if not env_list: env_list = all_envs package_env = config.isolated_build_env if config.isolated_build is True and package_env in all_envs: all_envs.remove(package_env) if config.isolated_build is True and package_env in env_list: msg = "isolated_build_env {} cannot be part of envlist".format(package_env) raise tox.exception.ConfigError(msg) return env_list, all_envs, _split_env(from_config), envlist_explicit def _split_env(env): """if handed a list, action="append" was used for -e """ if env is None: return [] if not isinstance(env, list): env = [e.split("#", 1)[0].strip() for e in env.split("\n")] env = ",".join([e for e in env if e]) env = [env] return mapcat(_expand_envstr, env) def _is_negated_factor(factor): return factor.startswith("!") def _base_factor_name(factor): return factor[1:] if _is_negated_factor(factor) else factor def _split_factor_expr(expr): def split_single(e): raw = e.split("-") included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)} excluded = {_base_factor_name(factor) for factor in raw if 
_is_negated_factor(factor)} return included, excluded partial_envs = _expand_envstr(expr) return [split_single(e) for e in partial_envs] def _split_factor_expr_all(expr): partial_envs = _expand_envstr(expr) return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs] def _expand_envstr(envstr): # split by commas not in groups tokens = re.split(r"((?:\{[^}]+\})+)|,", envstr) envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k] def expand(env): tokens = re.split(r"\{([^}]+)\}", env) parts = [re.sub(r"\s+", "", token).split(",") for token in tokens] return ["".join(variant) for variant in itertools.product(*parts)] return mapcat(expand, envlist) def mapcat(f, seq): return list(itertools.chain.from_iterable(map(f, seq))) class DepConfig: def __init__(self, name, indexserver=None): self.name = name self.indexserver = indexserver def __repr__(self): if self.indexserver: if self.indexserver.name == "default": return self.name return ":{}:{}".format(self.indexserver.name, self.name) return str(self.name) class IndexServerConfig: def __init__(self, name, url=None): self.name = name self.url = url def __repr__(self): return "IndexServerConfig(name={}, url={})".format(self.name, self.url) is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match """Check value matches substitution form of referencing value from other section. E.g. {[base]commands} """ class SectionReader: def __init__(self, section_name, cfgparser, fallbacksections=None, factors=(), prefix=None): if prefix is None: self.section_name = section_name else: self.section_name = "{}:{}".format(prefix, section_name) self._cfg = cfgparser self.fallbacksections = fallbacksections or [] self.factors = factors self._subs = {} self._subststack = [] self._setenv = None def get_environ_value(self, name): if self._setenv is None: return os.environ.get(name) return self._setenv.get(name) def addsubstitutions(self, _posargs=None, **kw): self._subs.update(kw) if _posargs: self.posargs = _posargs def getpath(self, name, defaultpath, replace=True): path = self.getstring(name, defaultpath, replace=replace) if path is not None: toxinidir = self._subs["toxinidir"] return toxinidir.join(path, abs=True) def getlist(self, name, sep="\n"): s = self.getstring(name, None) if s is None: return [] return [x.strip() for x in s.split(sep) if x.strip()] def getdict(self, name, default=None, sep="\n", replace=True): value = self.getstring(name, None, replace=replace) return self._getdict(value, default=default, sep=sep, replace=replace) def getdict_setenv(self, name, default=None, sep="\n", replace=True): value = self.getstring(name, None, replace=replace, crossonly=True) definitions = self._getdict(value, default=default, sep=sep, replace=replace) self._setenv = SetenvDict(definitions, reader=self) return self._setenv def _getdict(self, value, default, sep, replace=True): if value is None or not replace: return default or {} d = {} for line in value.split(sep): if line.strip(): name, rest = line.split("=", 1) d[name.strip()] = rest.strip() return d def getbool(self, name, default=None, replace=True): s = self.getstring(name, default, replace=replace) if not s or not replace: s = default if s is None: raise KeyError("no config value [{}] {} found".format(self.section_name, name)) if not isinstance(s, bool): if s.lower() == "true": s = True elif s.lower() == "false": s = False else: raise tox.exception.ConfigError( "{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s) ) return s def 
getargvlist(self, name, default="", replace=True): s = self.getstring(name, default, replace=False) return _ArgvlistReader.getargvlist(self, s, replace=replace) def getargv(self, name, default="", replace=True): return self.getargvlist(name, default, replace=replace)[0] def getstring(self, name, default=None, replace=True, crossonly=False, no_fallback=False): x = None sections = [self.section_name] + ([] if no_fallback else self.fallbacksections) for s in sections: try: x = self._cfg[s][name] break except KeyError: continue if x is None: x = default else: # It is needed to apply factors before unwrapping # dependencies, otherwise it can break the substitution # process. Once they are unwrapped, we call apply factors # again for those new dependencies. x = self._apply_factors(x) x = self._replace_if_needed(x, name, replace, crossonly) x = self._apply_factors(x) x = self._replace_if_needed(x, name, replace, crossonly) return x def _replace_if_needed(self, x, name, replace, crossonly): if replace and x and hasattr(x, "replace"): x = self._replace(x, name=name, crossonly=crossonly) return x def _apply_factors(self, s): def factor_line(line): m = re.search(r"^([\w{}\.!,-]+)\:\s+(.+)", line) if not m: return line expr, line = m.groups() if any( included <= self.factors and not any(x in self.factors for x in excluded) for included, excluded in _split_factor_expr(expr) ): return line lines = s.strip().splitlines() return "\n".join(filter(None, map(factor_line, lines))) def _replace(self, value, name=None, section_name=None, crossonly=False): if "{" not in value: return value section_name = section_name if section_name else self.section_name self._subststack.append((section_name, name)) try: replaced = Replacer(self, crossonly=crossonly).do_replace(value) assert self._subststack.pop() == (section_name, name) except tox.exception.MissingSubstitution: if not section_name.startswith(testenvprefix): raise tox.exception.ConfigError( "substitution env:{!r}: unknown or recursive definition in" " section {!r}.".format(value, section_name) ) raise return replaced class Replacer: RE_ITEM_REF = re.compile( r""" (?<!\\)[{] (?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules (?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*) # substitution key (?::(?P<default_value>[^{}]*))? # default value [}] """, re.VERBOSE, ) def __init__(self, reader, crossonly=False): self.reader = reader self.crossonly = crossonly def do_replace(self, value): """ Recursively expand substitutions starting from the innermost expression """ def substitute_once(x): return self.RE_ITEM_REF.sub(self._replace_match, x) expanded = substitute_once(value) while expanded != value: # substitution found value = expanded expanded = substitute_once(value) return expanded def _replace_match(self, match): g = match.groupdict() sub_value = g["substitution_value"] if self.crossonly: if sub_value.startswith("["): return self._substitute_from_other_section(sub_value) # in crossonly we return all other hits verbatim start, end = match.span() return match.string[start:end] # special case: all empty values means ":" which is os.pathsep if not any(g.values()): return os.pathsep # special case: opts and packages. Leave {opts} and # {packages} intact, they are replaced manually in # _venv.VirtualEnv.run_install_command. 
if sub_value in ("opts", "packages"): return "{{{}}}".format(sub_value) try: sub_type = g["sub_type"] except KeyError: raise tox.exception.ConfigError( "Malformed substitution; no substitution type provided" ) if sub_type == "env": return self._replace_env(match) if sub_type == "tty": if is_interactive(): return match.group("substitution_value") return match.group("default_value") if sub_type is not None: raise tox.exception.ConfigError( "No support for the {} substitution type".format(sub_type) ) return self._replace_substitution(match) def _replace_env(self, match): key = match.group("substitution_value") if not key: raise tox.exception.ConfigError("env: requires an environment variable name") default = match.group("default_value") value = self.reader.get_environ_value(key) if value is not None: return value if default is not None: return default raise tox.exception.MissingSubstitution(key) def _substitute_from_other_section(self, key): if key.startswith("[") and "]" in key: i = key.find("]") section, item = key[1:i], key[i + 1 :] cfg = self.reader._cfg if section in cfg and item in cfg[section]: if (section, item) in self.reader._subststack: raise ValueError( "{} already in {}".format((section, item), self.reader._subststack) ) x = str(cfg[section][item]) return self.reader._replace( x, name=item, section_name=section, crossonly=self.crossonly ) raise tox.exception.ConfigError("substitution key {!r} not found".format(key)) def _replace_substitution(self, match): sub_key = match.group("substitution_value") val = self.reader._subs.get(sub_key, None) if val is None: val = self._substitute_from_other_section(sub_key) if callable(val): val = val() return str(val) def is_interactive(): return sys.stdin.isatty() class _ArgvlistReader: @classmethod def getargvlist(cls, reader, value, replace=True): """Parse ``commands`` argvlist multiline string. :param SectionReader reader: reader to be used. :param str value: Content stored by key. :rtype: list[list[str]] :raise :class:`tox.exception.ConfigError`: line-continuation ends nowhere while resolving for specified section """ commands = [] current_command = "" for line in value.splitlines(): line = line.rstrip() if not line: continue if line.endswith("\\"): current_command += " {}".format(line[:-1]) continue current_command += line if is_section_substitution(current_command): replaced = reader._replace(current_command, crossonly=True) commands.extend(cls.getargvlist(reader, replaced)) else: commands.append(cls.processcommand(reader, current_command, replace)) current_command = "" else: if current_command: raise tox.exception.ConfigError( "line-continuation ends nowhere while resolving for [{}] {}".format( reader.section_name, "commands" ) ) return commands @classmethod def processcommand(cls, reader, command, replace=True): posargs = getattr(reader, "posargs", "") if sys.platform.startswith("win"): posargs_string = list2cmdline([x for x in posargs if x]) else: posargs_string = " ".join([shlex_quote(x) for x in posargs if x]) # Iterate through each word of the command substituting as # appropriate to construct the new command string. This # string is then broken up into exec argv components using # shlex. 
        if replace:
            newcommand = ""
            for word in CommandParser(command).words():
                if word == "{posargs}" or word == "[]":
                    newcommand += posargs_string
                    continue
                elif word.startswith("{posargs:") and word.endswith("}"):
                    if posargs:
                        newcommand += posargs_string
                        continue
                    else:
                        word = word[9:-1]
                new_arg = ""
                new_word = reader._replace(word)
                new_word = reader._replace(new_word)
                new_word = new_word.replace("\\{", "{").replace("\\}", "}")
                new_arg += new_word
                newcommand += new_arg
        else:
            newcommand = command

        # Construct shlex object that will not escape any values,
        # use all values as is in argv.
        shlexer = shlex.shlex(newcommand, posix=True)
        shlexer.whitespace_split = True
        shlexer.escape = ""
        return list(shlexer)


class CommandParser(object):
    class State(object):
        def __init__(self):
            self.word = ""
            self.depth = 0
            self.yield_words = []

    def __init__(self, command):
        self.command = command

    def words(self):
        ps = CommandParser.State()

        def word_has_ended():
            return (
                (
                    cur_char in string.whitespace
                    and ps.word
                    and ps.word[-1] not in string.whitespace
                )
                or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
                or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
                or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
            )

        def yield_this_word():
            yieldword = ps.word
            ps.word = ""
            if yieldword:
                ps.yield_words.append(yieldword)

        def yield_if_word_ended():
            if word_has_ended():
                yield_this_word()

        def accumulate():
            ps.word += cur_char

        def push_substitution():
            ps.depth += 1

        def pop_substitution():
            ps.depth -= 1

        for cur_char in self.command:
            if cur_char in string.whitespace:
                if ps.depth == 0:
                    yield_if_word_ended()
                accumulate()
            elif cur_char == "{":
                yield_if_word_ended()
                accumulate()
                push_substitution()
            elif cur_char == "}":
                accumulate()
                pop_substitution()
            else:
                yield_if_word_ended()
                accumulate()
        if ps.word.strip():
            yield_this_word()
        return ps.yield_words


def getcontextname():
    if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
        return "jenkins"
    return None
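A quick usage sketch of the envlist machinery defined above (illustrative only, not part of tox itself): `_split_env` splits the raw envlist string on newlines and commas, and `_expand_envstr` expands `{a,b}` brace groups into the cartesian product of their factors, which is what turns a compact envlist into concrete environment names.

# Illustrative sketch; assumes the _split_env/_expand_envstr helpers defined above.
if __name__ == "__main__":
    envs = _split_env("py{27,36}-django{15,16}\npy37")
    print(envs)
    # -> ['py27-django15', 'py27-django16', 'py36-django15', 'py36-django16', 'py37']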
consumers.py
# chat/consumers.py
import asyncio
import json
import time
from threading import Thread

import cv2
import redis
from channels.generic.websocket import AsyncWebsocketConsumer, WebsocketConsumer
from django.conf import settings

from dl.FaceEngine import FaceEngine
from Share import helper


class AsyncImageConsumer(AsyncWebsocketConsumer):
    async def connect(self):
        self.faceEngine = FaceEngine()
        # self.faceEngine.openCapture(0)
        await self.accept()

    async def disconnect(self, code):
        print("close disconnect.")
        if self.faceEngine:
            self.faceEngine.closeCapture()

    # Receive message from WebSocket
    async def receive(self, text_data):
        data = json.loads(text_data)
        print(data)
        if "requestVal" not in data:
            await self.send(json.dumps({"retVal": -1}))
            return
        id = data["id"]
        # if not id.startswith("rtsp://"):
        #     await self.send(json.dumps({"retVal": -10}))
        #     return
        id = int(id)
        open = self.faceEngine.openCapture(id)
        # sendFrame is a coroutine, so schedule it on the running event loop;
        # handing it to run_in_executor would only create the coroutine object
        # in a worker thread without ever awaiting it.
        asyncio.ensure_future(self.sendFrame())
        # self.t = Thread(target=self.sendFrame)
        # self.t.daemon = True
        # self.t.start()
        # print(open)

    def image_message(self, event):
        print(event["message"])

    async def sendFrame(self):
        while True:
            res = self.faceEngine.faceFrame()
            await self.send(text_data=json.dumps(res))
            await asyncio.sleep(0.05)


class ImageConsumer(WebsocketConsumer):
    def connect(self):
        self.group_name = "ImageGroup"
        self.faceEngine = FaceEngine()
        # self.faceEngine.openCapture(0)
        self.accept()

    def disconnect(self, code):
        print("close disconnect.")
        if self.faceEngine:
            self.faceEngine.closeCapture()

    # Receive message from WebSocket
    def receive(self, text_data):
        data = json.loads(text_data)
        print(data)
        if "requestVal" not in data:
            self.send(json.dumps({"retVal": -1}))
            return
        id = data["id"]
        try:
            # the id may be a numeric device index or a stream URL
            id = int(id)
        except (TypeError, ValueError):
            pass
        open = self.faceEngine.openCapture(id)
        self.t = Thread(target=self.sendFrame)
        self.t.daemon = True
        self.t.start()

    def image_message(self, event):
        print(event["message"])

    def sendFrame(self):
        while True:
            res = self.faceEngine.faceFrame()
            self.send(text_data=json.dumps(res))
            time.sleep(0.05)


class FaceRecognitionConsumer(WebsocketConsumer):
    def connect(self):
        self.r = redis.StrictRedis(
            host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB
        )
        self.stream = "FRStream"
        self.ids = []
        self.t = Thread(target=self.getData)
        self.t.daemon = True
        self.t.start()
        self.accept()

    def disconnect(self, code):
        pass

    # Receive message from WebSocket
    def receive(self, text_data):
        data = json.loads(text_data)
        image = data.get("image")
        try:
            # sent from a browser as a data URL: strip the "data:...;base64," prefix
            img_data = image.split(",")[-1]
        except AttributeError:
            # already a plain base64 string
            img_data = image
        try:
            img = helper.base64_decode_image(img_data)
            assert img is not None
        except Exception:
            # not a decodable image, ignore this message
            return
        bbox = data.get("face")
        detected = helper.data_cast(data["detected"]) if "detected" in data else 1
        d = {
            "type": "Recognition",
            "image": img_data,
            "name": "",
            "detected": detected,
            "recognize": 1,
            "face": bbox,
            "savePic": 0,
            "saveVec": 0,
        }
        # db.rpush(settings.IMAGE_QUEUE, json.dumps(d))
        id = self.r.xadd(self.stream, {"data": json.dumps(d)})
        self.ids.append(id)

    def getData(self):
        while True:
            # attempt to grab the output predictions
            for id in self.ids:
                output = self.r.get(id)
                # check to see if our model has classified the input image
                if output is not None:
                    # add the output predictions to our data dictionary
                    # so we can return it to the client
                    output = output.decode("utf-8")
                    output = json.loads(output)
                    self.send(text_data=json.dumps(output))
                    # delete the result from the database and break
                    # from the polling loop
                    self.r.delete(id)
                    self.r.xdel(self.stream, id)
                    self.ids.remove(id)
                    break
            # sleep for a small amount to give the model a chance
            # to classify the input image
            time.sleep(settings.CLIENT_SLEEP)
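For context, a minimal client sketch for the FaceRecognitionConsumer above. This is illustrative only: the websocket route (`ws://localhost:8000/ws/face/`) and the third-party `websockets` package are assumptions, not part of this app; the payload keys simply mirror what `receive()` reads.

# Hypothetical client; route URL and the `websockets` dependency are assumptions.
import asyncio
import base64
import json

import websockets


async def send_frame(url="ws://localhost:8000/ws/face/", image_path="face.jpg"):
    async with websockets.connect(url) as ws:
        with open(image_path, "rb") as fh:
            payload = {
                "image": base64.b64encode(fh.read()).decode("ascii"),
                "detected": 1,  # ask the worker to run face detection
                "face": None,   # no pre-computed bounding box
            }
        await ws.send(json.dumps(payload))
        # the recognition result published by the worker is pushed back by getData()
        print(json.loads(await ws.recv()))


if __name__ == "__main__":
    asyncio.run(send_frame())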